From 1644b6ca2273ac49b74df54f259fd26bcf7d3e73 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 3 Mar 2026 14:23:10 +0100 Subject: [PATCH] feat: add OTel resource attributes persistence per time series Signed-off-by: Arve Knudsen --- cmd/mimir/config-descriptor.json | 11 + cmd/mimir/help-all.txt.tmpl | 2 + development/README.md | 12 + .../mimir-ingest-storage/config/runtime.yaml | 3 + .../docker-compose.jsonnet | 3 +- .../mimir-ingest-storage/docker-compose.yml | 3 +- .../scripts/otlp-resource-attrs-demo.sh | 657 + .../mimir-monolithic-mode/config/mimir.yaml | 5 + .../mimir-monolithic-mode/config/runtime.yaml | 11 + .../mimir-monolithic-mode/scripts/README.md | 162 + .../scripts/otlp-resource-attrs-demo.sh | 686 + .../configuration-parameters/index.md | 7 + go.mod | 9 +- go.sum | 20 +- pkg/api/api.go | 2 + pkg/api/handlers.go | 17 +- pkg/blockbuilder/tsdb.go | 176 +- pkg/blockbuilder/tsdb_test.go | 26 +- pkg/compactor/block_upload.go | 2 +- pkg/compactor/split_merge_compactor.go | 7 +- pkg/distributor/distributor.go | 136 + pkg/distributor/otel.go | 4 + .../otlpappender/mimir_appender.go | 80 +- .../otlpappender/mimir_appender_test.go | 213 + pkg/distributor/push_test.go | 4 + pkg/frontend/querymiddleware/roundtrip.go | 24 +- pkg/ingester/client/compat.go | 14 + pkg/ingester/client/ingester.pb.go | 10727 +- pkg/ingester/client/ingester.proto | 52 + pkg/ingester/client/mimir_mock_test.go | 5 + pkg/ingester/ingester.go | 363 +- pkg/ingester/ingester_activity.go | 9 + pkg/ingester/ingester_profiling.go | 23 + pkg/ingester/resource_attributes.go | 308 + pkg/ingester/user_tsdb.go | 42 + pkg/mimir/mimir.go | 1 + pkg/mimir/modules.go | 2 + pkg/mimirpb/mimir.pb.go | 1503 +- pkg/mimirpb/mimir.pb.go.expdiff | 1299 +- pkg/mimirpb/mimir.proto | 6 + pkg/mimirpb/timeseries.go | 133 + pkg/mimirpb/timeseries_pools.go | 4 +- pkg/mimirpb/timeseries_pools_test.go | 1 + pkg/mimirpb/timeseries_test.go | 153 + pkg/querier/blocks_store_queryable.go | 204 +- 
pkg/querier/blocks_store_queryable_test.go | 10 + pkg/querier/distributor_queryable.go | 14 +- pkg/querier/distributor_queryable_test.go | 5 + pkg/querier/engine/config.go | 7 + pkg/querier/error_translate_queryable.go | 17 + pkg/querier/error_translate_queryable_test.go | 8 +- pkg/querier/querier.go | 42 + pkg/querier/querier_test.go | 4 + pkg/querier/resource_attributes_handler.go | 560 + .../resource_attributes_handler_test.go | 196 + pkg/querier/resource_querier_cache.go | 390 + pkg/querier/resource_querier_cache_test.go | 205 + pkg/querier/stats_renderer_test.go | 8 +- pkg/querier/store_gateway_client_test.go | 5 + pkg/ruler/compat.go | 8 + pkg/storage/lazyquery/lazyquery.go | 17 + pkg/storage/tsdb/block/block.go | 33 + pkg/storage/tsdb/block/block_test.go | 11 +- pkg/storage/tsdb/bucketindex/index.go | 6 + pkg/storegateway/bucket.go | 509 + pkg/storegateway/bucket_stores.go | 18 + pkg/storegateway/bucket_test.go | 107 + pkg/storegateway/gateway.go | 10 + pkg/storegateway/hintspb/hints.pb.go | 564 +- pkg/storegateway/hintspb/hints.proto | 12 + pkg/storegateway/storegatewaypb/custom.go | 28 + pkg/storegateway/storegatewaypb/gateway.pb.go | 103 +- pkg/storegateway/storegatewaypb/gateway.proto | 3 + pkg/storegateway/storepb/rpc.pb.go | 4406 +- pkg/storegateway/storepb/rpc.pb.go.expdiff | 9 +- pkg/storegateway/storepb/rpc.proto | 58 + pkg/util/validation/limits.go | 21 + vendor/github.com/andybalholm/brotli/LICENSE | 19 + .../github.com/andybalholm/brotli/README.md | 14 + .../andybalholm/brotli/backward_references.go | 185 + .../brotli/backward_references_hq.go | 796 + .../github.com/andybalholm/brotli/bit_cost.go | 436 + .../andybalholm/brotli/bit_reader.go | 266 + .../andybalholm/brotli/bitwriter.go | 56 + .../andybalholm/brotli/block_splitter.go | 144 + .../brotli/block_splitter_command.go | 434 + .../brotli/block_splitter_distance.go | 433 + .../brotli/block_splitter_literal.go | 433 + .../andybalholm/brotli/brotli_bit_stream.go | 1539 + 
.../github.com/andybalholm/brotli/cluster.go | 30 + .../andybalholm/brotli/cluster_command.go | 164 + .../andybalholm/brotli/cluster_distance.go | 326 + .../andybalholm/brotli/cluster_literal.go | 326 + .../github.com/andybalholm/brotli/command.go | 254 + .../andybalholm/brotli/compress_fragment.go | 834 + .../brotli/compress_fragment_two_pass.go | 773 + .../andybalholm/brotli/constants.go | 77 + .../github.com/andybalholm/brotli/context.go | 2176 + .../github.com/andybalholm/brotli/decode.go | 2581 + .../andybalholm/brotli/dictionary.go | 122890 +++++++++++++++ .../andybalholm/brotli/dictionary_hash.go | 32779 ++++ .../github.com/andybalholm/brotli/encode.go | 1220 + .../github.com/andybalholm/brotli/encoder.go | 177 + .../andybalholm/brotli/encoder_dict.go | 22 + .../andybalholm/brotli/entropy_encode.go | 592 + .../brotli/entropy_encode_static.go | 4399 + .../github.com/andybalholm/brotli/fast_log.go | 290 + .../andybalholm/brotli/find_match_length.go | 45 + vendor/github.com/andybalholm/brotli/h10.go | 287 + vendor/github.com/andybalholm/brotli/h5.go | 214 + vendor/github.com/andybalholm/brotli/h6.go | 216 + vendor/github.com/andybalholm/brotli/hash.go | 342 + .../andybalholm/brotli/hash_composite.go | 93 + .../brotli/hash_forgetful_chain.go | 252 + .../brotli/hash_longest_match_quickly.go | 214 + .../andybalholm/brotli/hash_rolling.go | 168 + .../andybalholm/brotli/histogram.go | 226 + vendor/github.com/andybalholm/brotli/http.go | 184 + .../github.com/andybalholm/brotli/huffman.go | 653 + .../andybalholm/brotli/literal_cost.go | 182 + .../andybalholm/brotli/matchfinder/emitter.go | 34 + .../andybalholm/brotli/matchfinder/m0.go | 169 + .../andybalholm/brotli/matchfinder/m4.go | 308 + .../brotli/matchfinder/matchfinder.go | 103 + .../brotli/matchfinder/textencoder.go | 53 + .../github.com/andybalholm/brotli/memory.go | 66 + .../andybalholm/brotli/metablock.go | 574 + .../andybalholm/brotli/metablock_command.go | 165 + .../andybalholm/brotli/metablock_distance.go 
| 165 + .../andybalholm/brotli/metablock_literal.go | 165 + .../github.com/andybalholm/brotli/params.go | 37 + .../github.com/andybalholm/brotli/platform.go | 103 + .../github.com/andybalholm/brotli/prefix.go | 30 + .../andybalholm/brotli/prefix_dec.go | 723 + .../github.com/andybalholm/brotli/quality.go | 196 + .../github.com/andybalholm/brotli/reader.go | 108 + .../andybalholm/brotli/ringbuffer.go | 134 + vendor/github.com/andybalholm/brotli/state.go | 294 + .../andybalholm/brotli/static_dict.go | 662 + .../andybalholm/brotli/static_dict_lut.go | 75094 +++++++++ .../andybalholm/brotli/symbol_list.go | 22 + .../andybalholm/brotli/transform.go | 641 + .../andybalholm/brotli/utf8_util.go | 70 + vendor/github.com/andybalholm/brotli/util.go | 7 + .../andybalholm/brotli/write_bits.go | 52 + .../github.com/andybalholm/brotli/writer.go | 162 + .../github.com/parquet-go/bitpack/.gitignore | 21 + vendor/github.com/parquet-go/bitpack/LICENSE | 201 + .../github.com/parquet-go/bitpack/README.md | 31 + .../github.com/parquet-go/bitpack/bitpack.go | 14 + .../parquet-go/bitpack/masks_int32_amd64.s | 1288 + .../parquet-go/bitpack/masks_int64_amd64.s | 427 + vendor/github.com/parquet-go/bitpack/pack.go | 19 + .../parquet-go/bitpack/pack_arm64.go | 31 + .../parquet-go/bitpack/pack_int32_arm64.s | 462 + .../parquet-go/bitpack/pack_int64_arm64.s | 514 + .../parquet-go/bitpack/pack_purego.go | 94 + .../github.com/parquet-go/bitpack/unpack.go | 29 + .../bitpack/unpack_int32_1bit_arm64.s | 184 + .../bitpack/unpack_int32_2bit_arm64.s | 136 + .../bitpack/unpack_int32_4bit_arm64.s | 106 + .../bitpack/unpack_int32_8bit_arm64.s | 65 + .../parquet-go/bitpack/unpack_int32_amd64.go | 36 + .../parquet-go/bitpack/unpack_int32_amd64.s | 352 + .../parquet-go/bitpack/unpack_int32_arm64.go | 48 + .../parquet-go/bitpack/unpack_int32_arm64.s | 732 + .../parquet-go/bitpack/unpack_int32_be.go | 15 + .../parquet-go/bitpack/unpack_int32_le.go | 9 + .../parquet-go/bitpack/unpack_int32_purego.go | 21 + 
.../bitpack/unpack_int64_1bit_amd64.s | 123 + .../bitpack/unpack_int64_1bit_arm64.s | 239 + .../bitpack/unpack_int64_2bit_amd64.s | 124 + .../bitpack/unpack_int64_2bit_arm64.s | 161 + .../bitpack/unpack_int64_4bit_amd64.s | 124 + .../bitpack/unpack_int64_4bit_arm64.s | 118 + .../bitpack/unpack_int64_8bit_amd64.s | 105 + .../bitpack/unpack_int64_8bit_arm64.s | 71 + .../parquet-go/bitpack/unpack_int64_amd64.go | 38 + .../parquet-go/bitpack/unpack_int64_amd64.s | 824 + .../parquet-go/bitpack/unpack_int64_arm64.go | 50 + .../parquet-go/bitpack/unpack_int64_arm64.s | 943 + .../parquet-go/bitpack/unpack_int64_purego.go | 25 + .../bitpack/unpack_neon_macros_arm64.h | 18 + .../bitpack/unsafecast/unsafecast.go | 54 + .../github.com/parquet-go/jsonlite/.gitignore | 21 + vendor/github.com/parquet-go/jsonlite/LICENSE | 21 + .../github.com/parquet-go/jsonlite/README.md | 54 + .../github.com/parquet-go/jsonlite/convert.go | 352 + vendor/github.com/parquet-go/jsonlite/doc.go | 8 + .../parquet-go/jsonlite/iterator.go | 562 + .../github.com/parquet-go/jsonlite/parse.go | 305 + .../github.com/parquet-go/jsonlite/quote.go | 121 + .../github.com/parquet-go/jsonlite/unquote.go | 218 + .../github.com/parquet-go/jsonlite/valid.go | 244 + .../github.com/parquet-go/jsonlite/value.go | 415 + .../parquet-go/parquet-go/.gitattributes | 2 + .../parquet-go/parquet-go/.gitignore | 21 + .../github.com/parquet-go/parquet-go/.mailmap | 2 + .../github.com/parquet-go/parquet-go/.words | 27 + .../parquet-go/parquet-go/AUTHORS.txt | 5 + .../parquet-go/parquet-go/CHANGELOG.md | 16 + .../parquet-go/parquet-go/CODEOWNERS | 1 + .../parquet-go/parquet-go/CODE_OF_CONDUCT.md | 73 + .../parquet-go/parquet-go/CONTRIBUTING.md | 52 + .../github.com/parquet-go/parquet-go/LICENSE | 213 + .../github.com/parquet-go/parquet-go/Makefile | 15 + .../parquet-go/parquet-go/README.md | 615 + .../parquet-go/parquet-go/allocator.go | 64 + .../github.com/parquet-go/parquet-go/array.go | 27 + .../parquet-go/parquet-go/bitmap.go 
| 38 + .../github.com/parquet-go/parquet-go/bloom.go | 280 + .../parquet-go/parquet-go/bloom/block.go | 28 + .../parquet-go/bloom/block_amd64.go | 39 + .../parquet-go/parquet-go/bloom/block_amd64.s | 129 + .../parquet-go/bloom/block_default.go | 65 + .../parquet-go/bloom/block_optimized.go | 53 + .../parquet-go/parquet-go/bloom/bloom.go | 13 + .../parquet-go/parquet-go/bloom/filter.go | 94 + .../parquet-go/bloom/filter_amd64.go | 33 + .../parquet-go/bloom/filter_amd64.s | 214 + .../parquet-go/bloom/filter_default.go | 17 + .../parquet-go/parquet-go/bloom/hash.go | 77 + .../parquet-go/bloom/xxhash/LICENSE | 27 + .../parquet-go/bloom/xxhash/sum64uint.go | 37 + .../bloom/xxhash/sum64uint_amd64.go | 49 + .../parquet-go/bloom/xxhash/sum64uint_amd64.s | 755 + .../bloom/xxhash/sum64uint_purego.go | 53 + .../parquet-go/bloom/xxhash/xxhash.go | 55 + .../parquet-go/bloom/xxhash/xxhash_amd64.go | 6 + .../parquet-go/bloom/xxhash/xxhash_amd64.s | 180 + .../parquet-go/bloom/xxhash/xxhash_purego.go | 50 + .../parquet-go/parquet-go/bloom_be.go | 19 + .../parquet-go/parquet-go/bloom_le.go | 12 + .../parquet-go/parquet-go/buf.gen.yaml | 6 + .../github.com/parquet-go/parquet-go/buf.yaml | 9 + .../parquet-go/parquet-go/buffer.go | 708 + .../parquet-go/parquet-go/buffer_pool.go | 146 + .../parquet-go/parquet-go/column.go | 849 + .../parquet-go/parquet-go/column_buffer.go | 150 + .../parquet-go/column_buffer_amd64.go | 30 + .../parquet-go/column_buffer_amd64.s | 67 + .../parquet-go/column_buffer_be128.go | 157 + .../parquet-go/column_buffer_boolean.go | 221 + .../parquet-go/column_buffer_byte_array.go | 271 + .../parquet-go/column_buffer_double.go | 145 + .../column_buffer_fixed_len_byte_array.go | 218 + .../parquet-go/column_buffer_float.go | 145 + .../parquet-go/column_buffer_int32.go | 145 + .../parquet-go/column_buffer_int64.go | 146 + .../parquet-go/column_buffer_int96.go | 149 + .../parquet-go/column_buffer_json.go | 226 + .../parquet-go/column_buffer_optional.go | 367 + 
.../parquet-go/column_buffer_proto.go | 206 + .../parquet-go/column_buffer_proto_any.go | 60 + .../parquet-go/column_buffer_proto_purego.go | 84 + .../parquet-go/column_buffer_purego.go | 30 + .../parquet-go/column_buffer_reflect.go | 897 + .../parquet-go/column_buffer_repeated.go | 451 + .../parquet-go/column_buffer_uint32.go | 146 + .../parquet-go/column_buffer_uint64.go | 146 + .../parquet-go/column_buffer_write.go | 786 + .../parquet-go/parquet-go/column_chunk.go | 347 + .../parquet-go/parquet-go/column_index.go | 754 + .../parquet-go/parquet-go/column_index_be.go | 84 + .../parquet-go/parquet-go/column_index_le.go | 38 + .../parquet-go/parquet-go/column_mapping.go | 88 + .../parquet-go/parquet-go/column_path.go | 108 + .../parquet-go/parquet-go/compare.go | 356 + .../parquet-go/parquet-go/compress.go | 96 + .../parquet-go/compress/brotli/brotli.go | 54 + .../parquet-go/compress/compress.go | 148 + .../parquet-go/compress/gzip/gzip.go | 67 + .../parquet-go/parquet-go/compress/lz4/lz4.go | 87 + .../parquet-go/compress/snappy/snappy.go | 31 + .../compress/uncompressed/uncompressed.go | 27 + .../parquet-go/compress/zstd/zstd.go | 105 + .../parquet-go/parquet-go/config.go | 1054 + .../parquet-go/parquet-go/convert.go | 1534 + .../parquet-go/parquet-go/dedupe.go | 107 + .../parquet-go/parquet-go/deprecated/int96.go | 179 + .../parquet-go/deprecated/parquet.go | 112 + .../parquet-go/parquet-go/dictionary.go | 427 + .../parquet-go/parquet-go/dictionary_amd64.go | 168 + .../parquet-go/parquet-go/dictionary_amd64.s | 941 + .../parquet-go/parquet-go/dictionary_be128.go | 184 + .../parquet-go/dictionary_boolean.go | 181 + .../parquet-go/dictionary_byte_array.go | 165 + .../parquet-go/dictionary_double.go | 137 + .../dictionary_fixed_len_byte_array.go | 198 + .../parquet-go/parquet-go/dictionary_float.go | 137 + .../parquet-go/parquet-go/dictionary_int32.go | 150 + .../parquet-go/parquet-go/dictionary_int64.go | 134 + .../parquet-go/parquet-go/dictionary_int96.go | 143 + 
.../parquet-go/parquet-go/dictionary_null.go | 75 + .../parquet-go/dictionary_purego.go | 210 + .../parquet-go/dictionary_uint32.go | 141 + .../parquet-go/dictionary_uint64.go | 141 + .../parquet-go/parquet-go/encoding.go | 160 + .../encoding/bitpacked/bitpacked.go | 119 + .../bytestreamsplit/bytestreamsplit.go | 60 + .../bytestreamsplit/bytestreamsplit_amd64.go | 35 + .../bytestreamsplit/bytestreamsplit_amd64.s | 426 + .../bytestreamsplit/bytestreamsplit_purego.go | 83 + .../encoding/delta/binary_packed.go | 488 + .../encoding/delta/binary_packed_amd64.go | 256 + .../encoding/delta/binary_packed_amd64.s | 920 + .../encoding/delta/binary_packed_purego.go | 105 + .../parquet-go/encoding/delta/byte_array.go | 212 + .../encoding/delta/byte_array_amd64.go | 164 + .../encoding/delta/byte_array_amd64.s | 243 + .../encoding/delta/byte_array_purego.go | 63 + .../parquet-go/encoding/delta/delta.go | 99 + .../parquet-go/encoding/delta/delta_amd64.go | 16 + .../parquet-go/encoding/delta/delta_amd64.s | 13 + .../encoding/delta/length_byte_array.go | 81 + .../encoding/delta/length_byte_array_amd64.go | 9 + .../encoding/delta/length_byte_array_amd64.s | 122 + .../delta/length_byte_array_purego.go | 24 + .../parquet-go/encoding/encoding.go | 72 + .../parquet-go/encoding/notsupported.go | 213 + .../parquet-go/encoding/plain/dictionary.go | 27 + .../parquet-go/encoding/plain/plain.go | 246 + .../parquet-go/encoding/plain/plain_be.go | 113 + .../parquet-go/encoding/plain/plain_le.go | 52 + .../parquet-go/encoding/rle/dictionary.go | 59 + .../parquet-go/parquet-go/encoding/rle/rle.go | 570 + .../parquet-go/encoding/rle/rle_amd64.go | 60 + .../parquet-go/encoding/rle/rle_amd64.s | 174 + .../parquet-go/encoding/rle/rle_purego.go | 22 + .../parquet-go/encoding/thrift/LICENSE | 21 + .../parquet-go/encoding/thrift/binary.go | 369 + .../parquet-go/encoding/thrift/compact.go | 348 + .../parquet-go/encoding/thrift/debug.go | 230 + .../parquet-go/encoding/thrift/decode.go | 689 + 
.../parquet-go/encoding/thrift/encode.go | 399 + .../parquet-go/encoding/thrift/error.go | 111 + .../parquet-go/encoding/thrift/protocol.go | 73 + .../parquet-go/encoding/thrift/struct.go | 143 + .../parquet-go/encoding/thrift/thrift.go | 164 + .../parquet-go/encoding/thrift/unsafe.go | 20 + .../parquet-go/parquet-go/encoding/values.go | 276 + .../parquet-go/parquet-go/errors.go | 87 + .../github.com/parquet-go/parquet-go/file.go | 1188 + .../parquet-go/parquet-go/filter.go | 82 + .../parquet-go/parquet-go/format/parquet.go | 1232 + .../parquet-go/parquet-go/go.tools.mod | 24 + .../parquet-go/parquet-go/go.tools.sum | 30 + .../parquet-go/hashprobe/aeshash/aeshash.go | 21 + .../hashprobe/aeshash/aeshash_amd64.go | 60 + .../hashprobe/aeshash/aeshash_amd64.s | 155 + .../hashprobe/aeshash/aeshash_purego.go | 29 + .../parquet-go/hashprobe/hashprobe.go | 783 + .../parquet-go/hashprobe/hashprobe_amd64.go | 38 + .../parquet-go/hashprobe/hashprobe_amd64.s | 197 + .../parquet-go/hashprobe/hashprobe_purego.go | 19 + .../parquet-go/hashprobe/wyhash/wyhash.go | 49 + .../hashprobe/wyhash/wyhash_amd64.go | 14 + .../hashprobe/wyhash/wyhash_amd64.s | 118 + .../hashprobe/wyhash/wyhash_purego.go | 23 + .../internal/bytealg/broadcast_amd64.go | 17 + .../internal/bytealg/broadcast_amd64.s | 51 + .../internal/bytealg/broadcast_purego.go | 9 + .../parquet-go/internal/bytealg/bytealg.go | 2 + .../internal/bytealg/bytealg_amd64.go | 17 + .../internal/bytealg/count_amd64.go | 26 + .../parquet-go/internal/bytealg/count_amd64.s | 100 + .../internal/bytealg/count_purego.go | 9 + .../parquet-go/internal/debug/debug.go | 95 + .../internal/debug/finalizer_off.go | 6 + .../parquet-go/internal/debug/finalizer_on.go | 7 + .../parquet-go/internal/memory/buffer.go | 128 + .../internal/memory/chunk_buffer.go | 116 + .../parquet-go/internal/memory/memory.go | 7 + .../parquet-go/internal/memory/pool.go | 25 + .../internal/memory/slice_buffer.go | 352 + .../internal/unsafecast/unsafecast.go | 54 + 
.../github.com/parquet-go/parquet-go/level.go | 33 + .../parquet-go/parquet-go/limits.go | 64 + .../github.com/parquet-go/parquet-go/merge.go | 816 + .../parquet-go/parquet-go/multi_row_group.go | 537 + .../github.com/parquet-go/parquet-go/node.go | 618 + .../github.com/parquet-go/parquet-go/null.go | 127 + .../parquet-go/parquet-go/null_amd64.go | 74 + .../parquet-go/parquet-go/null_amd64.s | 227 + .../parquet-go/parquet-go/null_purego.go | 64 + .../parquet-go/parquet-go/offset_index.go | 128 + .../github.com/parquet-go/parquet-go/order.go | 102 + .../parquet-go/parquet-go/order_amd64.go | 21 + .../parquet-go/parquet-go/order_amd64.s | 547 + .../parquet-go/parquet-go/order_purego.go | 42 + .../github.com/parquet-go/parquet-go/page.go | 414 + .../parquet-go/parquet-go/page_be128.go | 99 + .../parquet-go/parquet-go/page_boolean.go | 157 + .../parquet-go/parquet-go/page_bounds.go | 25 + .../parquet-go/page_bounds_amd64.go | 129 + .../parquet-go/parquet-go/page_bounds_amd64.s | 551 + .../parquet-go/page_bounds_purego.go | 143 + .../parquet-go/parquet-go/page_byte_array.go | 204 + .../parquet-go/parquet-go/page_double.go | 107 + .../parquet-go/page_fixed_len_byte_array.go | 133 + .../parquet-go/parquet-go/page_float.go | 107 + .../parquet-go/parquet-go/page_header.go | 221 + .../parquet-go/parquet-go/page_int32.go | 108 + .../parquet-go/parquet-go/page_int64.go | 108 + .../parquet-go/parquet-go/page_int96.go | 107 + .../parquet-go/parquet-go/page_max.go | 23 + .../parquet-go/parquet-go/page_max_amd64.go | 24 + .../parquet-go/parquet-go/page_max_amd64.s | 598 + .../parquet-go/parquet-go/page_max_purego.go | 72 + .../parquet-go/parquet-go/page_min.go | 23 + .../parquet-go/parquet-go/page_min_amd64.go | 24 + .../parquet-go/parquet-go/page_min_amd64.s | 592 + .../parquet-go/parquet-go/page_min_purego.go | 72 + .../parquet-go/parquet-go/page_null.go | 57 + .../parquet-go/parquet-go/page_optional.go | 112 + .../parquet-go/parquet-go/page_repeated.go | 172 + 
.../parquet-go/parquet-go/page_uint32.go | 108 + .../parquet-go/parquet-go/page_uint64.go | 108 + .../parquet-go/parquet-go/parquet.go | 122 + .../parquet-go/parquet-go/parquet_amd64.go | 18 + .../github.com/parquet-go/parquet-go/print.go | 363 + .../parquet-go/parquet-go/reader.go | 671 + .../github.com/parquet-go/parquet-go/row.go | 903 + .../parquet-go/parquet-go/row_buffer.go | 463 + .../parquet-go/parquet-go/row_builder.go | 202 + .../parquet-go/parquet-go/row_group.go | 493 + .../github.com/parquet-go/parquet-go/scan.go | 33 + .../parquet-go/parquet-go/schema.go | 1214 + .../parquet-go/parquet-go/search.go | 95 + .../parquet-go/parquet-go/sorting.go | 270 + .../parquet-go/parquet-go/sparse/array.go | 320 + .../parquet-go/parquet-go/sparse/gather.go | 37 + .../parquet-go/sparse/gather_amd64.go | 85 + .../parquet-go/sparse/gather_amd64.s | 193 + .../parquet-go/sparse/gather_purego.go | 72 + .../parquet-go/parquet-go/sparse/sparse.go | 20 + .../github.com/parquet-go/parquet-go/tags.go | 54 + .../parquet-go/parquet-go/transform.go | 140 + .../github.com/parquet-go/parquet-go/type.go | 284 + .../parquet-go/parquet-go/type_boolean.go | 90 + .../parquet-go/parquet-go/type_bson.go | 87 + .../parquet-go/parquet-go/type_byte_array.go | 102 + .../parquet-go/parquet-go/type_date.go | 117 + .../parquet-go/parquet-go/type_decimal.go | 40 + .../parquet-go/parquet-go/type_double.go | 90 + .../parquet-go/parquet-go/type_enum.go | 87 + .../parquet-go/type_fixed_len_byte_array.go | 234 + .../parquet-go/parquet-go/type_float.go | 90 + .../parquet-go/parquet-go/type_group.go | 75 + .../parquet-go/parquet-go/type_int32.go | 119 + .../parquet-go/parquet-go/type_int64.go | 119 + .../parquet-go/parquet-go/type_int96.go | 86 + .../parquet-go/parquet-go/type_int_logical.go | 206 + .../parquet-go/parquet-go/type_json.go | 106 + .../parquet-go/parquet-go/type_list.go | 86 + .../parquet-go/parquet-go/type_map.go | 91 + .../parquet-go/parquet-go/type_null.go | 73 + 
.../parquet-go/parquet-go/type_string.go | 121 + .../parquet-go/parquet-go/type_time.go | 279 + .../parquet-go/parquet-go/type_timestamp.go | 257 + .../parquet-go/parquet-go/type_uuid.go | 80 + .../parquet-go/parquet-go/type_variant.go | 97 + .../github.com/parquet-go/parquet-go/value.go | 1080 + .../parquet-go/parquet-go/value_amd64.go | 18 + .../parquet-go/parquet-go/value_amd64.s | 59 + .../parquet-go/parquet-go/value_be.go | 19 + .../parquet-go/parquet-go/value_le.go | 8 + .../parquet-go/parquet-go/values_purego.go | 9 + .../parquet-go/parquet-go/writer.go | 2157 + .../parquet-go/writer_statistics.go | 42 + .../libopenapi/datamodel/low/base/schema.go | 2 - .../datamodel/low/base/schema_proxy.go | 228 +- .../pb33f/libopenapi/index/extract_refs.go | 15 +- .../prometheus/prometheus/config/config.go | 7 +- .../model/histogram/float_histogram.go | 334 - .../prometheus/prometheus/promql/engine.go | 97 +- .../prometheus/prometheus/promql/info.go | 511 +- .../promql/parser/generated_parser.y | 6 +- .../promql/parser/generated_parser.y.go | 1313 +- .../prometheus/promql/parser/lex.go | 38 +- .../testdata/native_histograms.test | 353 - .../prometheus/prometheus/scrape/scrape.go | 19 +- .../prometheus/prometheus/storage/fanout.go | 14 + .../prometheus/prometheus/storage/generic.go | 37 + .../prometheus/storage/interface.go | 49 + .../prometheus/storage/interface_append.go | 27 + .../prometheus/prometheus/storage/merge.go | 54 + .../prometheus/prometheus/storage/noop.go | 22 + .../prometheusremotewrite/helper.go | 5 + .../prometheusremotewrite/metrics_to_prw.go | 122 + .../prometheus/storage/remote/write.go | 4 + .../storage/remote/write_otlp_handler.go | 1 + .../prometheus/prometheus/tsdb/block.go | 151 +- .../prometheus/prometheus/tsdb/compact.go | 154 + .../prometheus/prometheus/tsdb/db.go | 230 +- .../prometheus/prometheus/tsdb/head.go | 514 +- .../prometheus/prometheus/tsdb/head_append.go | 297 + .../prometheus/tsdb/head_append_v2.go | 99 +- 
.../prometheus/prometheus/tsdb/head_wal.go | 86 +- .../prometheus/tsdb/ooo_head_read.go | 114 + .../prometheus/prometheus/tsdb/querier.go | 76 +- .../prometheus/tsdb/record/record.go | 230 +- .../prometheus/tsdb/seriesmetadata/README.md | 377 + .../seriesmetadata/content_hash.go} | 25 +- .../prometheus/tsdb/seriesmetadata/entity.go | 335 + .../tsdb/seriesmetadata/layered_reader.go | 268 + .../tsdb/seriesmetadata/mem_store.go | 395 + .../tsdb/seriesmetadata/parquet_schema.go | 141 + .../tsdb/seriesmetadata/reader_options.go | 82 + .../tsdb/seriesmetadata/registry.go | 177 + .../seriesmetadata/resource_attributes.go | 55 + .../tsdb/seriesmetadata/resource_kind.go | 243 + .../prometheus/tsdb/seriesmetadata/scope.go | 127 + .../tsdb/seriesmetadata/scope_kind.go | 225 + .../tsdb/seriesmetadata/seriesmetadata.go | 1270 + .../tsdb/seriesmetadata/versioned.go | 154 + .../tsdb/seriesmetadata/writer_options.go | 109 + .../prometheus/tsdb/wlog/checkpoint.go | 195 +- .../prometheus/util/runtime/limits_default.go | 51 - .../prometheus/util/runtime/limits_windows.go | 26 - .../prometheus/util/runtime/statfs.go | 26 - .../prometheus/util/runtime/statfs_default.go | 93 - .../util/runtime/statfs_linux_386.go | 88 - .../prometheus/util/runtime/statfs_uint32.go | 86 - .../prometheus/util/runtime/statfs_windows.go | 56 - .../prometheus/util/runtime/uname_default.go | 23 - .../prometheus/util/runtime/uname_linux.go | 33 - .../util/runtime/vmlimits_default.go | 25 - .../prometheus/util/teststorage/appender.go | 11 + .../prometheus/web/api/testhelpers/api.go | 2 + .../prometheus/web/api/testhelpers/mocks.go | 5 + .../prometheus/prometheus/web/api/v1/api.go | 886 +- .../prometheus/web/api/v1/openapi.go | 5 + .../prometheus/web/api/v1/openapi_examples.go | 62 + .../prometheus/web/api/v1/openapi_paths.go | 70 + .../prometheus/web/api/v1/openapi_schemas.go | 194 + .../prometheus/web/api/v1/test_helpers.go | 6 + .../collector/pdata/xpdata/LICENSE | 202 + 
.../collector/pdata/xpdata/entity/entity.go | 45 + .../xpdata/entity/entity_attribute_map.go | 103 + .../pdata/xpdata/entity/entity_map.go | 121 + .../xpdata/entity/generated_entityref.go | 90 + .../xpdata/entity/generated_entityrefslice.go | 168 + .../pdata/xpdata/entity/resource_entities.go | 27 + vendor/modules.txt | 51 +- 545 files changed, 349797 insertions(+), 7539 deletions(-) create mode 100755 development/mimir-ingest-storage/scripts/otlp-resource-attrs-demo.sh create mode 100644 development/mimir-monolithic-mode/scripts/README.md create mode 100755 development/mimir-monolithic-mode/scripts/otlp-resource-attrs-demo.sh create mode 100644 pkg/ingester/resource_attributes.go create mode 100644 pkg/querier/resource_attributes_handler.go create mode 100644 pkg/querier/resource_attributes_handler_test.go create mode 100644 pkg/querier/resource_querier_cache.go create mode 100644 pkg/querier/resource_querier_cache_test.go create mode 100644 vendor/github.com/andybalholm/brotli/LICENSE create mode 100644 vendor/github.com/andybalholm/brotli/README.md create mode 100644 vendor/github.com/andybalholm/brotli/backward_references.go create mode 100644 vendor/github.com/andybalholm/brotli/backward_references_hq.go create mode 100644 vendor/github.com/andybalholm/brotli/bit_cost.go create mode 100644 vendor/github.com/andybalholm/brotli/bit_reader.go create mode 100644 vendor/github.com/andybalholm/brotli/bitwriter.go create mode 100644 vendor/github.com/andybalholm/brotli/block_splitter.go create mode 100644 vendor/github.com/andybalholm/brotli/block_splitter_command.go create mode 100644 vendor/github.com/andybalholm/brotli/block_splitter_distance.go create mode 100644 vendor/github.com/andybalholm/brotli/block_splitter_literal.go create mode 100644 vendor/github.com/andybalholm/brotli/brotli_bit_stream.go create mode 100644 vendor/github.com/andybalholm/brotli/cluster.go create mode 100644 vendor/github.com/andybalholm/brotli/cluster_command.go create mode 100644 
vendor/github.com/andybalholm/brotli/cluster_distance.go create mode 100644 vendor/github.com/andybalholm/brotli/cluster_literal.go create mode 100644 vendor/github.com/andybalholm/brotli/command.go create mode 100644 vendor/github.com/andybalholm/brotli/compress_fragment.go create mode 100644 vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go create mode 100644 vendor/github.com/andybalholm/brotli/constants.go create mode 100644 vendor/github.com/andybalholm/brotli/context.go create mode 100644 vendor/github.com/andybalholm/brotli/decode.go create mode 100644 vendor/github.com/andybalholm/brotli/dictionary.go create mode 100644 vendor/github.com/andybalholm/brotli/dictionary_hash.go create mode 100644 vendor/github.com/andybalholm/brotli/encode.go create mode 100644 vendor/github.com/andybalholm/brotli/encoder.go create mode 100644 vendor/github.com/andybalholm/brotli/encoder_dict.go create mode 100644 vendor/github.com/andybalholm/brotli/entropy_encode.go create mode 100644 vendor/github.com/andybalholm/brotli/entropy_encode_static.go create mode 100644 vendor/github.com/andybalholm/brotli/fast_log.go create mode 100644 vendor/github.com/andybalholm/brotli/find_match_length.go create mode 100644 vendor/github.com/andybalholm/brotli/h10.go create mode 100644 vendor/github.com/andybalholm/brotli/h5.go create mode 100644 vendor/github.com/andybalholm/brotli/h6.go create mode 100644 vendor/github.com/andybalholm/brotli/hash.go create mode 100644 vendor/github.com/andybalholm/brotli/hash_composite.go create mode 100644 vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go create mode 100644 vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go create mode 100644 vendor/github.com/andybalholm/brotli/hash_rolling.go create mode 100644 vendor/github.com/andybalholm/brotli/histogram.go create mode 100644 vendor/github.com/andybalholm/brotli/http.go create mode 100644 vendor/github.com/andybalholm/brotli/huffman.go create mode 100644 
vendor/github.com/andybalholm/brotli/literal_cost.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/emitter.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/m0.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/m4.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go create mode 100644 vendor/github.com/andybalholm/brotli/memory.go create mode 100644 vendor/github.com/andybalholm/brotli/metablock.go create mode 100644 vendor/github.com/andybalholm/brotli/metablock_command.go create mode 100644 vendor/github.com/andybalholm/brotli/metablock_distance.go create mode 100644 vendor/github.com/andybalholm/brotli/metablock_literal.go create mode 100644 vendor/github.com/andybalholm/brotli/params.go create mode 100644 vendor/github.com/andybalholm/brotli/platform.go create mode 100644 vendor/github.com/andybalholm/brotli/prefix.go create mode 100644 vendor/github.com/andybalholm/brotli/prefix_dec.go create mode 100644 vendor/github.com/andybalholm/brotli/quality.go create mode 100644 vendor/github.com/andybalholm/brotli/reader.go create mode 100644 vendor/github.com/andybalholm/brotli/ringbuffer.go create mode 100644 vendor/github.com/andybalholm/brotli/state.go create mode 100644 vendor/github.com/andybalholm/brotli/static_dict.go create mode 100644 vendor/github.com/andybalholm/brotli/static_dict_lut.go create mode 100644 vendor/github.com/andybalholm/brotli/symbol_list.go create mode 100644 vendor/github.com/andybalholm/brotli/transform.go create mode 100644 vendor/github.com/andybalholm/brotli/utf8_util.go create mode 100644 vendor/github.com/andybalholm/brotli/util.go create mode 100644 vendor/github.com/andybalholm/brotli/write_bits.go create mode 100644 vendor/github.com/andybalholm/brotli/writer.go create mode 100644 vendor/github.com/parquet-go/bitpack/.gitignore create mode 100644 
vendor/github.com/parquet-go/bitpack/LICENSE create mode 100644 vendor/github.com/parquet-go/bitpack/README.md create mode 100644 vendor/github.com/parquet-go/bitpack/bitpack.go create mode 100644 vendor/github.com/parquet-go/bitpack/masks_int32_amd64.s create mode 100644 vendor/github.com/parquet-go/bitpack/masks_int64_amd64.s create mode 100644 vendor/github.com/parquet-go/bitpack/pack.go create mode 100644 vendor/github.com/parquet-go/bitpack/pack_arm64.go create mode 100644 vendor/github.com/parquet-go/bitpack/pack_int32_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/pack_int64_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/pack_purego.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_1bit_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_2bit_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_4bit_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_8bit_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_amd64.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_amd64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_arm64.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_be.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_le.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int32_purego.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_1bit_amd64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_1bit_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_2bit_amd64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_2bit_arm64.s create mode 100644 
vendor/github.com/parquet-go/bitpack/unpack_int64_4bit_amd64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_4bit_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_8bit_amd64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_8bit_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_amd64.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_amd64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_arm64.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_arm64.s create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_int64_purego.go create mode 100644 vendor/github.com/parquet-go/bitpack/unpack_neon_macros_arm64.h create mode 100644 vendor/github.com/parquet-go/bitpack/unsafecast/unsafecast.go create mode 100644 vendor/github.com/parquet-go/jsonlite/.gitignore create mode 100644 vendor/github.com/parquet-go/jsonlite/LICENSE create mode 100644 vendor/github.com/parquet-go/jsonlite/README.md create mode 100644 vendor/github.com/parquet-go/jsonlite/convert.go create mode 100644 vendor/github.com/parquet-go/jsonlite/doc.go create mode 100644 vendor/github.com/parquet-go/jsonlite/iterator.go create mode 100644 vendor/github.com/parquet-go/jsonlite/parse.go create mode 100644 vendor/github.com/parquet-go/jsonlite/quote.go create mode 100644 vendor/github.com/parquet-go/jsonlite/unquote.go create mode 100644 vendor/github.com/parquet-go/jsonlite/valid.go create mode 100644 vendor/github.com/parquet-go/jsonlite/value.go create mode 100644 vendor/github.com/parquet-go/parquet-go/.gitattributes create mode 100644 vendor/github.com/parquet-go/parquet-go/.gitignore create mode 100644 vendor/github.com/parquet-go/parquet-go/.mailmap create mode 100644 vendor/github.com/parquet-go/parquet-go/.words create mode 100644 vendor/github.com/parquet-go/parquet-go/AUTHORS.txt create mode 100644 
vendor/github.com/parquet-go/parquet-go/CHANGELOG.md create mode 100644 vendor/github.com/parquet-go/parquet-go/CODEOWNERS create mode 100644 vendor/github.com/parquet-go/parquet-go/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/parquet-go/parquet-go/CONTRIBUTING.md create mode 100644 vendor/github.com/parquet-go/parquet-go/LICENSE create mode 100644 vendor/github.com/parquet-go/parquet-go/Makefile create mode 100644 vendor/github.com/parquet-go/parquet-go/README.md create mode 100644 vendor/github.com/parquet-go/parquet-go/allocator.go create mode 100644 vendor/github.com/parquet-go/parquet-go/array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bitmap.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/block.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/block_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/block_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/block_default.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/block_optimized.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/bloom.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/filter.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/filter_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/filter_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/filter_default.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/hash.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/xxhash/LICENSE create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_amd64.s create mode 100644 
vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom_be.go create mode 100644 vendor/github.com/parquet-go/parquet-go/bloom_le.go create mode 100644 vendor/github.com/parquet-go/parquet-go/buf.gen.yaml create mode 100644 vendor/github.com/parquet-go/parquet-go/buf.yaml create mode 100644 vendor/github.com/parquet-go/parquet-go/buffer.go create mode 100644 vendor/github.com/parquet-go/parquet-go/buffer_pool.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_be128.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_boolean.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_double.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_fixed_len_byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_float.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_int32.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_int64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_int96.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_json.go 
create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_optional.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_proto.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_proto_any.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_proto_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_reflect.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_repeated.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_uint32.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_uint64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_buffer_write.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_chunk.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_index.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_index_be.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_index_le.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_mapping.go create mode 100644 vendor/github.com/parquet-go/parquet-go/column_path.go create mode 100644 vendor/github.com/parquet-go/parquet-go/compare.go create mode 100644 vendor/github.com/parquet-go/parquet-go/compress.go create mode 100644 vendor/github.com/parquet-go/parquet-go/compress/brotli/brotli.go create mode 100644 vendor/github.com/parquet-go/parquet-go/compress/compress.go create mode 100644 vendor/github.com/parquet-go/parquet-go/compress/gzip/gzip.go create mode 100644 vendor/github.com/parquet-go/parquet-go/compress/lz4/lz4.go create mode 100644 vendor/github.com/parquet-go/parquet-go/compress/snappy/snappy.go create mode 100644 vendor/github.com/parquet-go/parquet-go/compress/uncompressed/uncompressed.go create mode 100644 
vendor/github.com/parquet-go/parquet-go/compress/zstd/zstd.go create mode 100644 vendor/github.com/parquet-go/parquet-go/config.go create mode 100644 vendor/github.com/parquet-go/parquet-go/convert.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dedupe.go create mode 100644 vendor/github.com/parquet-go/parquet-go/deprecated/int96.go create mode 100644 vendor/github.com/parquet-go/parquet-go/deprecated/parquet.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_be128.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_boolean.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_double.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_fixed_len_byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_float.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_int32.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_int64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_int96.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_null.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_uint32.go create mode 100644 vendor/github.com/parquet-go/parquet-go/dictionary_uint64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/bitpacked/bitpacked.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit.go create mode 100644 
vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/delta.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/delta_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/delta_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/encoding.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/notsupported.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/plain/dictionary.go create mode 100644 
vendor/github.com/parquet-go/parquet-go/encoding/plain/plain.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_be.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_le.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/rle/dictionary.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/rle/rle.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/LICENSE create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/binary.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/compact.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/debug.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/decode.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/encode.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/error.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/protocol.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/struct.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/thrift.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/thrift/unsafe.go create mode 100644 vendor/github.com/parquet-go/parquet-go/encoding/values.go create mode 100644 vendor/github.com/parquet-go/parquet-go/errors.go create mode 100644 vendor/github.com/parquet-go/parquet-go/file.go create mode 100644 vendor/github.com/parquet-go/parquet-go/filter.go create mode 100644 vendor/github.com/parquet-go/parquet-go/format/parquet.go create mode 100644 
vendor/github.com/parquet-go/parquet-go/go.tools.mod create mode 100644 vendor/github.com/parquet-go/parquet-go/go.tools.sum create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash.go create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe.go create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash.go create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/bytealg/bytealg.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/bytealg/bytealg_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_purego.go create mode 100644 
vendor/github.com/parquet-go/parquet-go/internal/debug/debug.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/debug/finalizer_off.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/debug/finalizer_on.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/memory/buffer.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/memory/chunk_buffer.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/memory/memory.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/memory/pool.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/memory/slice_buffer.go create mode 100644 vendor/github.com/parquet-go/parquet-go/internal/unsafecast/unsafecast.go create mode 100644 vendor/github.com/parquet-go/parquet-go/level.go create mode 100644 vendor/github.com/parquet-go/parquet-go/limits.go create mode 100644 vendor/github.com/parquet-go/parquet-go/merge.go create mode 100644 vendor/github.com/parquet-go/parquet-go/multi_row_group.go create mode 100644 vendor/github.com/parquet-go/parquet-go/node.go create mode 100644 vendor/github.com/parquet-go/parquet-go/null.go create mode 100644 vendor/github.com/parquet-go/parquet-go/null_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/null_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/null_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/offset_index.go create mode 100644 vendor/github.com/parquet-go/parquet-go/order.go create mode 100644 vendor/github.com/parquet-go/parquet-go/order_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/order_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/order_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_be128.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_boolean.go create 
mode 100644 vendor/github.com/parquet-go/parquet-go/page_bounds.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_bounds_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_bounds_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/page_bounds_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_double.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_fixed_len_byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_float.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_header.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_int32.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_int64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_int96.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_max.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_max_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_max_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/page_max_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_min.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_min_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_min_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/page_min_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_null.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_optional.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_repeated.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_uint32.go create mode 100644 vendor/github.com/parquet-go/parquet-go/page_uint64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/parquet.go create mode 100644 
vendor/github.com/parquet-go/parquet-go/parquet_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/print.go create mode 100644 vendor/github.com/parquet-go/parquet-go/reader.go create mode 100644 vendor/github.com/parquet-go/parquet-go/row.go create mode 100644 vendor/github.com/parquet-go/parquet-go/row_buffer.go create mode 100644 vendor/github.com/parquet-go/parquet-go/row_builder.go create mode 100644 vendor/github.com/parquet-go/parquet-go/row_group.go create mode 100644 vendor/github.com/parquet-go/parquet-go/scan.go create mode 100644 vendor/github.com/parquet-go/parquet-go/schema.go create mode 100644 vendor/github.com/parquet-go/parquet-go/search.go create mode 100644 vendor/github.com/parquet-go/parquet-go/sorting.go create mode 100644 vendor/github.com/parquet-go/parquet-go/sparse/array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/sparse/gather.go create mode 100644 vendor/github.com/parquet-go/parquet-go/sparse/gather_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/sparse/gather_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/sparse/gather_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/sparse/sparse.go create mode 100644 vendor/github.com/parquet-go/parquet-go/tags.go create mode 100644 vendor/github.com/parquet-go/parquet-go/transform.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_boolean.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_bson.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_date.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_decimal.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_double.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_enum.go create mode 100644 
vendor/github.com/parquet-go/parquet-go/type_fixed_len_byte_array.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_float.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_group.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_int32.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_int64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_int96.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_int_logical.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_json.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_list.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_map.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_null.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_string.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_time.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_timestamp.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_uuid.go create mode 100644 vendor/github.com/parquet-go/parquet-go/type_variant.go create mode 100644 vendor/github.com/parquet-go/parquet-go/value.go create mode 100644 vendor/github.com/parquet-go/parquet-go/value_amd64.go create mode 100644 vendor/github.com/parquet-go/parquet-go/value_amd64.s create mode 100644 vendor/github.com/parquet-go/parquet-go/value_be.go create mode 100644 vendor/github.com/parquet-go/parquet-go/value_le.go create mode 100644 vendor/github.com/parquet-go/parquet-go/values_purego.go create mode 100644 vendor/github.com/parquet-go/parquet-go/writer.go create mode 100644 vendor/github.com/parquet-go/parquet-go/writer_statistics.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/README.md rename vendor/github.com/prometheus/prometheus/{util/runtime/vmlimits_openbsd.go => tsdb/seriesmetadata/content_hash.go} (54%) create mode 100644 
vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/entity.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/layered_reader.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/mem_store.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/parquet_schema.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/reader_options.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/registry.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/resource_attributes.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/resource_kind.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/scope.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/scope_kind.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/seriesmetadata.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/versioned.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/writer_options.go delete mode 100644 vendor/github.com/prometheus/prometheus/util/runtime/limits_default.go delete mode 100644 vendor/github.com/prometheus/prometheus/util/runtime/limits_windows.go delete mode 100644 vendor/github.com/prometheus/prometheus/util/runtime/statfs.go delete mode 100644 vendor/github.com/prometheus/prometheus/util/runtime/statfs_default.go delete mode 100644 vendor/github.com/prometheus/prometheus/util/runtime/statfs_linux_386.go delete mode 100644 vendor/github.com/prometheus/prometheus/util/runtime/statfs_uint32.go delete mode 100644 vendor/github.com/prometheus/prometheus/util/runtime/statfs_windows.go delete mode 100644 vendor/github.com/prometheus/prometheus/util/runtime/uname_default.go delete mode 100644 
vendor/github.com/prometheus/prometheus/util/runtime/uname_linux.go delete mode 100644 vendor/github.com/prometheus/prometheus/util/runtime/vmlimits_default.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/xpdata/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity_attribute_map.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity_map.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/generated_entityref.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/generated_entityrefslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/resource_entities.go diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 623f084b899..92dda3c69c2 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -7135,6 +7135,17 @@ "fieldType": "boolean", "fieldCategory": "advanced" }, + { + "kind": "field", + "name": "otel_persist_resource_attributes", + "required": false, + "desc": "Whether to persist OTel resource attributes per time series as metadata in Prometheus TSDB blocks. Resource attributes are stored in series_metadata.parquet files within blocks and can be queried via the /api/v1/resource_attributes endpoint.", + "fieldValue": null, + "fieldDefaultValue": false, + "fieldFlag": "distributor.otel-persist-resource-attributes", + "fieldType": "boolean", + "fieldCategory": "experimental" + }, { "kind": "field", "name": "ingest_storage_read_consistency", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index c0e869adf7f..37194311119 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -1315,6 +1315,8 @@ Usage of ./cmd/mimir/mimir: Whether to enable automatic suffixes to names of metrics ingested through OTLP. 
-distributor.otel-native-delta-ingestion [experimental] Whether to enable native ingestion of delta OTLP metrics, which will store the raw delta sample values without conversion. If disabled, delta metrics will be rejected. Delta support is in an early stage of development. The ingestion and querying process is likely to change over time. + -distributor.otel-persist-resource-attributes + [experimental] Whether to persist OTel resource attributes per time series as metadata in Prometheus TSDB blocks. Resource attributes are stored in series_metadata.parquet files within blocks and can be queried via the /api/v1/resource_attributes endpoint. -distributor.otel-promote-resource-attributes comma-separated-list-of-strings [experimental] Optionally specify OTel resource attributes to promote to labels. -distributor.otel-promote-scope-metadata diff --git a/development/README.md b/development/README.md index b9d2c2ff442..e1cc01ffb9d 100644 --- a/development/README.md +++ b/development/README.md @@ -39,6 +39,18 @@ Available profiles: > **Note**: Compose down will stop all profiles unless specified. +## OTLP Resource Attributes Demo + +The monolithic mode includes a demo script that showcases how Mimir persists OTel resource attributes from OTLP metrics. This demonstrates the end-to-end flow from OTLP ingestion through both ingesters and store-gateways. + +```bash +cd mimir-monolithic-mode +./compose-up.sh +./scripts/otlp-resource-attrs-demo.sh +``` + +See [mimir-monolithic-mode/scripts/README.md](./mimir-monolithic-mode/scripts/README.md) for details. + ## OTEL collector Experimental support for running OpenTelemetry collector in the Monolithic mode. 
diff --git a/development/mimir-ingest-storage/config/runtime.yaml b/development/mimir-ingest-storage/config/runtime.yaml index 8565a65c9b8..a110251a0e3 100644 --- a/development/mimir-ingest-storage/config/runtime.yaml +++ b/development/mimir-ingest-storage/config/runtime.yaml @@ -3,3 +3,6 @@ overrides: anonymous: labels_query_optimizer_enabled: true ruler_evaluation_consistency_max_delay: 1m + otel_persist_resource_attributes: true + # Enable experimental PromQL functions for the info() function demo. + enabled_promql_experimental_functions: all diff --git a/development/mimir-ingest-storage/docker-compose.jsonnet b/development/mimir-ingest-storage/docker-compose.jsonnet index 8f118b2a141..c2a61a7d789 100644 --- a/development/mimir-ingest-storage/docker-compose.jsonnet +++ b/development/mimir-ingest-storage/docker-compose.jsonnet @@ -184,6 +184,7 @@ std.manifestYamlDoc({ hostname: 'nginx', image: 'nginxinc/nginx-unprivileged:1.22-alpine', depends_on: [ + 'distributor-1', 'ingester-zone-a-1', 'alertmanager-1', 'ruler-1', @@ -193,7 +194,7 @@ std.manifestYamlDoc({ ], environment: [ 'NGINX_ENVSUBST_OUTPUT_DIR=/etc/nginx', - 'DISTRIBUTOR_HOST=ingester-zone-a-1:8080', + 'DISTRIBUTOR_HOST=distributor-1:8080', 'ALERT_MANAGER_HOST=alertmanager-1:8080', 'RULER_HOST=ruler-1:8080', 'QUERY_FRONTEND_HOST=query-frontend:8080', diff --git a/development/mimir-ingest-storage/docker-compose.yml b/development/mimir-ingest-storage/docker-compose.yml index ee73175a503..fb6a5c1c138 100644 --- a/development/mimir-ingest-storage/docker-compose.yml +++ b/development/mimir-ingest-storage/docker-compose.yml @@ -291,6 +291,7 @@ - ".data-minio:/data:delegated" "nginx": "depends_on": + - "distributor-1" - "ingester-zone-a-1" - "alertmanager-1" - "ruler-1" @@ -299,7 +300,7 @@ - "grafana" "environment": - "NGINX_ENVSUBST_OUTPUT_DIR=/etc/nginx" - - "DISTRIBUTOR_HOST=ingester-zone-a-1:8080" + - "DISTRIBUTOR_HOST=distributor-1:8080" - "ALERT_MANAGER_HOST=alertmanager-1:8080" - 
"RULER_HOST=ruler-1:8080" - "QUERY_FRONTEND_HOST=query-frontend:8080" diff --git a/development/mimir-ingest-storage/scripts/otlp-resource-attrs-demo.sh b/development/mimir-ingest-storage/scripts/otlp-resource-attrs-demo.sh new file mode 100755 index 00000000000..60ea21df17f --- /dev/null +++ b/development/mimir-ingest-storage/scripts/otlp-resource-attrs-demo.sh @@ -0,0 +1,657 @@ +#!/bin/bash +# SPDX-License-Identifier: AGPL-3.0-only + +# OTLP Resource Attributes Persistence Demo for Grafana Mimir (Ingest Storage) +# +# This demo showcases how Mimir persists OTel resource attributes from OTLP metrics +# and makes them queryable via the /api/v1/resources endpoint and info() function. +# +# This version is adapted for the ingest storage architecture where: +# - Metrics flow: Distributor -> Kafka -> Block Builder -> Object Storage +# - Instead of flushing ingesters, we wait for the block builder to process Kafka data +# +# Prerequisites: +# - curl and jq installed +# - Either: Mimir running with ingest storage (./compose-up.sh) +# - Or: Use --start-stack to automatically start the stack +# +# Usage: +# ./scripts/otlp-resource-attrs-demo.sh [--start-stack] [--stop-stack] +# +# Options: +# --start-stack Start the docker-compose stack before running the demo +# --stop-stack Stop the docker-compose stack after the demo completes +# --help, -h Show this help message + +set -e + +# Script directory for locating compose scripts +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +COMPOSE_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)" + +# Flags +START_STACK=false +STOP_STACK=false + +show_usage() { + cat < /dev/null 2>&1; then + echo -e "${service_name} is ready." + return 0 + fi + attempt=$((attempt + 1)) + printf "\r Attempt %d/%d..." "$attempt" "$max_attempts" + sleep 2 + done + echo "" + echo "Error: ${service_name} did not become ready within $((max_attempts * 2)) seconds" + return 1 +} + +start_stack() { + echo "Starting docker-compose stack..." 
+ cd "${COMPOSE_DIR}" + + # Start the main stack (detached) + ./compose-up.sh -d + + # Wait for distributor to be ready (indicates core services are up) + echo "" + wait_for_service "http://localhost:8000/ready" "Distributor" + + # Wait for ingesters to be ready (needed for Kafka partition ownership) + echo "" + wait_for_service "http://localhost:8002/ready" "Ingester zone-a" + echo "" + wait_for_service "http://localhost:8003/ready" "Ingester zone-b" + + # Start nginx separately (handles case where grafana port 3000 conflicts) + # Using --no-deps to avoid issues with grafana dependency + echo "" + echo "Starting nginx gateway..." + docker compose up -d --no-deps nginx + + # Wait for nginx to be ready + echo "" + wait_for_service "http://localhost:8080/" "Nginx gateway" + + # Wait for OTLP endpoint to be ready (distributor behind nginx) + echo "" + wait_for_service "http://localhost:8080/distributor/ready" "OTLP endpoint" + + # Wait for OTLP ingestion to actually work (ingesters need to claim Kafka partitions) + echo "" + echo "Waiting for OTLP ingestion to be ready..." + local otlp_ready=false + local otlp_attempts=0 + local otlp_max_attempts=30 + while [ "$otlp_ready" = false ] && [ $otlp_attempts -lt $otlp_max_attempts ]; do + # Send a test metric and check for 200 response + local test_response + test_response=$(curl -s -w "%{http_code}" -o /dev/null -X POST "http://localhost:8080/otlp/v1/metrics" \ + -H "Content-Type: application/json" \ + -H "X-Scope-OrgID: anonymous" \ + -d '{"resourceMetrics":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"startup-test"}}]},"scopeMetrics":[{"metrics":[{"name":"startup_test","sum":{"dataPoints":[{"asDouble":1,"timeUnixNano":"1234567890000000000"}],"aggregationTemporality":2,"isMonotonic":true}}]}]}]}') + if [ "$test_response" = "200" ]; then + otlp_ready=true + else + otlp_attempts=$((otlp_attempts + 1)) + printf "\r Attempt %d/%d (HTTP %s)..." 
"$otlp_attempts" "$otlp_max_attempts" "$test_response" + sleep 2 + fi + done + if [ "$otlp_ready" = false ]; then + echo "" + echo "Warning: OTLP endpoint may not be fully ready" + else + echo "OTLP ingestion is ready." + fi + + echo "" + echo "Stack is ready!" + echo "" +} + +stop_stack() { + echo "" + echo "Stopping docker-compose stack..." + cd "${COMPOSE_DIR}" + ./compose-down.sh +} + +# Register cleanup trap if --stop-stack is set +if [ "$STOP_STACK" = true ]; then + trap stop_stack EXIT +fi + +# Start stack if requested +if [ "$START_STACK" = true ]; then + start_stack +fi + +# ANSI color codes +BOLD='\033[1m' +RESET='\033[0m' +GRAY='\033[37m' +RED='\033[31m' +GREEN='\033[32m' +YELLOW='\033[33m' +CYAN='\033[36m' +MAGENTA='\033[35m' + +# Mimir endpoint (nginx gateway for ingest storage) +MIMIR_URL="${MIMIR_URL:-http://localhost:8080}" +OTLP_ENDPOINT="${MIMIR_URL}/otlp/v1/metrics" +RESOURCES_ENDPOINT="${MIMIR_URL}/prometheus/api/v1/resources" +QUERY_ENDPOINT="${MIMIR_URL}/prometheus/api/v1/query" + +echo -e "${BOLD}${CYAN}=== Grafana Mimir OTel Resource Attributes Persistence Demo ===${RESET}" +echo -e "${CYAN}=== (Ingest Storage Architecture) ===${RESET}\n" + +# Check if Mimir is running (nginx gateway returns 200 at /) +echo -e "${GRAY}Checking Mimir connectivity...${RESET}" +if ! 
curl -s "${MIMIR_URL}/" > /dev/null 2>&1; then + echo -e "${RED}Error: Mimir is not reachable at ${MIMIR_URL}${RESET}" + echo -e "${YELLOW}Please start Mimir first: ./compose-up.sh${RESET}" + exit 1 +fi +echo -e "${GREEN}Mimir is ready at ${MIMIR_URL}${RESET}\n" + +print_phase() { + echo -e "\n${BOLD}${MAGENTA}--- Phase $1: $2 ---${RESET}\n" +} + +# Function to send OTLP metrics +send_otlp_metrics() { + local payload="$1" + local description="$2" + + response=$(curl -s -w "\n%{http_code}" -X POST "${OTLP_ENDPOINT}" \ + -H "Content-Type: application/json" \ + -H "X-Scope-OrgID: anonymous" \ + -d "$payload") + + http_code=$(echo "$response" | tail -n1) + + if [ "$http_code" -eq 200 ]; then + echo -e "${GREEN}Sent: ${description}${RESET}" + else + echo -e "${RED}Failed to send metrics (HTTP ${http_code}): ${description}${RESET}" + echo "$response" | sed '$d' + return 1 + fi +} + +# Function to query resource attributes +query_resources() { + local match="$1" + local description="$2" + + echo -e "${BOLD}${description}${RESET}" + + # URL encode the match parameter + encoded_match=$(printf '%s' "$match" | jq -sRr @uri) + + response=$(curl -s -H "X-Scope-OrgID: anonymous" "${RESOURCES_ENDPOINT}?match[]=${encoded_match}") + + # Check if response is valid JSON + if ! echo "$response" | jq -e . > /dev/null 2>&1; then + echo -e "${RED}Invalid JSON response${RESET}" + echo "$response" + return 1 + fi + + # Check status + status=$(echo "$response" | jq -r '.status') + if [ "$status" != "success" ]; then + echo -e "${RED}Query failed: $(echo "$response" | jq -r '.error // "unknown error"')${RESET}" + return 1 + fi + + # Pretty print the response + echo "$response" | jq -C '.data.series[] | { + labels: .labels, + versions: [.versions[] | { + identifying: .identifying, + descriptive: .descriptive, + entities: .entities, + minTimeMs: .minTimeMs, + maxTimeMs: .maxTimeMs + }] + }' 2>/dev/null || echo "$response" | jq -C '.' 
+ + echo "" +} + +# Function to execute PromQL instant query +query_promql() { + local query="$1" + local time="$2" + local description="$3" + + echo -e "${BOLD}${description}${RESET}" + + # URL encode the query + encoded_query=$(printf '%s' "$query" | jq -sRr @uri) + + if [ -n "$time" ]; then + response=$(curl -s -H "X-Scope-OrgID: anonymous" "${QUERY_ENDPOINT}?query=${encoded_query}&time=${time}") + else + response=$(curl -s -H "X-Scope-OrgID: anonymous" "${QUERY_ENDPOINT}?query=${encoded_query}") + fi + + # Check if response is valid JSON + if ! echo "$response" | jq -e . > /dev/null 2>&1; then + echo -e "${RED}Invalid JSON response${RESET}" + echo "$response" + return 1 + fi + + # Check status + status=$(echo "$response" | jq -r '.status') + if [ "$status" != "success" ]; then + echo -e "${RED}Query failed: $(echo "$response" | jq -r '.error // "unknown error"')${RESET}" + return 1 + fi + + # Pretty print the result + echo "$response" | jq -C '.data.result[] | {metric: .metric, value: .value[1]}' 2>/dev/null || echo "$response" | jq -C '.data' + + echo "" +} + +# Get current timestamp in milliseconds +now_ms() { + echo $(($(date +%s) * 1000)) +} + +# Get current timestamp in seconds (for PromQL) +now_sec() { + date +%s +} + +# Store timestamps for later info() queries +ORIGINAL_TIMESTAMP_SEC=$(now_sec) + +# === PHASE 1: Send OTLP metrics with resource attributes === +print_phase 1 "Sending OTLP metrics with resource attributes" + +echo -e "${GRAY}Sending metrics from multiple services with diverse resource attributes...${RESET}\n" + +TIMESTAMP=$(now_ms) + +# Resource 1: payment-service in production (with entity_refs) +# Entity refs define service entity + host entity with identifying/descriptive key assignments +PAYLOAD1=$(cat < ${YELLOW}Kafka${RESET} -> ${MAGENTA}Block Builder${RESET} -> ${GREEN}Object Storage${RESET}" +echo -e "" +echo -e "${GRAY}Block builder scheduler configuration:${RESET}" +echo -e " - Scheduling interval: 30s (checks for work every 
30s)" +echo -e " - Job size: 1m (processes 1 minute of Kafka data per job)" +echo -e "" +echo -e "${YELLOW}Waiting ~90 seconds for block builder to process data...${RESET}" +echo -e "${GRAY}(This simulates real-world behavior where blocks are created automatically)${RESET}\n" + +# Wait with progress indicator +WAIT_TIME=90 +for i in $(seq 1 $WAIT_TIME); do + printf "\r${GRAY}Progress: [%-50s] %d/%d seconds${RESET}" "$(printf '#%.0s' $(seq 1 $((i * 50 / WAIT_TIME))))" "$i" "$WAIT_TIME" + sleep 1 +done +echo -e "\n" + +echo -e "${GREEN}Block builder should have created blocks with series_metadata.parquet files${RESET}" + +# === PHASE 4: Query from blocks === +print_phase 4 "Querying resource attributes from persisted blocks" + +echo -e "${GRAY}Now querying will include data from store-gateways (blocks in object storage).${RESET}\n" + +query_resources '{__name__=~".+"}' "Resource attributes (from both ingesters and blocks):" + +# === PHASE 5: Demonstrate descriptive attributes changing over time === +print_phase 5 "Descriptive attributes changing over time" + +echo -e "${BOLD}Scenario:${RESET} payment-service is migrated to a new host in a different region." 
+echo -e "The ${CYAN}identifying${RESET} attributes (service.name, service.namespace, service.instance.id) stay the same," +echo -e "but the ${YELLOW}descriptive${RESET} attributes (host.name, cloud.region) change.\n" + +# Wait so timestamps visibly differ +sleep 2 + +TIMESTAMP2=$(now_ms) +MIGRATED_TIMESTAMP_SEC=$(now_sec) + +# Send metrics with changed descriptive attributes (same identifying attributes) +# Also includes entity_refs with updated descriptive key assignments +PAYLOAD_MIGRATED=$(cat < ${GREEN}prod-payment-2.example.com${RESET}" +echo -e " ${YELLOW}cloud.region${RESET}: us-west-2 -> ${GREEN}eu-west-1${RESET}" +echo -e " ${YELLOW}k8s.pod.name${RESET}: (new) ${GREEN}payment-7d4f8b9c5-xk2pq${RESET}" + +# Show the version history +echo -e "\n${BOLD}Version history for production/payment-service:${RESET}" +echo -e "${GRAY}(Original version in block, new version in ingester)${RESET}\n" + +sleep 1 + +query_resources '{service_name="payment-service",service_namespace="production"}' "production/payment-service resource attribute versions:" + +# === PHASE 6: Demonstrate info() function with time-varying attributes === +print_phase 6 "Querying with info() to include resource attributes" + +echo -e "The ${BOLD}info()${RESET} function enriches metrics with resource attributes at query time." 
+echo -e "When descriptive attributes change over time, info() returns the values" +echo -e "that were active at the requested timestamp.\n" + +QUERY='sum by (method, status, "cloud.region", "host.name") (info(http_requests_total{method="GET",status="200"}))' +echo -e "${BOLD}Query:${RESET} ${QUERY}\n" + +# Query at original timestamp (before migration) +echo -e "${BOLD}At timestamp BEFORE migration (${ORIGINAL_TIMESTAMP_SEC}):${RESET}" +query_promql "$QUERY" "$ORIGINAL_TIMESTAMP_SEC" "" + +# Query at migrated timestamp (after migration) +echo -e "${BOLD}At timestamp AFTER migration (${MIGRATED_TIMESTAMP_SEC}):${RESET}" +query_promql "$QUERY" "$MIGRATED_TIMESTAMP_SEC" "" + +echo -e "${CYAN}This enables time-accurate correlation of metrics with OTel traces/logs," +echo -e "even when infrastructure changes occur during the query time range.${RESET}\n" + +# === PHASE 7: Show API response format === +print_phase 7 "API response format for /api/v1/resources" + +echo -e "${BOLD}API Response (/api/v1/resources):${RESET}" +echo -e "${GRAY}Full response format showing labels, versions with identifying/descriptive attributes, and entities:${RESET}\n" + +# Query and show full response format +encoded_match=$(printf '%s' '{service_name="payment-service"}' | jq -sRr @uri) +curl -s -H "X-Scope-OrgID: anonymous" "${RESOURCES_ENDPOINT}?match[]=${encoded_match}" | jq -C '.' 
+ +echo "" + +# === Summary === +print_phase 8 "Summary" + +echo -e "${BOLD}This demo showed how Grafana Mimir persists OTel resource attributes" +echo -e "in the ingest storage architecture:${RESET}" +echo -e "" +echo -e " ${GREEN}1.${RESET} Resource attributes arrive via OTLP metrics (service.name, etc.)" +echo -e " ${GREEN}2.${RESET} Distributor writes metrics to ${YELLOW}Kafka${RESET}" +echo -e " ${GREEN}3.${RESET} Ingesters consume from Kafka and store attributes in-memory" +echo -e " ${GREEN}4.${RESET} ${MAGENTA}Block builder${RESET} processes Kafka data and creates blocks (~30-90s)" +echo -e " ${GREEN}5.${RESET} Blocks with series_metadata.parquet are stored in object storage" +echo -e " ${GREEN}6.${RESET} ${CYAN}Identifying${RESET} attributes (service.name, etc.) remain constant for a series" +echo -e " ${GREEN}7.${RESET} ${YELLOW}Descriptive${RESET} attributes (host.name, cloud.region) can change over time" +echo -e " ${GREEN}8.${RESET} The ${BOLD}info()${RESET} function enriches queries with time-appropriate attributes" +echo "" +echo -e "${BOLD}Ingest Storage Architecture:${RESET}" +echo -e " ${CYAN}Distributor${RESET} -> ${YELLOW}Kafka${RESET} -> ${MAGENTA}Block Builder${RESET} -> ${GREEN}Object Storage${RESET}" +echo -e " |" +echo -e " v" +echo -e " ${CYAN}Ingesters${RESET} (for real-time queries)" +echo "" +echo -e "${CYAN}This enables correlation of Prometheus metrics with OTel traces/logs" +echo -e "using the identifying resource attributes (service.name, etc.)." 
+echo -e "The version history allows tracking infrastructure changes over time.${RESET}" +echo "" +echo -e "${BOLD}Endpoints used:${RESET}" +echo -e " ${GRAY}OTLP ingest:${RESET} ${OTLP_ENDPOINT}" +echo -e " ${GRAY}Resource API:${RESET} ${RESOURCES_ENDPOINT}" +echo -e " ${GRAY}Query API:${RESET} ${QUERY_ENDPOINT}" diff --git a/development/mimir-monolithic-mode/config/mimir.yaml b/development/mimir-monolithic-mode/config/mimir.yaml index ef730e3b9da..f7083aaccb3 100644 --- a/development/mimir-monolithic-mode/config/mimir.yaml +++ b/development/mimir-monolithic-mode/config/mimir.yaml @@ -35,6 +35,9 @@ store_gateway: consul: host: consul:8500 +querier: + query_store_after: 0s # Query store-gateway for all time ranges (default 12h) + blocks_storage: backend: s3 @@ -46,6 +49,7 @@ blocks_storage: bucket_store: sync_dir: /data/mimir-tsdb-querier + sync_interval: 1m # Faster sync for demo/development (default is 15m) s3: endpoint: minio:9000 @@ -75,6 +79,7 @@ limits: native_histograms_ingestion_enabled: true max_global_exemplars_per_user: 100000 otel_metric_suffixes_enabled: true + otel_persist_resource_attributes: true runtime_config: file: ./config/runtime.yaml diff --git a/development/mimir-monolithic-mode/config/runtime.yaml b/development/mimir-monolithic-mode/config/runtime.yaml index 9374fcb0233..fbfbeb1a4d8 100644 --- a/development/mimir-monolithic-mode/config/runtime.yaml +++ b/development/mimir-monolithic-mode/config/runtime.yaml @@ -2,3 +2,14 @@ overrides: anonymous: otel_keep_identifying_resource_attributes: true + # Enable resource attribute persistence for the OTLP resource attributes demo. + # This allows resource attributes from OTLP metrics to be stored per-series + # and queried via /api/v1/resources endpoint. + otel_persist_resource_attributes: true + # Enable experimental PromQL functions for the info() function demo. + # The info() function enriches metrics with resource attributes at query time. 
+ enabled_promql_experimental_functions: all + demo: + otel_keep_identifying_resource_attributes: true + otel_persist_resource_attributes: true + enabled_promql_experimental_functions: all diff --git a/development/mimir-monolithic-mode/scripts/README.md b/development/mimir-monolithic-mode/scripts/README.md new file mode 100644 index 00000000000..690e6373550 --- /dev/null +++ b/development/mimir-monolithic-mode/scripts/README.md @@ -0,0 +1,162 @@ +# Development Scripts + +This directory contains demo and utility scripts for the monolithic mode development environment. + +## OTLP Resource Attributes Demo + +The `otlp-resource-attrs-demo.sh` script demonstrates how Mimir persists OTel resource attributes from OTLP metrics and makes them queryable via the `/api/v1/resources` endpoint and the `info()` PromQL function. + +This is a faithful port of the Prometheus demo at `documentation/examples/otlp-resource-attributes/main.go`. + +### Overview + +When Mimir receives metrics via OTLP, each resource contains attributes that describe the source of the metrics (service.name, host.name, etc.). This demo shows how these attributes are: + +1. Ingested from OTLP metrics +2. Stored per-series in the ingester's TSDB head (in-memory) +3. Persisted to Parquet files during block compaction +4. Retrieved from both ingesters and store-gateways +5. Exposed via the `/api/v1/resources` API +6. Enriched into PromQL queries via the `info()` function + +### Prerequisites + +- Mimir running in monolithic mode +- `curl` and `jq` installed + +### Running the Demo + +1. Start Mimir: + + ```bash + cd development/mimir-monolithic-mode + ./compose-up.sh + ``` + +2. Wait for Mimir to be ready (check http://localhost:8101/ready) + +3. Run the demo: + ```bash + ./scripts/otlp-resource-attrs-demo.sh + ``` + +### Configuration + +The demo requires `otel_persist_resource_attributes: true` to be set in the runtime configuration. 
This is already configured in `config/runtime.yaml` for the anonymous tenant. + +### Demo Phases + +1. **Send OTLP Metrics**: Sends metrics from multiple services with diverse resource attributes and entity_refs +2. **Query from Head**: Shows resource attributes stored in-memory in the ingester +3. **Compact to Disk**: Triggers ingester flush to persist data to Parquet block files +4. **Query from Blocks**: Shows resource attributes retrieved from store-gateways +5. **Descriptive Attributes Changing**: Demonstrates how non-identifying (descriptive) attributes can change over time while identifying attributes remain constant +6. **Query with info()**: Shows how the `info()` function enriches metrics with time-appropriate resource attributes +7. **API Response Format**: Displays the full `/api/v1/resources` JSON response structure +8. **Summary**: Summarizes the key concepts demonstrated + +### Resource Attributes + +The demo uses these OTel resource attributes: + +**Identifying Attributes** (constant for a series, used for correlation): + +- `service.name` - The logical name of the service +- `service.namespace` - The namespace/environment +- `service.instance.id` - Unique instance identifier + +These attributes uniquely identify the resource and remain constant throughout the lifetime of a series. They enable correlation with traces and logs. + +**Descriptive Attributes** (can change over time): + +- `host.name` - Hostname of the service (can change during migration) +- `cloud.region` - Cloud provider region (can change during migration) +- `deployment.environment` - Deployment environment +- `k8s.pod.name` - Kubernetes pod name (changes on pod restart) + +These attributes describe the current state of the resource and may change over time as infrastructure evolves (e.g., during migrations, scaling, restarts). 
+ +### Entity Refs + +The demo demonstrates OTel entity_refs which structure resources into typed entities: + +```json +{ + "resource": { + "attributes": [...], + "entityRefs": [ + { + "type": "service", + "idKeys": ["service.name", "service.namespace", "service.instance.id"], + "descriptionKeys": ["deployment.environment"] + }, + { + "type": "host", + "idKeys": ["host.name"], + "descriptionKeys": ["cloud.region"] + } + ] + } +} +``` + +### Architecture + +``` +OTLP Metrics TSDB Head Parquet Block +┌─────────────────┐ ┌────────────┐ ┌────────────┐ +│ ResourceMetrics │ ──────► │ In-memory │ ──────► │ series_ │ +│ └─ Resource │ Ingest │ storage │ Compact │ metadata. │ +│ └─ Attrs │ │ │ │ parquet │ +│ └─ Entities│ │ │ │ │ +└─────────────────┘ └────────────┘ └────────────┘ + │ │ + ▼ ▼ + ┌─────────────────────────────────┐ + │ /api/v1/resources │ + │ (combined head + blocks) │ + └─────────────────────────────────┘ + │ + ▼ + ┌─────────────────────────────────┐ + │ info() PromQL function │ + │ (enriches metrics at query time)│ + └─────────────────────────────────┘ +``` + +### API Reference + +Query resource attributes: + +```bash +curl 'http://localhost:8101/prometheus/api/v1/resources?match[]={__name__=~".+"}' +``` + +Query with info() function: + +```bash +curl 'http://localhost:8101/prometheus/api/v1/query?query=info(http_requests_total)&time=1234567890' +``` + +Send OTLP metrics with entity_refs: + +```bash +curl -X POST 'http://localhost:8101/otlp/v1/metrics' \ + -H 'Content-Type: application/json' \ + -d '{ + "resourceMetrics": [{ + "resource": { + "attributes": [...], + "entityRefs": [...] + }, + "scopeMetrics": [...] 
+ }] + }' +``` + +### Use Cases + +- **Trace-to-Metrics Correlation**: Use service.name, service.namespace, and service.instance.id to correlate metrics with distributed traces +- **Resource Discovery**: Query what resources have reported metrics +- **Historical Analysis**: Understand which services were active during time ranges +- **Infrastructure Tracking**: Track changes in descriptive attributes (host migrations, region changes) over time diff --git a/development/mimir-monolithic-mode/scripts/otlp-resource-attrs-demo.sh b/development/mimir-monolithic-mode/scripts/otlp-resource-attrs-demo.sh new file mode 100755 index 00000000000..be94aefdc79 --- /dev/null +++ b/development/mimir-monolithic-mode/scripts/otlp-resource-attrs-demo.sh @@ -0,0 +1,686 @@ +#!/bin/bash +# SPDX-License-Identifier: AGPL-3.0-only + +# OTLP Resource & Scope Attributes Persistence Demo for Grafana Mimir +# +# This demo showcases how Mimir persists OTel resource attributes and scope +# (instrumentation library) attributes from OTLP metrics, and makes resource +# attributes queryable via the /api/v1/resources endpoint and info() function. +# +# This is a faithful port of the Prometheus demo at: +# documentation/examples/otlp-resource-attributes/main.go +# +# Prerequisites: +# - curl and jq installed +# - Either: Mimir running in monolithic mode (./compose-up.sh) +# - Or: Use --start-stack to automatically start the stack +# +# Usage: +# ./scripts/otlp-resource-attrs-demo.sh [--start-stack] [--stop-stack] +# +# Options: +# --start-stack Start the docker-compose stack before running the demo +# --stop-stack Stop the docker-compose stack after the demo completes +# --help, -h Show this help message + +set -e + +# Script directory for locating compose scripts +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +COMPOSE_DIR="$(cd "${SCRIPT_DIR}/.." 
&& pwd)" + +# Flags +START_STACK=false +STOP_STACK=false + +show_usage() { + cat </dev/null) || true + if [ "$http_code" = "200" ]; then + echo -e "${service_name} is ready." + return 0 + fi + attempt=$((attempt + 1)) + printf "\r Attempt %d/%d..." "$attempt" "$max_attempts" + sleep 2 + done + echo "" + echo "Error: ${service_name} did not become ready within $((max_attempts * 2)) seconds" + return 1 +} + +start_stack() { + echo "Starting docker-compose stack..." + cd "${COMPOSE_DIR}" + ./compose-up.sh -d + + echo "" + wait_for_service "http://localhost:8101/ready" "Mimir" + + # Wait for OTLP ingestion to actually work (ring needs to be formed) + echo "" + echo "Waiting for OTLP ingestion to be ready..." + local otlp_ready=false + local otlp_attempts=0 + local otlp_max_attempts=30 + while [ "$otlp_ready" = false ] && [ $otlp_attempts -lt $otlp_max_attempts ]; do + # Send a test metric and check for 200 response + local test_response + test_response=$(curl -s -w "%{http_code}" -o /dev/null -X POST "http://localhost:8101/otlp/v1/metrics" \ + -H "Content-Type: application/json" \ + -d '{"resourceMetrics":[{"resource":{"attributes":[{"key":"service.name","value":{"stringValue":"startup-test"}}]},"scopeMetrics":[{"metrics":[{"name":"startup_test","sum":{"dataPoints":[{"asDouble":1,"timeUnixNano":"1234567890000000000"}],"aggregationTemporality":2,"isMonotonic":true}}]}]}]}') + if [ "$test_response" = "200" ]; then + otlp_ready=true + else + otlp_attempts=$((otlp_attempts + 1)) + printf "\r Attempt %d/%d (HTTP %s)..." "$otlp_attempts" "$otlp_max_attempts" "$test_response" + sleep 2 + fi + done + if [ "$otlp_ready" = false ]; then + echo "" + echo "Warning: OTLP endpoint may not be fully ready" + else + echo "OTLP ingestion is ready." + fi + + echo "" + echo "Stack is ready!" + echo "" +} + +stop_stack() { + echo "" + echo "Stopping docker-compose stack..." 
+ cd "${COMPOSE_DIR}" + ./compose-down.sh +} + +# Register cleanup trap if --stop-stack is set +if [ "$STOP_STACK" = true ]; then + trap stop_stack EXIT +fi + +# Start stack if requested +if [ "$START_STACK" = true ]; then + start_stack +fi + +# ANSI color codes +BOLD='\033[1m' +RESET='\033[0m' +GRAY='\033[37m' +RED='\033[31m' +GREEN='\033[32m' +YELLOW='\033[33m' +CYAN='\033[36m' +MAGENTA='\033[35m' + +# Mimir endpoint +MIMIR_URL="${MIMIR_URL:-http://localhost:8101}" +OTLP_ENDPOINT="${MIMIR_URL}/otlp/v1/metrics" +RESOURCES_ENDPOINT="${MIMIR_URL}/prometheus/api/v1/resources" +QUERY_ENDPOINT="${MIMIR_URL}/prometheus/api/v1/query" +FLUSH_ENDPOINT="${MIMIR_URL}/ingester/flush" + +echo -e "${BOLD}${CYAN}=== Grafana Mimir OTel Resource Attributes Persistence Demo ===${RESET}\n" + +# Check if Mimir is running +echo -e "${GRAY}Checking Mimir connectivity...${RESET}" +if ! curl -s "${MIMIR_URL}/ready" > /dev/null 2>&1; then + echo -e "${RED}Error: Mimir is not reachable at ${MIMIR_URL}${RESET}" + echo -e "${YELLOW}Please start Mimir first: ./compose-up.sh${RESET}" + exit 1 +fi +echo -e "${GREEN}Mimir is ready at ${MIMIR_URL}${RESET}\n" + +print_phase() { + echo -e "\n${BOLD}${MAGENTA}--- Phase $1: $2 ---${RESET}\n" +} + +# Function to send OTLP metrics +send_otlp_metrics() { + local payload="$1" + local description="$2" + + response=$(curl -s -w "\n%{http_code}" -X POST "${OTLP_ENDPOINT}" \ + -H "Content-Type: application/json" \ + -d "$payload") + + http_code=$(echo "$response" | tail -n1) + + if [ "$http_code" -eq 200 ]; then + echo -e "${GREEN}Sent: ${description}${RESET}" + else + echo -e "${RED}Failed to send metrics (HTTP ${http_code}): ${description}${RESET}" + echo "$response" | sed '$d' + return 1 + fi +} + +# Function to query resource attributes +query_resources() { + local match="$1" + local description="$2" + + echo -e "${BOLD}${description}${RESET}" + + # URL encode the match parameter + encoded_match=$(printf '%s' "$match" | jq -sRr @uri) + + response=$(curl 
-s "${RESOURCES_ENDPOINT}?match[]=${encoded_match}") + + # Check if response is valid JSON + if ! echo "$response" | jq -e . > /dev/null 2>&1; then + echo -e "${RED}Invalid JSON response${RESET}" + echo "$response" + return 1 + fi + + # Check status + status=$(echo "$response" | jq -r '.status') + if [ "$status" != "success" ]; then + echo -e "${RED}Query failed: $(echo "$response" | jq -r '.error // "unknown error"')${RESET}" + return 1 + fi + + # Pretty print the response + echo "$response" | jq -C '.data.series[] | { + labels: .labels, + versions: [.versions[] | { + identifying: .identifying, + descriptive: .descriptive, + entities: .entities, + minTimeMs: .minTimeMs, + maxTimeMs: .maxTimeMs + }] + }' 2>/dev/null || echo "$response" | jq -C '.' + + echo "" +} + +# Function to execute PromQL instant query +query_promql() { + local query="$1" + local time="$2" + local description="$3" + + echo -e "${BOLD}${description}${RESET}" + + # URL encode the query + encoded_query=$(printf '%s' "$query" | jq -sRr @uri) + + if [ -n "$time" ]; then + response=$(curl -s "${QUERY_ENDPOINT}?query=${encoded_query}&time=${time}") + else + response=$(curl -s "${QUERY_ENDPOINT}?query=${encoded_query}") + fi + + # Check if response is valid JSON + if ! echo "$response" | jq -e . 
> /dev/null 2>&1; then + echo -e "${RED}Invalid JSON response${RESET}" + echo "$response" + return 1 + fi + + # Check status + status=$(echo "$response" | jq -r '.status') + if [ "$status" != "success" ]; then + echo -e "${RED}Query failed: $(echo "$response" | jq -r '.error // "unknown error"')${RESET}" + return 1 + fi + + # Pretty print the result + echo "$response" | jq -C '.data.result[] | {metric: .metric, value: .value[1]}' 2>/dev/null || echo "$response" | jq -C '.data' + + echo "" +} + +# Get current timestamp in milliseconds +now_ms() { + echo $(($(date +%s) * 1000)) +} + +# Get current timestamp in seconds (for PromQL) +now_sec() { + date +%s +} + +# Store timestamps for later info() queries +ORIGINAL_TIMESTAMP_SEC=$(now_sec) + +# === PHASE 1: Send OTLP metrics with resource attributes === +print_phase 1 "Sending OTLP metrics with resource attributes" + +echo -e "${GRAY}Sending metrics from multiple services with diverse resource attributes...${RESET}\n" + +TIMESTAMP=$(now_ms) + +# Resource 1: payment-service in production (with entity_refs) +# Entity refs define service entity + host entity with identifying/descriptive key assignments +PAYLOAD1=$(cat < /dev/null 2>&1 + +# Wait for Mimir to be ready again +wait_for_service "http://localhost:8101/ready" "Mimir (after restart)" + +# Wait for ingester ring to be fully formed (ready endpoint can return 200 before queries work) +echo "Waiting for ingester to accept queries..." +ingester_ready=false +for i in $(seq 1 30); do + test_query=$(curl -s -o /dev/null -w "%{http_code}" "${RESOURCES_ENDPOINT}?match[]=%7B__name__%3D~%22.%2B%22%7D" 2>/dev/null) || true + if [ "$test_query" = "200" ]; then + ingester_ready=true + break + fi + printf "\r Attempt %d/30 (HTTP %s)..." "$i" "$test_query" + sleep 2 +done +if [ "$ingester_ready" = true ]; then + echo "Ingester is ready." 
+else + echo "Warning: Ingester may not be fully ready" +fi +echo "" + +# Query again to verify data survived WAL replay +echo -e "${BOLD}Querying after WAL replay:${RESET}\n" +query_resources '{__name__=~".+"}' "Resource attributes after WAL replay:" + +echo -e "\n${GREEN}Resource attributes survived WAL replay!${RESET}" + +# === PHASE 4: Flush to blocks === +print_phase 4 "Compacting TSDB head to persist resource attributes to disk" + +echo -e "${GRAY}Triggering ingester flush to create blocks with series_metadata.parquet...${RESET}\n" + +flush_response=$(curl -s -X POST "${FLUSH_ENDPOINT}") +echo -e "${GREEN}Flush triggered${RESET}" + +# Wait for flush to complete +echo -e "${GRAY}Waiting for flush to complete...${RESET}" +sleep 5 + +echo -e "\n${GREEN}Blocks should now contain resource attributes in series_metadata.parquet files${RESET}" + +# === PHASE 5: Query from blocks === +print_phase 5 "Querying resource attributes from persisted blocks" + +echo -e "${GRAY}Now querying will also include data from store-gateways (if blocks are available).${RESET}\n" + +query_resources '{__name__=~".+"}' "Resource attributes (from both head and blocks):" + +# === PHASE 6: Demonstrate descriptive attributes changing over time === +print_phase 6 "Descriptive attributes changing over time" + +echo -e "${BOLD}Scenario:${RESET} payment-service is migrated to a new host in a different region." +echo -e "The ${CYAN}identifying${RESET} attributes (service.name, service.namespace, service.instance.id) stay the same," +echo -e "but the ${YELLOW}descriptive${RESET} attributes (host.name, cloud.region) change." 
+echo -e "The instrumentation library is also upgraded: scope version ${BOLD}1.2.0 -> 1.3.0${RESET}.\n" + +# Wait so timestamps visibly differ +sleep 2 + +TIMESTAMP2=$(now_ms) +MIGRATED_TIMESTAMP_SEC=$(now_sec) + +# Send metrics with changed descriptive attributes (same identifying attributes) +# Also includes entity_refs with updated descriptive key assignments +PAYLOAD_MIGRATED=$(cat < ${GREEN}prod-payment-2.example.com${RESET}" +echo -e " ${YELLOW}cloud.region${RESET}: us-west-2 -> ${GREEN}eu-west-1${RESET}" +echo -e " ${YELLOW}k8s.pod.name${RESET}: (new) ${GREEN}payment-7d4f8b9c5-xk2pq${RESET}" +echo -e " ${YELLOW}scope version${RESET}: 1.2.0 -> ${GREEN}1.3.0${RESET} (library upgraded during migration)" + +# Show the version history +echo -e "\n${BOLD}Version history for production/payment-service:${RESET}" +echo -e "${GRAY}(Original version in block, new version in head)${RESET}\n" + +sleep 1 + +query_resources '{job="production/payment-service"}' "production/payment-service resource attribute versions:" + +# === PHASE 7: Demonstrate info() function with time-varying attributes === +print_phase 7 "Querying with info() to include resource attributes" + +echo -e "The ${BOLD}info()${RESET} function enriches metrics with resource attributes at query time." 
+echo -e "When descriptive attributes change over time, info() returns the values" +echo -e "that were active at the requested timestamp.\n" + +QUERY='sum by (method, status, "cloud.region", "host.name") (info(http_requests_total{method="GET",status="200"}))' +echo -e "${BOLD}Query:${RESET} ${QUERY}\n" + +# Query at original timestamp (before migration) +echo -e "${BOLD}At timestamp BEFORE migration (${ORIGINAL_TIMESTAMP_SEC}):${RESET}" +query_promql "$QUERY" "$ORIGINAL_TIMESTAMP_SEC" "" + +# Query at migrated timestamp (after migration) +echo -e "${BOLD}At timestamp AFTER migration (${MIGRATED_TIMESTAMP_SEC}):${RESET}" +query_promql "$QUERY" "$MIGRATED_TIMESTAMP_SEC" "" + +echo -e "${CYAN}This enables time-accurate correlation of metrics with OTel traces/logs," +echo -e "even when infrastructure changes occur during the query time range.${RESET}\n" + +# === PHASE 8: Show API response format === +print_phase 8 "API response format for /api/v1/resources" + +echo -e "${BOLD}API Response (/api/v1/resources):${RESET}" +echo -e "${GRAY}Full response format showing labels, versions with identifying/descriptive attributes, and entities:${RESET}\n" + +# Query and show full response format +encoded_match=$(printf '%s' '{job=~".*payment-service.*"}' | jq -sRr @uri) +curl -s "${RESOURCES_ENDPOINT}?match[]=${encoded_match}" | jq -C '.' + +echo "" + +# === Summary === +print_phase 9 "Summary" + +echo -e "${BOLD}This demo showed how Grafana Mimir persists OTel resource and scope attributes:${RESET}" +echo -e " ${GREEN}1.${RESET} Resource attributes arrive via OTLP metrics (service.name, etc.)" +echo -e " ${GREEN}2.${RESET} Attributes are stored per-series in ingester's TSDB head (in-memory)" +echo -e " ${GREEN}3.${RESET} Attributes survive ingester restart via WAL replay" +echo -e " ${GREEN}4.${RESET} When blocks are flushed, attributes are persisted to series_metadata.parquet" +echo -e " ${GREEN}5.${RESET} ${CYAN}Identifying${RESET} attributes (service.name, etc.) 
 remain constant for a series" +echo -e "   ${GREEN}6.${RESET} ${YELLOW}Descriptive${RESET} attributes (host.name, cloud.region) can change over time" +echo -e "   ${GREEN}7.${RESET} ${MAGENTA}Versioned storage${RESET} preserves attribute history with time ranges" +echo -e "   ${GREEN}8.${RESET} Each version tracks when specific attributes were active (MinTime/MaxTime)" +echo -e "   ${GREEN}9.${RESET} The ${BOLD}info()${RESET} function enriches queries with time-appropriate attributes" +echo -e "   ${GREEN}10.${RESET} ${CYAN}Scope attributes${RESET} (library name, version, custom attrs) are persisted per-series" +echo "" +echo -e "${CYAN}This enables correlation of Prometheus metrics with OTel traces/logs" +echo -e "using the identifying resource attributes (service.name, etc.)." +echo -e "Scope attributes track which instrumentation library produced the metrics," +echo -e "including version changes across deployments.${RESET}" +echo "" +echo -e "${BOLD}Endpoints used:${RESET}" +echo -e "  ${GRAY}OTLP ingest:${RESET}  ${OTLP_ENDPOINT}" +echo -e "  ${GRAY}Resource API:${RESET} ${RESOURCES_ENDPOINT}" +echo -e "  ${GRAY}Query API:${RESET}    ${QUERY_ENDPOINT}" +echo -e "  ${GRAY}Flush:${RESET}        ${FLUSH_ENDPOINT}" diff --git a/docs/sources/mimir/configure/configuration-parameters/index.md b/docs/sources/mimir/configure/configuration-parameters/index.md index 01177bdfa40..73b3f5043c8 100644 --- a/docs/sources/mimir/configure/configuration-parameters/index.md +++ b/docs/sources/mimir/configure/configuration-parameters/index.md @@ -5076,6 +5076,13 @@ ruler_alertmanager_client_config: # CLI flag: -distributor.otel-label-name-preserve-underscores [otel_label_name_preserve_multiple_underscores: <boolean> | default = true] +# (experimental) Whether to persist OTel resource attributes per time series as +# metadata in Prometheus TSDB blocks. Resource attributes are stored in +# series_metadata.parquet files within blocks and can be queried via the +# /api/v1/resources endpoint. 
+# CLI flag: -distributor.otel-persist-resource-attributes +[otel_persist_resource_attributes: <boolean> | default = false] + # (experimental) The default consistency level to enforce for queries when using # the ingest storage. Supports values: strong, eventual. # CLI flag: -ingest-storage.read-consistency diff --git a/go.mod b/go.mod index ce86c70769c..3ffec2f553c 100644 --- a/go.mod +++ b/go.mod @@ -115,6 +115,7 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Masterminds/sprig/v3 v3.2.1 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect github.com/at-wat/mqtt-go v0.19.6 // indirect github.com/aws/aws-sdk-go v1.55.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 // indirect @@ -176,8 +177,11 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0 // indirect + github.com/parquet-go/bitpack v1.0.0 // indirect + github.com/parquet-go/jsonlite v1.0.0 // indirect + github.com/parquet-go/parquet-go v0.26.3 // indirect github.com/pb33f/jsonpath v0.7.1 // indirect - github.com/pb33f/libopenapi v0.33.4 // indirect + github.com/pb33f/libopenapi v0.33.5 // indirect github.com/pb33f/libopenapi-validator v0.11.1 // indirect github.com/pb33f/ordered-map/v2 v2.3.0 // indirect github.com/philhofer/fwd v1.2.0 // indirect @@ -204,6 +208,7 @@ require ( go.opentelemetry.io/collector/consumer v1.51.0 // indirect go.opentelemetry.io/collector/featuregate v1.52.0 // indirect go.opentelemetry.io/collector/internal/componentalias v0.145.0 // indirect + go.opentelemetry.io/collector/pdata/xpdata v0.142.0 // indirect go.opentelemetry.io/collector/pipeline v1.51.0 // indirect go.opentelemetry.io/collector/processor v1.51.0 // 
indirect go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 // indirect @@ -361,7 +366,7 @@ require ( sigs.k8s.io/yaml v1.6.0 // indirect ) -replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20260225105904-7c22e95a1b6f +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20260303092349-c93a08fb3844 // Replace memberlist with our fork which includes some changes that haven't been // merged upstream yet for years and we don't expect to change anytime soon. diff --git a/go.sum b/go.sum index 3badef597e2..b33c68809bd 100644 --- a/go.sum +++ b/go.sum @@ -142,6 +142,8 @@ github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vS github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible h1:9gWa46nstkJ9miBReJcN8Gq34cBFbzSpQZVVT9N09TM= github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -609,8 +611,8 @@ github.com/grafana/memberlist v0.3.1-0.20251126142931-6f9f62ab6f86 h1:aTwfQuroOm github.com/grafana/memberlist v0.3.1-0.20251126142931-6f9f62ab6f86/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE= github.com/grafana/mimir-otlptranslator v0.0.0-20251017074411-ea1e8f863e1d h1:k4NIVPYPP0sLJoGNzGwoQs2MpnWTvTcgbWPCzfdX66c= github.com/grafana/mimir-otlptranslator v0.0.0-20251017074411-ea1e8f863e1d/go.mod 
h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM= -github.com/grafana/mimir-prometheus v1.8.2-0.20260225105904-7c22e95a1b6f h1:kRG9PEIdSnoFpv16FeSjn9JheN/8wzoMCHoVH1RLAsA= -github.com/grafana/mimir-prometheus v1.8.2-0.20260225105904-7c22e95a1b6f/go.mod h1:BAiocBzCS2Jcm1iSrDLW3u1OP25FOPd4VE9m/T7xXYY= +github.com/grafana/mimir-prometheus v1.8.2-0.20260303092349-c93a08fb3844 h1:NiCB0Psw/oE1G1EWcI6ffmkGPN3mz5xk2FbQDsvX/II= +github.com/grafana/mimir-prometheus v1.8.2-0.20260303092349-c93a08fb3844/go.mod h1:/yzU/tGC6/XaYzzeAn3/HcVeFWRQ/+62ejWRXoNbwhk= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= @@ -891,12 +893,18 @@ github.com/oracle/oci-go-sdk/v65 v65.41.1/go.mod h1:MXMLMzHnnd9wlpgadPkdlkZ9YrwQ github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE= github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/parquet-go/bitpack v1.0.0 h1:AUqzlKzPPXf2bCdjfj4sTeacrUwsT7NlcYDMUQxPcQA= +github.com/parquet-go/bitpack v1.0.0/go.mod h1:XnVk9TH+O40eOOmvpAVZ7K2ocQFrQwysLMnc6M/8lgs= +github.com/parquet-go/jsonlite v1.0.0 h1:87QNdi56wOfsE5bdgas0vRzHPxfJgzrXGml1zZdd7VU= +github.com/parquet-go/jsonlite v1.0.0/go.mod h1:nDjpkpL4EOtqs6NQugUsi0Rleq9sW/OtC1NnZEnxzF0= +github.com/parquet-go/parquet-go v0.26.3 h1:kJY+xmjcR7BH77tyHqasJpIl3kch/6EIO3TW4tFj69M= +github.com/parquet-go/parquet-go v0.26.3/go.mod h1:h9GcSt41Knf5qXI1tp1TfR8bDBUtvdUMzSKe26aZcHk= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pb33f/jsonpath v0.7.1 h1:dEp6oIZuJbpDSyuHAl9m7GonoDW4M20BcD5vT0tPYRE= github.com/pb33f/jsonpath v0.7.1/go.mod h1:zBV5LJW4OQOPatmQE2QdKpGQJvhDTlE5IEj6ASaRNTo= -github.com/pb33f/libopenapi v0.33.4 h1:Rgczgrg4VQKXW/NtSj/nApmtYKS+TVpLgTsG692JxmE= -github.com/pb33f/libopenapi v0.33.4/go.mod h1:e/dmd2Pf1nkjqkI0r7guFSyt9T5V0IIQKgs0L6B/3b0= +github.com/pb33f/libopenapi v0.33.5 h1:AzILVrOzMaawLFhQENmwmn7h/TIDH2QEgUd0PfxS2xE= +github.com/pb33f/libopenapi v0.33.5/go.mod h1:e/dmd2Pf1nkjqkI0r7guFSyt9T5V0IIQKgs0L6B/3b0= github.com/pb33f/libopenapi-validator v0.11.1 h1:lTW738oB3lwpS9poDzmI3jpTPZSb5W46vklZqtyf7+Q= github.com/pb33f/libopenapi-validator v0.11.1/go.mod h1:7CfboslU/utKhiuQRuenriGYZ+HQLDOvARxjqRwd57w= github.com/pb33f/ordered-map/v2 v2.3.0 h1:k2OhVEQkhTCQMhAicQ3Z6iInzoZNQ7L9MVomwKBZ5WQ= @@ -1088,6 +1096,8 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8 github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1145,6 +1155,8 @@ go.opentelemetry.io/collector/pdata/pprofile v0.145.0 h1:ASMKpoqokf8HhzjoeMKZf0K go.opentelemetry.io/collector/pdata/pprofile v0.145.0/go.mod h1:a60GC7wQPhLAixWzKbbP51QLwwc+J0Cmp4SurOlhGUk= go.opentelemetry.io/collector/pdata/testdata v0.145.0 h1:iFsxsCMtE3lnAc/5kZbhZHpRv1OMmM+O5ry46xdQHbg= 
go.opentelemetry.io/collector/pdata/testdata v0.145.0/go.mod h1:0y2ERArdzqmYdJHdKLKue+AUubSEGlwK49F+23+Mbic= +go.opentelemetry.io/collector/pdata/xpdata v0.142.0 h1:xRpmhY12JnJ89E2kM2maOjG7C9QK6dSnTr03Ce8qfPA= +go.opentelemetry.io/collector/pdata/xpdata v0.142.0/go.mod h1:0e/FY0Stzxx4M2sqELIRrXzeoTsAwjVPKT9mQvL4hmc= go.opentelemetry.io/collector/pipeline v1.51.0 h1:GZBNW+aaOE+zufGzAkXy0OI7n1cqepEa5J+beaOpS2k= go.opentelemetry.io/collector/pipeline v1.51.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI= go.opentelemetry.io/collector/processor v1.51.0 h1:PKpCzkLQmqaW08TOVh/zM0qx07Ihq+DR5J/OBkPiL9o= diff --git a/pkg/api/api.go b/pkg/api/api.go index 7f3948d8d82..60d41640029 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -468,6 +468,8 @@ func (a *API) RegisterQueryAPI(handler http.Handler, buildInfoHandler http.Handl a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/series"), handler, true, true, "GET", "POST", "DELETE") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/status/buildinfo"), buildInfoHandler, false, true, "GET") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/metadata"), handler, true, true, "GET") + a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/resources"), handler, true, true, "GET") + a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/resources/series"), handler, true, true, "GET") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/cardinality/label_names"), handler, true, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/cardinality/label_values"), handler, true, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/cardinality/active_series"), handler, true, true, "GET", "POST") diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index d6ca511bf1d..35e1e2b6315 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -225,6 +225,7 @@ func NewQuerierHandler( metadataSupplier 
querier.MetadataSupplier, engine promql.QueryEngine, distributor Distributor, + blocksQueryable querier.ResourceAttributesBlocksQueryable, metrics *querier.RequestMetrics, reg prometheus.Registerer, logger log.Logger, @@ -240,8 +241,7 @@ func NewQuerierHandler( api := v1.NewAPI( engine, querier.NewErrorTranslateSampleAndChunkQueryable(queryable), // Translate errors to errors expected by API. - nil, // No remote write support. - nil, // No remote write V2 support. + nil, nil, // No remote write support (Appendable, AppendableV2). exemplarQueryable, func(context.Context) v1.ScrapePoolsRetriever { return &querier.DummyTargetRetriever{} }, func(context.Context) v1.TargetRetriever { return &querier.DummyTargetRetriever{} }, @@ -275,8 +275,9 @@ func NewQuerierHandler( querierCfg.EngineConfig.LookbackDelta, false, appendMetadata, - nil, - nil, + nil, // overrideErrorCode + false, // enableNativeMetadata + nil, // featureRegistry v1.OpenAPIOptions{}, promqlext.NewPromQLParser(), ) @@ -330,6 +331,14 @@ func NewQuerierHandler( router.Path(path.Join(promPrefix, "/label/{name}/values")).Methods("GET").Handler(labelsQueryStats.Wrap(promRouter)) router.Path(path.Join(promPrefix, "/series")).Methods("GET", "POST", "DELETE").Handler(seriesQueryStats.Wrap(unlimitedMemoryTrackerMiddleware.Wrap(promRouter))) router.Path(path.Join(promPrefix, "/metadata")).Methods("GET").Handler(metadataQueryStats.Wrap(querier.NewMetadataHandler(metadataSupplier))) + router.Path(path.Join(promPrefix, "/resources")).Methods("GET").Handler(querier.NewResourceAttributesHandler(distributor, blocksQueryable, querier.ResourceAttributesHandlerConfig{ + QueryStoreAfter: querierCfg.QueryStoreAfter, + QueryIngestersWithin: limits.QueryIngestersWithin, + })) + router.Path(path.Join(promPrefix, "/resources/series")).Methods("GET").Handler(querier.NewResourceAttributesSeriesHandler(distributor, blocksQueryable, querier.ResourceAttributesHandlerConfig{ + QueryStoreAfter: querierCfg.QueryStoreAfter, + 
QueryIngestersWithin: limits.QueryIngestersWithin, + })) router.Path(path.Join(promPrefix, "/cardinality/label_names")).Methods("GET", "POST").Handler(cardinalityQueryStats.Wrap(querier.LabelNamesCardinalityHandler(distributor, limits))) router.Path(path.Join(promPrefix, "/cardinality/label_values")).Methods("GET", "POST").Handler(cardinalityQueryStats.Wrap(querier.LabelValuesCardinalityHandler(distributor, limits))) router.Path(path.Join(promPrefix, "/cardinality/active_series")).Methods("GET", "POST").Handler(cardinalityQueryStats.Wrap(querier.ActiveSeriesCardinalityHandler(distributor, limits))) diff --git a/pkg/blockbuilder/tsdb.go b/pkg/blockbuilder/tsdb.go index bad8e6d6055..c03da859023 100644 --- a/pkg/blockbuilder/tsdb.go +++ b/pkg/blockbuilder/tsdb.go @@ -18,6 +18,7 @@ import ( dskittenant "github.com/grafana/dskit/tenant" "github.com/oklog/ulid/v2" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -111,7 +112,7 @@ func (b *TSDBBuilder) PushToStorageAndReleaseRequest(ctx context.Context, req *m return fmt.Errorf("get tsdb for tenant %s: %w", tenantID, err) } - app := db.Appender(ctx).(extendedAppender) + app := db.AppenderV2(ctx).(extendedAppender) defer func() { if err != nil { if e := app.Rollback(); e != nil && !errors.Is(e, tsdb.ErrAppenderClosed) { @@ -139,40 +140,66 @@ func (b *TSDBBuilder) PushToStorageAndReleaseRequest(ctx context.Context, req *m ingestCreatedTimestamp := ts.CreatedTimestamp > 0 + // Build resource context once per time series. 
+ var resourceCtx *storage.ResourceContext + if ts.ResourceAttributes != nil && len(ts.ResourceAttributes.Identifying) > 0 { + metricName := nonCopiedLabels.Get(model.MetricNameLabel) + if metricName != "target_info" { + resourceCtx = &storage.ResourceContext{ + Identifying: entriesToMap(ts.ResourceAttributes.Identifying), + Descriptive: entriesToMap(ts.ResourceAttributes.Descriptive), + Entities: convertResourceEntities(ts.ResourceAttributes.Entities), + } + } + } + + // Build scope context once per time series. + var scopeCtx *storage.ScopeContext + if ts.ScopeAttributes != nil { + if ts.ScopeAttributes.Name != "" || ts.ScopeAttributes.Version != "" || ts.ScopeAttributes.SchemaURL != "" || len(ts.ScopeAttributes.Attrs) > 0 { + scopeCtx = &storage.ScopeContext{ + Name: ts.ScopeAttributes.Name, + Version: ts.ScopeAttributes.Version, + SchemaURL: ts.ScopeAttributes.SchemaURL, + Attrs: entriesToMap(ts.ScopeAttributes.Attrs), + } + } + } + for _, s := range ts.Samples { + // Append ST zero sample (created timestamp) before the regular sample. if ingestCreatedTimestamp && ts.CreatedTimestamp < s.TimestampMs && (!nativeHistogramsIngestionEnabled || len(ts.Histograms) == 0 || ts.Histograms[0].Timestamp >= s.TimestampMs) { + stOpts := storage.AppendV2Options{RejectOutOfOrder: true} if ref != 0 { - // If the cached reference exists, we try to use it. - _, err = app.AppendSTZeroSample(ref, copiedLabels, s.TimestampMs, ts.CreatedTimestamp) + _, err = app.Append(ref, copiedLabels, 0, ts.CreatedTimestamp, 0, nil, nil, stOpts) } else { - // Copy the label set because TSDB may retain it. 
copiedLabels = mimirpb.CopyLabels(nonCopiedLabels) - ref, err = app.AppendSTZeroSample(0, copiedLabels, s.TimestampMs, ts.CreatedTimestamp) + ref, err = app.Append(0, copiedLabels, 0, ts.CreatedTimestamp, 0, nil, nil, stOpts) } if err != nil && !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) && !errors.Is(err, storage.ErrOutOfOrderST) && !errors.Is(err, storage.ErrOutOfOrderSample) { - // According to OTEL spec: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time - // if the start time is unknown, then it should equal to the timestamp of the first sample, - // which will mean a created timestamp equal to the timestamp of the first sample for later - // samples. Thus we ignore if zero sample would cause duplicate. - // We also ignore out of order sample as created timestamp is out of order most of the time, - // except when written before the first sample. - level.Warn(b.logger).Log("msg", "failed to store zero float sample for created timestamp", "tenant", tenantID, "err", err) + level.Warn(b.logger).Log("msg", "failed to ingest ST zero sample", "err", err) discardedSamples++ } - ingestCreatedTimestamp = false // Only try to append created timestamp once per series. + ingestCreatedTimestamp = false + err = nil + } + + opts := storage.AppendV2Options{ + Resource: resourceCtx, + Scope: scopeCtx, } if ref != 0 { // If the cached reference exists, we try to use it. - if _, err = app.Append(ref, copiedLabels, s.TimestampMs, s.Value); err == nil { + if _, err = app.Append(ref, copiedLabels, 0, s.TimestampMs, s.Value, nil, nil, opts); err == nil { continue } } else { // Copy the label set because TSDB may retain it. copiedLabels = mimirpb.CopyLabels(nonCopiedLabels) // Retain the reference in case there are multiple samples for the series. 
- if ref, err = app.Append(0, copiedLabels, s.TimestampMs, s.Value); err == nil { + if ref, err = app.Append(0, copiedLabels, 0, s.TimestampMs, s.Value, nil, nil, opts); err == nil { continue } } @@ -191,58 +218,66 @@ func (b *TSDBBuilder) PushToStorageAndReleaseRequest(ctx context.Context, req *m } for _, h := range ts.Histograms { + var ( + ih *histogram.Histogram + fh *histogram.FloatHistogram + ) + + if h.IsFloatHistogram() { + fh = mimirpb.FromFloatHistogramProtoToFloatHistogram(&h) + } else { + ih = mimirpb.FromHistogramProtoToHistogram(&h) + } + + // Append ST zero sample (created timestamp) before the regular histogram. if ingestCreatedTimestamp && ts.CreatedTimestamp < h.Timestamp { - var ( - ih *histogram.Histogram - fh *histogram.FloatHistogram - ) - // AppendHistogramCTZeroSample doesn't care about the content of the passed histograms, - // just uses it to decide the type, so don't convert the input, use dummy histograms. - if h.IsFloatHistogram() { - fh = zeroFloatHistogram - } else { - ih = zeroHistogram + var zeroIH *histogram.Histogram + var zeroFH *histogram.FloatHistogram + if fh != nil { + zeroFH = &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterReset, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + CustomValues: fh.CustomValues, + } + } else if ih != nil { + zeroIH = &histogram.Histogram{ + CounterResetHint: histogram.CounterReset, + Schema: ih.Schema, + ZeroThreshold: ih.ZeroThreshold, + CustomValues: ih.CustomValues, + } } + stOpts := storage.AppendV2Options{RejectOutOfOrder: true} if ref != 0 { - _, err = app.AppendHistogramSTZeroSample(ref, copiedLabels, h.Timestamp, ts.CreatedTimestamp, ih, fh) + _, err = app.Append(ref, copiedLabels, 0, ts.CreatedTimestamp, 0, zeroIH, zeroFH, stOpts) } else { - // Copy the label set because both TSDB and the active series tracker may retain it. 
copiedLabels = mimirpb.CopyLabels(nonCopiedLabels) - ref, err = app.AppendHistogramSTZeroSample(0, copiedLabels, h.Timestamp, ts.CreatedTimestamp, ih, fh) + ref, err = app.Append(0, copiedLabels, 0, ts.CreatedTimestamp, 0, zeroIH, zeroFH, stOpts) } if err != nil && !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) && !errors.Is(err, storage.ErrOutOfOrderST) && !errors.Is(err, storage.ErrOutOfOrderSample) { - // According to OTEL spec: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time - // if the start time is unknown, then it should equal to the timestamp of the first sample, - // which will mean a created timestamp equal to the timestamp of the first sample for later - // samples. Thus we ignore if zero sample would cause duplicate. - // We also ignore out of order sample as created timestamp is out of order most of the time, - // except when written before the first sample. - level.Warn(b.logger).Log("msg", "failed to store zero histogram sample for created timestamp", "tenant", tenantID, "err", err) + level.Warn(b.logger).Log("msg", "failed to ingest ST zero histogram sample", "err", err) discardedSamples++ } - ingestCreatedTimestamp = false // Only try to append created timestamp once per series. + ingestCreatedTimestamp = false + err = nil } - var ( - ih *histogram.Histogram - fh *histogram.FloatHistogram - ) - if h.IsFloatHistogram() { - fh = mimirpb.FromFloatHistogramProtoToFloatHistogram(&h) - } else { - ih = mimirpb.FromHistogramProtoToHistogram(&h) + opts := storage.AppendV2Options{ + Resource: resourceCtx, + Scope: scopeCtx, } if ref != 0 { // If the cached reference exists, we try to use it. - if _, err = app.AppendHistogram(ref, copiedLabels, h.Timestamp, ih, fh); err == nil { + if _, err = app.Append(ref, copiedLabels, 0, h.Timestamp, 0, ih, fh, opts); err == nil { continue } } else { // Copy the label set because both TSDB and the active series tracker may retain it. 
copiedLabels = mimirpb.CopyLabels(nonCopiedLabels) // Retain the reference in case there are multiple samples for the series. - if ref, err = app.AppendHistogram(0, copiedLabels, h.Timestamp, ih, fh); err == nil { + if ref, err = app.Append(0, copiedLabels, 0, h.Timestamp, 0, ih, fh, opts); err == nil { continue } } @@ -267,11 +302,6 @@ func (b *TSDBBuilder) PushToStorageAndReleaseRequest(ctx context.Context, req *m return app.Commit() } -var ( - zeroHistogram = &histogram.Histogram{} - zeroFloatHistogram = &histogram.FloatHistogram{} -) - func (b *TSDBBuilder) getOrCreateTSDB(tenant tsdbTenant) (*userTSDB, error) { b.tsdbsMu.RLock() db := b.tsdbs[tenant] @@ -348,6 +378,10 @@ func (b *TSDBBuilder) newTSDB(tenant tsdbTenant) (*userTSDB, error) { HeadPostingsForMatchersCacheMetrics: tsdb.NewPostingsForMatchersCacheMetrics(nil), // No need for these metrics; no one queries tsdb through block-builder BlockPostingsForMatchersCacheMetrics: tsdb.NewPostingsForMatchersCacheMetrics(nil), // No need for these metrics; no one queries tsdb through block-builder PostingsClonerFactory: tsdb.DefaultPostingsClonerFactory{}, + EnableSTAsZeroSample: false, + EnableNativeMetadata: b.limits.OTelPersistResourceAttributes(userID), + EnableResourceAttrIndex: b.limits.OTelResourceAttrIndexEnabled(userID), + IndexedResourceAttrs: stringSliceToSet(b.limits.OTelIndexedResourceAttributes(userID)), }, nil) if err != nil { return nil, err @@ -362,6 +396,18 @@ func (b *TSDBBuilder) newTSDB(tenant tsdbTenant) (*userTSDB, error) { return udb, nil } +// stringSliceToSet converts a string slice to a set (map[string]struct{}). 
+func stringSliceToSet(s []string) map[string]struct{} { + if len(s) == 0 { + return nil + } + m := make(map[string]struct{}, len(s)) + for _, v := range s { + m[v] = struct{}{} + } + return m +} + func (b *TSDBBuilder) NotifyPreCommit(_ context.Context) error { return nil } @@ -490,7 +536,7 @@ func (b *TSDBBuilder) Close() error { } type extendedAppender interface { - storage.Appender + storage.AppenderV2 storage.GetRef } @@ -576,3 +622,29 @@ func (b *TSDBBuilder) buildSparseIndexHeader(ctx context.Context, dbDir string, } return br.Close() } + +func entriesToMap(entries []mimirpb.AttributeEntry) map[string]string { + if len(entries) == 0 { + return nil + } + m := make(map[string]string, len(entries)) + for _, e := range entries { + m[e.Key] = e.Value + } + return m +} + +func convertResourceEntities(entities []mimirpb.ResourceEntity) []storage.EntityData { + if len(entities) == 0 { + return nil + } + result := make([]storage.EntityData, len(entities)) + for i, e := range entities { + result[i] = storage.EntityData{ + Type: e.Type, + ID: entriesToMap(e.ID), + Description: entriesToMap(e.Description), + } + } + return result +} diff --git a/pkg/blockbuilder/tsdb_test.go b/pkg/blockbuilder/tsdb_test.go index a2b2f2a4bc1..fa8425e1e4e 100644 --- a/pkg/blockbuilder/tsdb_test.go +++ b/pkg/blockbuilder/tsdb_test.go @@ -37,6 +37,22 @@ import ( "github.com/grafana/mimir/pkg/util/validation" ) +// zeroHistogramWithThreshold returns a zero histogram matching what TSDB's +// bestEffortAppendSTZeroSample creates: the same ZeroThreshold as the original +// histogram (to avoid needless chunk creation). CounterResetHint is left at +// the default (UnknownCounterReset) since TSDB recomputes it on read-back. 
+func zeroHistogramWithThreshold(threshold float64) *histogram.Histogram { + return &histogram.Histogram{ + ZeroThreshold: threshold, + } +} + +func zeroFloatHistogramWithThreshold(threshold float64) *histogram.FloatHistogram { + return &histogram.FloatHistogram{ + ZeroThreshold: threshold, + } +} + func createWriteRequest(suffix string, samples []mimirpb.Sample, histograms []mimirpb.Histogram) mimirpb.WriteRequest { var req mimirpb.WriteRequest @@ -809,20 +825,20 @@ func TestBuilderCreatedTimestamp(t *testing.T) { }, expectSamples: []test.Sample{ expectedHistogram(lastEnd-50000+100, 1), - {TS: lastEnd - 50000 + 200, Hist: zeroHistogram}, + {TS: lastEnd - 50000 + 200, Hist: zeroHistogramWithThreshold(1e-128)}, expectedHistogram(lastEnd-50000+300, 2), expectedHistogram(lastEnd+100, 3), - {TS: lastEnd + 200, Hist: zeroHistogram}, + {TS: lastEnd + 200, Hist: zeroHistogramWithThreshold(1e-128)}, expectedHistogram(lastEnd+300, 4), expectedHistogram(lastEnd+400, 5), expectedHistogram(lastEnd+500, 6), expectedHistogram(lastEnd+600, 7), - {TS: lastEnd + 700, Hist: zeroHistogram}, + {TS: lastEnd + 700, Hist: zeroHistogramWithThreshold(1e-128)}, expectedHistogram(lastEnd+800, 8), - {TS: lastEnd + 1000, FloatHist: zeroFloatHistogram}, + {TS: lastEnd + 1000, FloatHist: zeroFloatHistogramWithThreshold(1e-128)}, expectedFloatHistogram(lastEnd+1100, 8.5), expectedHistogram(currEnd-200, 9), - {TS: currEnd - 100, Hist: zeroHistogram}, + {TS: currEnd - 100, Hist: zeroHistogramWithThreshold(1e-128)}, expectedHistogram(currEnd+200, 10), }, }, diff --git a/pkg/compactor/block_upload.go b/pkg/compactor/block_upload.go index b0c6ab27e17..520af05fbc5 100644 --- a/pkg/compactor/block_upload.go +++ b/pkg/compactor/block_upload.go @@ -45,7 +45,7 @@ const ( ) var maxBlockUploadSizeBytesFormat = "block exceeds the maximum block size limit of %d bytes" -var rePath = regexp.MustCompile(`^(index|chunks/\d{6})$`) +var rePath = regexp.MustCompile(`^(index|chunks/\d{6}|series_metadata\.parquet)$`) 
var errValidationCompleted = cancellation.NewErrorf("validation completed") // StartBlockUpload handles request for starting block upload. diff --git a/pkg/compactor/split_merge_compactor.go b/pkg/compactor/split_merge_compactor.go index bfc63ad1431..93dbdbd10e9 100644 --- a/pkg/compactor/split_merge_compactor.go +++ b/pkg/compactor/split_merge_compactor.go @@ -22,8 +22,11 @@ func splitAndMergeGrouperFactory(_ context.Context, cfg Config, cfgProvider Conf } func splitAndMergeCompactorFactory(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (Compactor, Planner, error) { - // We don't need to customise the TSDB compactor so we're just using the Prometheus one. - compactor, err := tsdb.NewLeveledCompactor(ctx, reg, util_log.SlogFromGoKit(logger), cfg.BlockRanges.ToMilliseconds(), nil, nil) + compactor, err := tsdb.NewLeveledCompactorWithOptions(ctx, reg, util_log.SlogFromGoKit(logger), cfg.BlockRanges.ToMilliseconds(), nil, tsdb.LeveledCompactorOptions{ + EnableOverlappingCompaction: true, + EnableNativeMetadata: true, + EnableResourceAttrIndex: true, // Preserve inverted index data during re-compaction. + }) if err != nil { return nil, nil, err } diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index cd51aebe62f..37d75b1107e 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -3217,6 +3217,142 @@ func maxFromZones[T ~float64 | ~uint64](seriesCountByZone map[string]T) (val T) return val } +// ResourceAttributes queries the ingester replication set for OTel resource attributes +// matching the given selector. It combines and deduplicates the results. 
+func (d *Distributor) ResourceAttributes(ctx context.Context, startMs, endMs int64, matchers []*labels.Matcher, limit int64, resourceAttrFilters []*ingester_client.ResourceAttrFilter) ([]*ingester_client.SeriesResourceAttributes, error) { + replicationSets, err := d.getIngesterReplicationSetsForQuery(ctx) + if err != nil { + return nil, err + } + + // When ingest storage is disabled, if ingesters are running in a single zone we can't tolerate any errors. + if !d.cfg.IngestStorageConfig.Enabled && len(replicationSets) == 1 && replicationSets[0].ZoneCount() == 1 { + replicationSets[0].MaxErrors = 0 + } + + req, err := ingester_client.ToResourceAttributesRequest(startMs, endMs, matchers, limit, resourceAttrFilters) + if err != nil { + return nil, err + } + + res := newResourceAttributesResponse() + + ingesterQuery := func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { + // This function is invoked purely for its side effects on the captured + // resourceAttributesResponse, its return value is never used. 
+ type ignored struct{} + + log, ctx := spanlogger.New(ctx, d.log, tracer, "Distributor.ResourceAttributes.queryIngester") + defer log.Finish() + + stream, err := client.ResourceAttributes(ctx, req) + if err != nil { + if errors.Is(globalerror.WrapGRPCErrorWithContextError(ctx, err), context.Canceled) { + return ignored{}, nil + } + level.Error(log).Log("msg", "error creating resource attributes response stream", "err", err) + log.SetError() + return nil, err + } + + defer func() { + err = util.CloseAndExhaust[*ingester_client.ResourceAttributesResponse](stream) + if err != nil && !errors.Is(globalerror.WrapGRPCErrorWithContextError(ctx, err), context.Canceled) { + level.Warn(d.log).Log("msg", "error closing resource attributes response stream", "err", err) + } + }() + + for { + msg, err := stream.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + if errors.Is(globalerror.WrapGRPCErrorWithContextError(ctx, err), context.Canceled) { + return ignored{}, nil + } + level.Error(log).Log("msg", "error receiving resource attributes response", "err", err) + log.SetError() + return nil, err + } + + res.add(msg.Items) + } + + return ignored{}, nil + } + + _, err = forReplicationSets(ctx, d, replicationSets, ingesterQuery) + if err != nil { + return nil, err + } + + return res.result(), nil +} + +// resourceAttributesResponse is a helper to merge/deduplicate ResourceAttributes responses from ingesters. 
+type resourceAttributesResponse struct { + m sync.Mutex + series map[uint64]*ingester_client.SeriesResourceAttributes +} + +func newResourceAttributesResponse() *resourceAttributesResponse { + return &resourceAttributesResponse{ + series: map[uint64]*ingester_client.SeriesResourceAttributes{}, + } +} + +func (r *resourceAttributesResponse) add(items []*ingester_client.SeriesResourceAttributes) { + r.m.Lock() + defer r.m.Unlock() + + for _, item := range items { + lbls := mimirpb.FromLabelAdaptersToLabels(item.Labels) + lblHash := labels.StableHash(lbls) + + if existing, ok := r.series[lblHash]; !ok { + // First time seeing this series, store it with a deep copy of labels + itemCopy := &ingester_client.SeriesResourceAttributes{ + Labels: mimirpb.FromLabelsToLabelAdapters(lbls.Copy()), + Versions: item.Versions, + } + r.series[lblHash] = itemCopy + } else { + // Series already exists, merge resource versions + r.mergeVersions(existing, item) + } + } +} + +// mergeVersions merges resource attribute versions from a new item into the existing one. +// It deduplicates versions by their time range overlap. 
+func (r *resourceAttributesResponse) mergeVersions(existing, newItem *ingester_client.SeriesResourceAttributes) { + for _, newVer := range newItem.Versions { + found := false + for _, existingVer := range existing.Versions { + // Consider versions as duplicates if they have the same time range + if existingVer.MinTimeMs == newVer.MinTimeMs && existingVer.MaxTimeMs == newVer.MaxTimeMs { + found = true + break + } + } + if !found { + existing.Versions = append(existing.Versions, newVer) + } + } +} + +func (r *resourceAttributesResponse) result() []*ingester_client.SeriesResourceAttributes { + r.m.Lock() + defer r.m.Unlock() + + result := make([]*ingester_client.SeriesResourceAttributes, 0, len(r.series)) + for _, item := range r.series { + result = append(result, item) + } + return result +} + // LabelNames returns the names of all labels from series with samples timestamp between from and to, and matching // the input optional series label matchers. The returned label names are sorted. func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { diff --git a/pkg/distributor/otel.go b/pkg/distributor/otel.go index 501e6e6effd..57182f7e622 100644 --- a/pkg/distributor/otel.go +++ b/pkg/distributor/otel.go @@ -67,6 +67,7 @@ type OTLPHandlerLimits interface { NameValidationScheme(id string) model.ValidationScheme OTelLabelNameUnderscoreSanitization(string) bool OTelLabelNamePreserveMultipleUnderscores(string) bool + OTelPersistResourceAttributes(string) bool } type OTLPPushMiddleware func(ctx context.Context, req *pmetricotlp.ExportRequest) error @@ -379,6 +380,7 @@ func newOTLPParser( allowUTF8: !translationStrategy.ShouldEscape(), underscoreSanitization: limits.OTelLabelNameUnderscoreSanitization(tenantID), preserveMultipleUnderscores: limits.OTelLabelNamePreserveMultipleUnderscores(tenantID), + persistResourceAttributes: limits.OTelPersistResourceAttributes(tenantID), } 
metrics, metadata, metricsDropped, err := otelMetricsToSeriesAndMetadata( ctx, @@ -575,6 +577,7 @@ type conversionOptions struct { allowUTF8 bool underscoreSanitization bool preserveMultipleUnderscores bool + persistResourceAttributes bool } func otelMetricsToSeriesAndMetadata( @@ -596,6 +599,7 @@ func otelMetricsToSeriesAndMetadata( LabelNamePreserveMultipleUnderscores: opts.preserveMultipleUnderscores, } converter.appender.EnableCreatedTimestampZeroIngestion = opts.enableCTZeroIngestion + converter.appender.PersistResourceAttributes = opts.persistResourceAttributes mimirTS, metadata := converter.ToSeriesAndMetadata(ctx, md, settings, logger) dropped := converter.DroppedTotal() diff --git a/pkg/distributor/otlpappender/mimir_appender.go b/pkg/distributor/otlpappender/mimir_appender.go index e09f1270706..c2784408ccf 100644 --- a/pkg/distributor/otlpappender/mimir_appender.go +++ b/pkg/distributor/otlpappender/mimir_appender.go @@ -3,6 +3,9 @@ package otlpappender import ( + "slices" + "strings" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -27,6 +30,8 @@ const defaultIntervalForStartTimestamps = int64(300_000) type MimirAppender struct { EnableCreatedTimestampZeroIngestion bool ValidIntervalCreatedTimestampZeroIngestion int64 + // PersistResourceAttributes enables storing OTel resource attributes per series. + PersistResourceAttributes bool series []mimirpb.PreallocTimeseries metadata []*mimirpb.MetricMetadata @@ -56,13 +61,14 @@ func (c *MimirAppender) GetResult() ([]mimirpb.PreallocTimeseries, []*mimirpb.Me return c.series, c.metadata } -func (c *MimirAppender) Append(_ storage.SeriesRef, ls labels.Labels, ct, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AppendV2Options) (storage.SeriesRef, error) { - ct = c.recalcCreatedTimestamp(t, ct) +// Append implements storage.AppenderV2. 
+func (c *MimirAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AppendV2Options) (storage.SeriesRef, error) { + st = c.recalcCreatedTimestamp(t, st) hash, idx, collisionIdx, seenSeries := c.processLabelsAndMetadata(ls) - if !seenSeries || c.ctRequiresNewSeries(idx.idx, ct) { - c.createNewSeries(&idx, collisionIdx, hash, ls, ct) + if !seenSeries || c.ctRequiresNewSeries(idx.idx, st) { + c.createNewSeries(&idx, collisionIdx, hash, ls, st, t, opts) } switch { @@ -79,7 +85,10 @@ func (c *MimirAppender) Append(_ storage.SeriesRef, ls labels.Labels, ct, t int6 return 0, nil } -func (c *MimirAppender) Commit() error { return nil } +// Commit implements storage.AppenderV2. No-op for Mimir (uses GetResult instead). +func (c *MimirAppender) Commit() error { return nil } + +// Rollback implements storage.AppenderV2. No-op for Mimir (uses GetResult instead). func (c *MimirAppender) Rollback() error { return nil } func (c *MimirAppender) recalcCreatedTimestamp(t, ct int64) int64 { @@ -139,10 +148,38 @@ func (c *MimirAppender) processLabelsAndMetadata(ls labels.Labels) (hash uint64, return } -func (c *MimirAppender) createNewSeries(idx *labelsIdx, collisionIdx int, hash uint64, ls labels.Labels, ct int64) { +func (c *MimirAppender) createNewSeries(idx *labelsIdx, collisionIdx int, hash uint64, ls labels.Labels, ct int64, t int64, opts storage.AppendV2Options) { ts := mimirpb.TimeseriesFromPool() ts.Labels = mimirpb.FromLabelsToLabelAdapters(ls) ts.CreatedTimestamp = ct + + // Attach resource attributes if enabled and we have any. + // Skip target_info series since it's synthesized from resource attributes. 
+ if c.PersistResourceAttributes && opts.Resource != nil && len(opts.Resource.Identifying) > 0 { + metricName := ls.Get(model.MetricNameLabel) + if metricName != "target_info" { + ts.ResourceAttributes = &mimirpb.ResourceAttributes{ + Identifying: mapToAttributeEntries(opts.Resource.Identifying), + Descriptive: mapToAttributeEntries(opts.Resource.Descriptive), + Entities: entityDataToResourceEntities(opts.Resource.Entities), + Timestamp: t, + } + } + } + + // Attach scope attributes if enabled and we have any. + if c.PersistResourceAttributes && opts.Scope != nil { + if opts.Scope.Name != "" || opts.Scope.Version != "" || opts.Scope.SchemaURL != "" || len(opts.Scope.Attrs) > 0 { + ts.ScopeAttributes = &mimirpb.ScopeAttributes{ + Name: opts.Scope.Name, + Version: opts.Scope.Version, + SchemaURL: opts.Scope.SchemaURL, + Attrs: mapToAttributeEntries(opts.Scope.Attrs), + Timestamp: t, + } + } + } + c.series = append(c.series, mimirpb.PreallocTimeseries{TimeSeries: ts}) idx.idx = len(c.series) - 1 @@ -153,6 +190,37 @@ func (c *MimirAppender) createNewSeries(idx *labelsIdx, collisionIdx int, hash u c.collisionRefs[hash][collisionIdx] = *idx } +// mapToAttributeEntries converts a map[string]string to a sorted slice of AttributeEntry. +func mapToAttributeEntries(m map[string]string) []mimirpb.AttributeEntry { + if len(m) == 0 { + return nil + } + entries := make([]mimirpb.AttributeEntry, 0, len(m)) + for k, v := range m { + entries = append(entries, mimirpb.AttributeEntry{Key: k, Value: v}) + } + slices.SortFunc(entries, func(a, b mimirpb.AttributeEntry) int { + return strings.Compare(a.Key, b.Key) + }) + return entries +} + +// entityDataToResourceEntities converts a slice of storage.EntityData to a slice of ResourceEntity. 
+func entityDataToResourceEntities(entities []storage.EntityData) []mimirpb.ResourceEntity { + if len(entities) == 0 { + return nil + } + result := make([]mimirpb.ResourceEntity, 0, len(entities)) + for _, e := range entities { + result = append(result, mimirpb.ResourceEntity{ + Type: e.Type, + ID: mapToAttributeEntries(e.ID), + Description: mapToAttributeEntries(e.Description), + }) + } + return result +} + // appendExemplars appends exemplars to the time series at the given index. // It's split from appenndMetadata to be eligible for inlining. func (c *MimirAppender) appendExemplars(seriesIdx int, es []exemplar.Exemplar) { diff --git a/pkg/distributor/otlpappender/mimir_appender_test.go b/pkg/distributor/otlpappender/mimir_appender_test.go index 794f9b8ecf9..3ec91982622 100644 --- a/pkg/distributor/otlpappender/mimir_appender_test.go +++ b/pkg/distributor/otlpappender/mimir_appender_test.go @@ -577,6 +577,219 @@ func TestMimirAppender(t *testing.T) { } } +func TestMimirAppender_ResourceContext(t *testing.T) { + testCases := map[string]struct { + persistResourceAttributes bool + resource *storage.ResourceContext + appends func(*testing.T, *MimirAppender, *storage.ResourceContext) + expectResourceAttrs *mimirpb.ResourceAttributes + }{ + "resource attributes disabled": { + persistResourceAttributes: false, + resource: &storage.ResourceContext{ + Identifying: map[string]string{"service.name": "myservice"}, + Descriptive: map[string]string{"host.name": "myhost"}, + }, + appends: func(t *testing.T, ca *MimirAppender, res *storage.ResourceContext) { + _, err := ca.Append(0, + labels.FromStrings(model.MetricNameLabel, "my_metric"), + 0, 1000, 42.0, nil, nil, + storage.AppendV2Options{ + Metadata: metadata.Metadata{Type: model.MetricTypeGauge}, + MetricFamilyName: "my_metric", + Resource: res, + }) + require.NoError(t, err) + }, + expectResourceAttrs: nil, // Disabled, so no attrs + }, + "resource attributes enabled with identifying attrs": { + persistResourceAttributes: 
true, + resource: &storage.ResourceContext{ + Identifying: map[string]string{"service.name": "myservice"}, + Descriptive: map[string]string{"host.name": "myhost"}, + }, + appends: func(t *testing.T, ca *MimirAppender, res *storage.ResourceContext) { + _, err := ca.Append(0, + labels.FromStrings(model.MetricNameLabel, "my_metric"), + 0, 1000, 42.0, nil, nil, + storage.AppendV2Options{ + Metadata: metadata.Metadata{Type: model.MetricTypeGauge}, + MetricFamilyName: "my_metric", + Resource: res, + }) + require.NoError(t, err) + }, + expectResourceAttrs: &mimirpb.ResourceAttributes{ + Identifying: []mimirpb.AttributeEntry{ + {Key: "service.name", Value: "myservice"}, + }, + Descriptive: []mimirpb.AttributeEntry{ + {Key: "host.name", Value: "myhost"}, + }, + Timestamp: 1000, + }, + }, + "target_info metric skips resource attributes": { + persistResourceAttributes: true, + resource: &storage.ResourceContext{ + Identifying: map[string]string{"service.name": "myservice"}, + }, + appends: func(t *testing.T, ca *MimirAppender, res *storage.ResourceContext) { + _, err := ca.Append(0, + labels.FromStrings(model.MetricNameLabel, "target_info"), + 0, 1000, 1.0, nil, nil, + storage.AppendV2Options{ + Metadata: metadata.Metadata{Type: model.MetricTypeInfo}, + MetricFamilyName: "target_info", + Resource: res, + }) + require.NoError(t, err) + }, + expectResourceAttrs: nil, // target_info should not have resource attrs + }, + "nil resource context": { + persistResourceAttributes: true, + resource: nil, + appends: func(t *testing.T, ca *MimirAppender, res *storage.ResourceContext) { + _, err := ca.Append(0, + labels.FromStrings(model.MetricNameLabel, "my_metric"), + 0, 1000, 42.0, nil, nil, + storage.AppendV2Options{ + Metadata: metadata.Metadata{Type: model.MetricTypeGauge}, + MetricFamilyName: "my_metric", + Resource: res, + }) + require.NoError(t, err) + }, + expectResourceAttrs: nil, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + appender := 
NewCombinedAppender() + appender.PersistResourceAttributes = tc.persistResourceAttributes + + tc.appends(t, appender, tc.resource) + + series, _ := appender.GetResult() + require.Len(t, series, 1) + + if tc.expectResourceAttrs == nil { + require.Nil(t, series[0].ResourceAttributes) + } else { + require.NotNil(t, series[0].ResourceAttributes) + // Check identifying attrs + require.Equal(t, len(tc.expectResourceAttrs.Identifying), len(series[0].ResourceAttributes.Identifying)) + for i, expected := range tc.expectResourceAttrs.Identifying { + require.Equal(t, expected.Key, series[0].ResourceAttributes.Identifying[i].Key) + require.Equal(t, expected.Value, series[0].ResourceAttributes.Identifying[i].Value) + } + // Check descriptive attrs + require.Equal(t, len(tc.expectResourceAttrs.Descriptive), len(series[0].ResourceAttributes.Descriptive)) + for i, expected := range tc.expectResourceAttrs.Descriptive { + require.Equal(t, expected.Key, series[0].ResourceAttributes.Descriptive[i].Key) + require.Equal(t, expected.Value, series[0].ResourceAttributes.Descriptive[i].Value) + } + require.Equal(t, tc.expectResourceAttrs.Timestamp, series[0].ResourceAttributes.Timestamp) + } + }) + } +} + +func TestMimirAppender_ScopeContext(t *testing.T) { + testCases := map[string]struct { + persistResourceAttributes bool + scope *storage.ScopeContext + expectScopeAttrs *mimirpb.ScopeAttributes + }{ + "scope attributes disabled": { + persistResourceAttributes: false, + scope: &storage.ScopeContext{ + Name: "github.com/example/payment", + Version: "1.2.0", + }, + expectScopeAttrs: nil, + }, + "scope attributes enabled with name and version": { + persistResourceAttributes: true, + scope: &storage.ScopeContext{ + Name: "github.com/example/payment", + Version: "1.2.0", + }, + expectScopeAttrs: &mimirpb.ScopeAttributes{ + Name: "github.com/example/payment", + Version: "1.2.0", + Timestamp: 1000, + }, + }, + "scope attributes enabled with all fields": { + persistResourceAttributes: true, + 
scope: &storage.ScopeContext{ + Name: "github.com/example/payment", + Version: "1.2.0", + SchemaURL: "https://opentelemetry.io/schemas/1.24.0", + Attrs: map[string]string{"library.language": "go"}, + }, + expectScopeAttrs: &mimirpb.ScopeAttributes{ + Name: "github.com/example/payment", + Version: "1.2.0", + SchemaURL: "https://opentelemetry.io/schemas/1.24.0", + Attrs: []mimirpb.AttributeEntry{ + {Key: "library.language", Value: "go"}, + }, + Timestamp: 1000, + }, + }, + "nil scope context": { + persistResourceAttributes: true, + scope: nil, + expectScopeAttrs: nil, + }, + "empty scope context": { + persistResourceAttributes: true, + scope: &storage.ScopeContext{}, + expectScopeAttrs: nil, // All fields empty, so not stored + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + appender := NewCombinedAppender() + appender.PersistResourceAttributes = tc.persistResourceAttributes + + _, err := appender.Append(0, + labels.FromStrings(model.MetricNameLabel, "my_metric"), + 0, 1000, 42.0, nil, nil, + storage.AppendV2Options{ + Metadata: metadata.Metadata{Type: model.MetricTypeGauge}, + MetricFamilyName: "my_metric", + Scope: tc.scope, + }) + require.NoError(t, err) + + series, _ := appender.GetResult() + require.Len(t, series, 1) + + if tc.expectScopeAttrs == nil { + require.Nil(t, series[0].ScopeAttributes) + } else { + require.NotNil(t, series[0].ScopeAttributes) + require.Equal(t, tc.expectScopeAttrs.Name, series[0].ScopeAttributes.Name) + require.Equal(t, tc.expectScopeAttrs.Version, series[0].ScopeAttributes.Version) + require.Equal(t, tc.expectScopeAttrs.SchemaURL, series[0].ScopeAttributes.SchemaURL) + require.Equal(t, tc.expectScopeAttrs.Timestamp, series[0].ScopeAttributes.Timestamp) + require.Equal(t, len(tc.expectScopeAttrs.Attrs), len(series[0].ScopeAttributes.Attrs)) + for i, expected := range tc.expectScopeAttrs.Attrs { + require.Equal(t, expected.Key, series[0].ScopeAttributes.Attrs[i].Key) + require.Equal(t, expected.Value, 
series[0].ScopeAttributes.Attrs[i].Value) + } + } + }) + } +} + // adapted from pkg/distributor/distributor_test.go func labelsWithHashCollision() (labels.Labels, labels.Labels) { // These two series have the same XXHash; thanks to https://github.com/pstibrany/labels_hash_collisions diff --git a/pkg/distributor/push_test.go b/pkg/distributor/push_test.go index 145d27dca9a..025ca1367f0 100644 --- a/pkg/distributor/push_test.go +++ b/pkg/distributor/push_test.go @@ -1865,6 +1865,10 @@ func (o otlpLimitsMock) OTelLabelNamePreserveMultipleUnderscores(string) bool { return true } +func (o otlpLimitsMock) OTelPersistResourceAttributes(string) bool { + return false +} + func promToMimirHistogram(h *prompb.Histogram) mimirpb.Histogram { pSpans := make([]mimirpb.BucketSpan, 0, len(h.PositiveSpans)) for _, span := range h.PositiveSpans { diff --git a/pkg/frontend/querymiddleware/roundtrip.go b/pkg/frontend/querymiddleware/roundtrip.go index fc1386675c7..60e8fcea8ab 100644 --- a/pkg/frontend/querymiddleware/roundtrip.go +++ b/pkg/frontend/querymiddleware/roundtrip.go @@ -41,6 +41,8 @@ const ( labelNamesPathSuffix = "/api/v1/labels" remoteReadPathSuffix = "/api/v1/read" seriesPathSuffix = "/api/v1/series" + resourceAttributesPathSuffix = "/api/v1/resources" + resourceAttributesSeriesPathSuffix = "/api/v1/resources/series" queryTypeInstant = "query" queryTypeRange = "query_range" @@ -49,6 +51,7 @@ const ( queryTypeLabels = "label_names_and_values" queryTypeActiveSeries = "active_series" queryTypeActiveNativeHistogramMetrics = "active_native_histogram_metrics" + queryTypeResourceAttributes = "resource_attributes" queryTypeOther = "other" ) @@ -317,12 +320,14 @@ func newQueryTripperware( activeNativeHistogramMetrics := next labels := next series := next + resourceAttributes := next if cfg.MaxRetries > 0 { cardinality = newRetryRoundTripper(cardinality, log, cfg.MaxRetries, retryMetrics) series = newRetryRoundTripper(series, log, cfg.MaxRetries, retryMetrics) labels = 
newRetryRoundTripper(labels, log, cfg.MaxRetries, retryMetrics) - activeSeries = newRetryRoundTripper(series, log, cfg.MaxRetries, retryMetrics) + activeSeries = newRetryRoundTripper(activeSeries, log, cfg.MaxRetries, retryMetrics) + resourceAttributes = newRetryRoundTripper(resourceAttributes, log, cfg.MaxRetries, retryMetrics) } if cfg.ShardActiveSeriesQueries { @@ -341,6 +346,7 @@ func newQueryTripperware( activeNativeHistogramMetrics = newReadConsistencyRoundTripper(activeNativeHistogramMetrics, ingestStorageTopicOffsetsReaders, limits, log, metrics) labels = newReadConsistencyRoundTripper(labels, ingestStorageTopicOffsetsReaders, limits, log, metrics) series = newReadConsistencyRoundTripper(series, ingestStorageTopicOffsetsReaders, limits, log, metrics) + resourceAttributes = newReadConsistencyRoundTripper(resourceAttributes, ingestStorageTopicOffsetsReaders, limits, log, metrics) remoteRead = newReadConsistencyRoundTripper(remoteRead, ingestStorageTopicOffsetsReaders, limits, log, metrics) next = newReadConsistencyRoundTripper(next, ingestStorageTopicOffsetsReaders, limits, log, metrics) } @@ -381,6 +387,10 @@ func newQueryTripperware( return labels.RoundTrip(r) case IsSeriesQuery(r.URL.Path): return series.RoundTrip(r) + case IsResourceAttributesSeriesQuery(r.URL.Path): + return resourceAttributes.RoundTrip(r) + case IsResourceAttributesQuery(r.URL.Path): + return resourceAttributes.RoundTrip(r) case IsRemoteReadQuery(r.URL.Path): return remoteRead.RoundTrip(r) default: @@ -630,6 +640,10 @@ func newQueryCountTripperware(registerer prometheus.Registerer) Tripperware { op = queryTypeActiveNativeHistogramMetrics case IsLabelsQuery(r.URL.Path): op = queryTypeLabels + case IsResourceAttributesSeriesQuery(r.URL.Path): + op = queryTypeResourceAttributes + case IsResourceAttributesQuery(r.URL.Path): + op = queryTypeResourceAttributes } tenantIDs, err := tenant.TenantIDs(r.Context()) @@ -683,6 +697,14 @@ func IsActiveNativeHistogramMetricsQuery(path string) bool { 
return strings.HasSuffix(path, cardinalityActiveNativeHistogramMetricsPathSuffix) } +func IsResourceAttributesSeriesQuery(path string) bool { + return strings.HasSuffix(path, resourceAttributesSeriesPathSuffix) +} + +func IsResourceAttributesQuery(path string) bool { + return strings.HasSuffix(path, resourceAttributesPathSuffix) && !IsResourceAttributesSeriesQuery(path) +} + func IsRemoteReadQuery(path string) bool { return strings.HasSuffix(path, remoteReadPathSuffix) } diff --git a/pkg/ingester/client/compat.go b/pkg/ingester/client/compat.go index fb3ffb7892a..1fd00373f11 100644 --- a/pkg/ingester/client/compat.go +++ b/pkg/ingester/client/compat.go @@ -205,6 +205,20 @@ func ToActiveSeriesRequest(matchers []*labels.Matcher) (*ActiveSeriesRequest, er return &ActiveSeriesRequest{Matchers: ms}, nil } +func ToResourceAttributesRequest(startMs, endMs int64, matchers []*labels.Matcher, limit int64, resourceAttrFilters []*ResourceAttrFilter) (*ResourceAttributesRequest, error) { + ms, err := ToLabelMatchers(matchers) + if err != nil { + return nil, err + } + return &ResourceAttributesRequest{ + StartTimestampMs: startMs, + EndTimestampMs: endMs, + Matchers: ms, + Limit: limit, + ResourceAttrFilters: resourceAttrFilters, + }, nil +} + func ToLabelMatchers(matchers []*labels.Matcher) ([]*LabelMatcher, error) { result := make([]*LabelMatcher, 0, len(matchers)) for _, matcher := range matchers { diff --git a/pkg/ingester/client/ingester.pb.go b/pkg/ingester/client/ingester.pb.go index 5e8a0e3da6d..6b32ad9964e 100644 --- a/pkg/ingester/client/ingester.pb.go +++ b/pkg/ingester/client/ingester.pb.go @@ -404,11 +404,11 @@ func (m *LabelValueSeriesCount) GetLabelValueSeries() map[string]uint64 { } type QueryRequest struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" 
json:"end_timestamp_ms,omitempty"` - Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` - ProjectionInclude bool `protobuf:"varint,4,opt,name=projection_include,json=projectionInclude,proto3" json:"projection_include,omitempty"` - ProjectionLabels []string `protobuf:"bytes,5,rep,name=projection_labels,json=projectionLabels,proto3" json:"projection_labels,omitempty"` + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` + ProjectionInclude bool `protobuf:"varint,4,opt,name=projection_include,json=projectionInclude,proto3" json:"projection_include,omitempty"` + ProjectionLabels []string `protobuf:"bytes,5,rep,name=projection_labels,json=projectionLabels,proto3" json:"projection_labels,omitempty"` // Why 100? This QueryRequest message is also used for remote read requests, so we need to avoid any field numbers added in the future. 
StreamingChunksBatchSize uint64 `protobuf:"varint,100,opt,name=streaming_chunks_batch_size,json=streamingChunksBatchSize,proto3" json:"streaming_chunks_batch_size,omitempty"` } @@ -1670,464 +1670,700 @@ func (m *LabelMatcher) GetValue() string { return "" } -func init() { - proto.RegisterEnum("cortex.CountMethod", CountMethod_name, CountMethod_value) - proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value) - proto.RegisterEnum("cortex.ActiveSeriesRequest_RequestType", ActiveSeriesRequest_RequestType_name, ActiveSeriesRequest_RequestType_value) - proto.RegisterType((*LabelNamesAndValuesRequest)(nil), "cortex.LabelNamesAndValuesRequest") - proto.RegisterType((*LabelNamesAndValuesResponse)(nil), "cortex.LabelNamesAndValuesResponse") - proto.RegisterType((*LabelValues)(nil), "cortex.LabelValues") - proto.RegisterType((*LabelValuesCardinalityRequest)(nil), "cortex.LabelValuesCardinalityRequest") - proto.RegisterType((*LabelValuesCardinalityResponse)(nil), "cortex.LabelValuesCardinalityResponse") - proto.RegisterType((*LabelValueSeriesCount)(nil), "cortex.LabelValueSeriesCount") - proto.RegisterMapType((map[string]uint64)(nil), "cortex.LabelValueSeriesCount.LabelValueSeriesEntry") - proto.RegisterType((*QueryRequest)(nil), "cortex.QueryRequest") - proto.RegisterType((*ExemplarQueryRequest)(nil), "cortex.ExemplarQueryRequest") - proto.RegisterType((*ActiveSeriesRequest)(nil), "cortex.ActiveSeriesRequest") - proto.RegisterType((*QueryStreamResponse)(nil), "cortex.QueryStreamResponse") - proto.RegisterType((*QueryStreamSeries)(nil), "cortex.QueryStreamSeries") - proto.RegisterType((*QueryStreamSeriesChunks)(nil), "cortex.QueryStreamSeriesChunks") - proto.RegisterType((*ExemplarQueryResponse)(nil), "cortex.ExemplarQueryResponse") - proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest") - proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse") - proto.RegisterType((*LabelNamesRequest)(nil), 
"cortex.LabelNamesRequest") - proto.RegisterType((*LabelNamesResponse)(nil), "cortex.LabelNamesResponse") - proto.RegisterType((*UserStatsRequest)(nil), "cortex.UserStatsRequest") - proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse") - proto.RegisterType((*UserIDStatsResponse)(nil), "cortex.UserIDStatsResponse") - proto.RegisterType((*UsersStatsResponse)(nil), "cortex.UsersStatsResponse") - proto.RegisterType((*MetricsForLabelMatchersRequest)(nil), "cortex.MetricsForLabelMatchersRequest") - proto.RegisterType((*MetricsForLabelMatchersResponse)(nil), "cortex.MetricsForLabelMatchersResponse") - proto.RegisterType((*MetricsMetadataRequest)(nil), "cortex.MetricsMetadataRequest") - proto.RegisterType((*MetricsMetadataResponse)(nil), "cortex.MetricsMetadataResponse") - proto.RegisterType((*ActiveSeriesResponse)(nil), "cortex.ActiveSeriesResponse") - proto.RegisterType((*Chunk)(nil), "cortex.Chunk") - proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers") - proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher") +// ResourceAttrFilter specifies a resource attribute key:value pair for reverse lookup. 
+type ResourceAttrFilter struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) } - -var fileDescriptor_60f6df4f3586b478 = []byte{ - // 1737 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0xd7, - 0x15, 0xe6, 0x25, 0x29, 0x46, 0x3c, 0xa4, 0xe5, 0xd1, 0x95, 0x6c, 0x31, 0xa3, 0x78, 0xa4, 0x4c, - 0xe1, 0x84, 0xcd, 0x43, 0x7e, 0x36, 0x70, 0xd2, 0x74, 0x41, 0xc9, 0x8c, 0x4d, 0x35, 0x94, 0x9c, - 0xa1, 0x9c, 0x3e, 0x80, 0x60, 0x30, 0x24, 0xaf, 0xa4, 0xa9, 0x38, 0xc3, 0xe9, 0xdc, 0xcb, 0xc0, - 0xca, 0xaa, 0xe8, 0xa2, 0xeb, 0xfe, 0x80, 0x6e, 0xba, 0x2b, 0xba, 0x6d, 0x81, 0x6e, 0xfa, 0x03, - 0xbc, 0x29, 0xe0, 0x45, 0x17, 0x41, 0x81, 0x1a, 0xb5, 0xbc, 0xe9, 0x32, 0x3f, 0x21, 0xb8, 0x8f, - 0x79, 0x92, 0x7a, 0x38, 0x40, 0xb2, 0xe2, 0xdc, 0x73, 0xce, 0x3d, 0xf7, 0xdc, 0x6f, 0xbe, 0xf3, - 0xe0, 0xc0, 0x82, 0xeb, 0x1f, 0x10, 0xca, 0x48, 0xb8, 0x11, 0x84, 0x63, 0x36, 0xc6, 0x95, 0xc1, - 0x38, 0x64, 0xe4, 0x89, 0x7e, 0xf3, 0xc0, 0x65, 0x87, 0x93, 0xfe, 0xc6, 0x60, 0xec, 0xdd, 0x38, - 0x08, 0x9d, 0x7d, 0xc7, 0x77, 0x6e, 0x78, 0xae, 0xe7, 0x86, 0x37, 0x82, 0xa3, 0x03, 0xf9, 0x14, - 0xf4, 0xe5, 0xaf, 0xdc, 0xa9, 0x2f, 0x1f, 0x8c, 0x0f, 0xc6, 0xe2, 0xf1, 0x06, 0x7f, 0x92, 0x52, - 0xf3, 0x0f, 0x08, 0xf4, 0x4f, 0x9d, 0x3e, 0x19, 0xed, 0x38, 0x1e, 0xa1, 0x2d, 0x7f, 0xf8, 0xb9, - 0x33, 0x9a, 0x10, 0x6a, 0x91, 0xdf, 0x4e, 0x08, 0x65, 0xf8, 0x26, 0xcc, 0x7b, 0x0e, 0x1b, 0x1c, - 0x92, 0x90, 0x36, 0xd0, 0x7a, 0xa9, 0x59, 0xbb, 0xbd, 0xbc, 0x21, 0x23, 0xd8, 0x10, 0xbb, 0xba, - 0x52, 0x69, 0xc5, 0x56, 0xf8, 0x03, 0xa8, 0x0f, 0xc6, 0x13, 0x9f, 0xd9, 0x1e, 0x61, 0x87, 0xe3, - 0x61, 0xa3, 0xb8, 0x8e, 0x9a, 0x0b, 0xb7, 0x97, 0xa2, 0x5d, 0x5b, 0x5c, 0xd7, 0x15, 0x2a, 0xab, - 0x36, 0x48, 0x16, 0xe6, 0x43, 0x58, 0x9d, 0x19, 
0x07, 0x0d, 0xc6, 0x3e, 0x25, 0xf8, 0xc7, 0x30, - 0xe7, 0x32, 0xe2, 0x45, 0x51, 0x2c, 0x65, 0xa2, 0x50, 0xb6, 0xd2, 0xc2, 0xbc, 0x0f, 0xb5, 0x94, - 0x14, 0x5f, 0x03, 0x18, 0xf1, 0xa5, 0xed, 0x3b, 0x1e, 0x69, 0xa0, 0x75, 0xd4, 0xac, 0x5a, 0xd5, - 0x51, 0x74, 0x14, 0xbe, 0x0a, 0x95, 0x2f, 0x85, 0x61, 0xa3, 0xb8, 0x5e, 0x6a, 0x56, 0x2d, 0xb5, - 0x32, 0xff, 0x8a, 0xe0, 0x5a, 0xca, 0xcd, 0x96, 0x13, 0x0e, 0x5d, 0xdf, 0x19, 0xb9, 0xec, 0x38, - 0xc2, 0x66, 0x0d, 0x6a, 0x89, 0x63, 0x19, 0x58, 0xd5, 0x82, 0xd8, 0x33, 0xcd, 0x80, 0x57, 0xfc, - 0x4e, 0xe0, 0x95, 0x2e, 0x08, 0xde, 0x63, 0x30, 0x4e, 0x8b, 0x55, 0xe1, 0x77, 0x27, 0x8b, 0xdf, - 0xb5, 0x69, 0xfc, 0x7a, 0x24, 0x74, 0x09, 0x15, 0x47, 0x44, 0x48, 0x3e, 0x47, 0x70, 0x65, 0xa6, - 0xc1, 0x79, 0xa0, 0x3a, 0x80, 0xa5, 0x5a, 0x80, 0x69, 0x53, 0xb1, 0x53, 0x61, 0x70, 0xe7, 0xcc, - 0xa3, 0xa7, 0xa4, 0x6d, 0x9f, 0x85, 0xc7, 0x96, 0x36, 0xca, 0x89, 0xf5, 0xad, 0xe9, 0xd0, 0x84, - 0x29, 0xd6, 0xa0, 0x74, 0x44, 0x8e, 0x55, 0x4c, 0xfc, 0x11, 0x2f, 0xc3, 0x9c, 0x88, 0x43, 0x70, - 0xb1, 0x6c, 0xc9, 0xc5, 0x47, 0xc5, 0x7b, 0xc8, 0xfc, 0x5b, 0x11, 0xea, 0x9f, 0x4d, 0x48, 0x18, - 0xbf, 0xd3, 0xf7, 0x00, 0x53, 0xe6, 0x84, 0xcc, 0x66, 0xae, 0x47, 0x28, 0x73, 0xbc, 0xc0, 0x16, - 0x98, 0xa1, 0x66, 0xc9, 0xd2, 0x84, 0x66, 0x2f, 0x52, 0x74, 0x29, 0x6e, 0x82, 0x46, 0xfc, 0x61, - 0xd6, 0xb6, 0x28, 0x6c, 0x17, 0x88, 0x3f, 0x4c, 0x5b, 0xa6, 0xa9, 0x50, 0xba, 0x10, 0x15, 0xde, - 0x07, 0x1c, 0x84, 0xe3, 0xdf, 0x90, 0x01, 0x73, 0xc7, 0xbe, 0xed, 0xfa, 0x83, 0xd1, 0x64, 0x48, - 0x1a, 0xe5, 0x75, 0xd4, 0x9c, 0xb7, 0x16, 0x13, 0x4d, 0x47, 0x2a, 0xf0, 0xbb, 0x90, 0x12, 0xda, - 0x02, 0x2d, 0xda, 0x98, 0x13, 0x94, 0xd4, 0x12, 0x85, 0x38, 0x8b, 0xe2, 0x9f, 0xc1, 0x2a, 0x65, - 0x21, 0x71, 0x3c, 0xd7, 0x3f, 0xb0, 0x07, 0x87, 0x13, 0xff, 0x88, 0xda, 0x7d, 0x7e, 0xb0, 0x4d, - 0xdd, 0xaf, 0x48, 0x63, 0x28, 0x60, 0x6a, 0xc4, 0x26, 0x5b, 0xc2, 0x62, 0x93, 0x1b, 0xf4, 0xdc, - 0xaf, 0x88, 0xf9, 0x67, 0x04, 0xcb, 0xed, 0x27, 0xc4, 0x0b, 0x46, 0x4e, 0xf8, 0x83, 
0xa0, 0x77, - 0x6b, 0x0a, 0xbd, 0x2b, 0xb3, 0xd0, 0xa3, 0x09, 0x7c, 0xe6, 0x3f, 0x11, 0x2c, 0xb5, 0x06, 0xcc, - 0xfd, 0x52, 0x71, 0xe3, 0xbb, 0x17, 0xb4, 0x9f, 0x42, 0x99, 0x1d, 0x07, 0x44, 0x15, 0xb2, 0xb7, - 0x23, 0xeb, 0x19, 0xce, 0x37, 0xd4, 0xef, 0xde, 0x71, 0x40, 0x2c, 0xb1, 0xc9, 0xfc, 0x00, 0x6a, - 0x29, 0x21, 0x06, 0xa8, 0xf4, 0xda, 0x56, 0xa7, 0xdd, 0xd3, 0x0a, 0x78, 0x15, 0x56, 0x76, 0x5a, - 0x7b, 0x9d, 0xcf, 0xdb, 0xf6, 0xc3, 0x4e, 0x6f, 0x6f, 0xf7, 0x81, 0xd5, 0xea, 0xda, 0x4a, 0x89, - 0xcc, 0xdf, 0x17, 0x61, 0x49, 0x40, 0xdb, 0x13, 0x2f, 0x21, 0x4e, 0xe3, 0x6d, 0xd0, 0x92, 0x37, - 0xa7, 0xd2, 0x4a, 0x22, 0xf2, 0x7a, 0x14, 0x58, 0x6a, 0x9b, 0x8c, 0x6e, 0xb3, 0xfc, 0xf4, 0xf9, - 0x5a, 0xc1, 0xba, 0x1c, 0x6f, 0x94, 0x62, 0x7c, 0x17, 0x56, 0x5c, 0x6a, 0xf3, 0x57, 0x30, 0xde, - 0x57, 0xbe, 0x6c, 0x69, 0xa3, 0x68, 0xb6, 0xe4, 0xd2, 0xb6, 0x3f, 0xdc, 0xdd, 0x97, 0xf6, 0xd2, - 0x25, 0xfe, 0x02, 0x56, 0xf2, 0x11, 0x28, 0x0a, 0x09, 0xba, 0xd5, 0x6e, 0xaf, 0x9d, 0x1a, 0x88, - 0xe2, 0x91, 0x0c, 0xe7, 0x4a, 0x2e, 0x1c, 0xa9, 0xdc, 0x2e, 0xcf, 0x23, 0xad, 0xb8, 0x5d, 0x9e, - 0x2f, 0x6a, 0x25, 0xf3, 0x4f, 0x08, 0x16, 0xa7, 0x9c, 0xe0, 0x7d, 0xa8, 0x28, 0x7a, 0x67, 0x5b, - 0x41, 0xd0, 0x97, 0x6f, 0xf0, 0x91, 0xe3, 0x86, 0x9b, 0x1f, 0xf2, 0x33, 0xfe, 0xf3, 0x7c, 0xed, - 0xd6, 0x45, 0xda, 0xa4, 0xdc, 0xd7, 0x1a, 0x3a, 0x01, 0x23, 0xa1, 0xa5, 0xbc, 0xf3, 0xf2, 0x2e, - 0xee, 0x65, 0x8b, 0x42, 0xab, 0x98, 0x09, 0x42, 0x24, 0x2a, 0x95, 0xe9, 0xc2, 0xca, 0x29, 0x57, - 0xc4, 0x6f, 0x42, 0x5d, 0x41, 0xe3, 0xfa, 0x43, 0xf2, 0x44, 0xa4, 0x40, 0xd9, 0xaa, 0x49, 0x59, - 0x87, 0x8b, 0xf0, 0xbb, 0x50, 0x51, 0xb0, 0xc9, 0xb2, 0x78, 0x29, 0x2e, 0xf2, 0x5c, 0xaa, 0x40, - 0x52, 0x26, 0x66, 0x0f, 0xae, 0xe4, 0x12, 0x4e, 0xf1, 0xe1, 0x23, 0x00, 0x91, 0x3f, 0x92, 0x09, - 0x59, 0x42, 0x07, 0xfd, 0x0d, 0x9e, 0x44, 0x19, 0x12, 0xa4, 0xac, 0xcd, 0x7f, 0x23, 0xc0, 0xe9, - 0xf6, 0xa9, 0x32, 0xe4, 0x9c, 0xd2, 0x3e, 0x3b, 0xc7, 0x8b, 0xaf, 0x90, 0xe3, 0xa5, 0x73, 0x73, - 0x9c, 0xd3, 0xef, 0xfc, 
0x1c, 0xe7, 0x75, 0x7d, 0xe4, 0x7a, 0x2e, 0x6b, 0xcc, 0x09, 0x8f, 0x72, - 0x61, 0xde, 0x83, 0xa5, 0xcc, 0xad, 0x14, 0x52, 0x6f, 0x42, 0x3d, 0xd5, 0x92, 0xa2, 0x76, 0x5d, - 0x4b, 0xfa, 0x0a, 0x35, 0xff, 0x8e, 0x60, 0x31, 0x99, 0x41, 0x7e, 0xd8, 0xa2, 0xf6, 0x6a, 0x17, - 0x2e, 0xa7, 0x2f, 0xfc, 0x13, 0xf5, 0x1a, 0x55, 0xd4, 0xea, 0xbe, 0xe7, 0x4d, 0x27, 0xe6, 0x36, - 0x68, 0x8f, 0x29, 0x09, 0x7b, 0xcc, 0x61, 0xf1, 0x5d, 0xf3, 0xf3, 0x07, 0xba, 0xe0, 0xfc, 0xf1, - 0x0f, 0x04, 0x8b, 0x29, 0x67, 0x2a, 0x84, 0xeb, 0xd1, 0xf4, 0xca, 0x5b, 0x52, 0xe8, 0x30, 0xc9, - 0x26, 0x64, 0x5d, 0x8a, 0xa5, 0x96, 0xc3, 0x08, 0x27, 0x9c, 0x3f, 0xf1, 0x92, 0x21, 0x81, 0xa7, - 0x4a, 0xd5, 0x9f, 0x44, 0xf9, 0xfe, 0x1e, 0x60, 0x27, 0x70, 0xed, 0x9c, 0xa7, 0x92, 0xf0, 0xa4, - 0x39, 0x81, 0xdb, 0xc9, 0x38, 0xdb, 0x80, 0xa5, 0x70, 0x32, 0x22, 0x79, 0xf3, 0xb2, 0x30, 0x5f, - 0xe4, 0xaa, 0x8c, 0xbd, 0xf9, 0x05, 0x2c, 0xf1, 0xc0, 0x3b, 0xf7, 0xb3, 0xa1, 0xaf, 0xc0, 0x6b, - 0x13, 0x4a, 0x42, 0xdb, 0x1d, 0xaa, 0x0c, 0xa8, 0xf0, 0x65, 0x67, 0x88, 0xdf, 0x87, 0xf2, 0xd0, - 0x61, 0x8e, 0x08, 0x33, 0x55, 0x74, 0xa7, 0x2e, 0x6f, 0x09, 0x33, 0xf3, 0x01, 0x60, 0xae, 0xa2, - 0x59, 0xef, 0xb7, 0x60, 0x8e, 0x72, 0x81, 0x4a, 0xd8, 0xd5, 0xb4, 0x97, 0x5c, 0x24, 0x96, 0xb4, - 0x34, 0x9f, 0x22, 0x30, 0xba, 0x84, 0x85, 0xee, 0x80, 0x7e, 0x32, 0x0e, 0xb3, 0x04, 0xf9, 0x9e, - 0x89, 0x7a, 0x0f, 0xea, 0x11, 0x03, 0x6d, 0x4a, 0xd8, 0xd9, 0x1d, 0xb8, 0x16, 0x99, 0xf6, 0x08, - 0x3b, 0x85, 0xaf, 0x3f, 0x87, 0xb5, 0x53, 0x6f, 0xa2, 0x00, 0x6a, 0x42, 0xc5, 0x13, 0x26, 0x0a, - 0x21, 0x2d, 0x29, 0x69, 0x72, 0xab, 0xa5, 0xf4, 0x66, 0x00, 0x57, 0x95, 0xb3, 0x2e, 0x61, 0x0e, - 0xc7, 0x3c, 0x82, 0x23, 0x3e, 0x9c, 0x23, 0xb0, 0xa8, 0x0e, 0xe7, 0xd7, 0x16, 0x0f, 0x76, 0x40, - 0x42, 0x5b, 0x9d, 0x51, 0x14, 0x06, 0x0b, 0x42, 0xfe, 0x88, 0x84, 0xd2, 0x1f, 0xff, 0x63, 0xa0, - 0xf4, 0x25, 0xc9, 0x00, 0x75, 0xe2, 0x2e, 0xac, 0x4c, 0x9d, 0xa8, 0xc2, 0xbe, 0x0b, 0xf3, 0x9e, - 0x92, 0xa9, 0xc0, 0x1b, 0xf9, 0xc0, 0xe3, 0x3d, 0xb1, 0xa5, 
0x39, 0x80, 0xe5, 0xec, 0x30, 0xf1, - 0xaa, 0x20, 0xf0, 0xda, 0xd6, 0x9f, 0x0c, 0x8e, 0x08, 0x8b, 0x7b, 0x55, 0x89, 0xb7, 0x1b, 0x29, - 0x93, 0xcd, 0xea, 0x5f, 0x08, 0xe6, 0x44, 0x67, 0xf9, 0xde, 0x68, 0xa2, 0xc3, 0x3c, 0xf1, 0x07, - 0xe3, 0xa1, 0xeb, 0x1f, 0x08, 0xc4, 0xe6, 0xac, 0x78, 0x8d, 0x1f, 0xa9, 0xac, 0xe1, 0x3c, 0xa8, - 0x6f, 0x7e, 0xac, 0x9a, 0xf3, 0xdd, 0x0b, 0x35, 0xe7, 0xc7, 0x3e, 0x75, 0xf6, 0xc9, 0xe6, 0x31, - 0x23, 0xbd, 0x91, 0x3b, 0x88, 0x12, 0xab, 0x05, 0x97, 0x32, 0xd4, 0x79, 0xf5, 0xc1, 0xce, 0xb4, - 0xa1, 0x9e, 0xd6, 0xe0, 0xeb, 0x6a, 0xd0, 0x93, 0x45, 0x6f, 0x31, 0xda, 0x2d, 0xd4, 0xc9, 0x48, - 0x87, 0x31, 0x94, 0x45, 0x67, 0x2c, 0x0a, 0x56, 0x88, 0xe7, 0xe4, 0x1f, 0x86, 0xa4, 0x8a, 0x5c, - 0xbc, 0xd3, 0x84, 0x5a, 0xaa, 0x62, 0xe2, 0x4b, 0x50, 0xed, 0xec, 0xd8, 0xdd, 0x76, 0x77, 0xd7, - 0xfa, 0x95, 0x56, 0xe0, 0xb3, 0x60, 0x6b, 0x8b, 0xcf, 0x7f, 0x1a, 0x7a, 0x67, 0x1b, 0xaa, 0xf1, - 0x31, 0xb8, 0x0a, 0x73, 0xed, 0xcf, 0x1e, 0xb7, 0x3e, 0xd5, 0x0a, 0x7c, 0xcb, 0xce, 0xee, 0x9e, - 0x2d, 0x97, 0x08, 0x5f, 0x86, 0x9a, 0xd5, 0x7e, 0xd0, 0xfe, 0xa5, 0xdd, 0x6d, 0xed, 0x6d, 0x3d, - 0xd4, 0x8a, 0x18, 0xc3, 0x82, 0x14, 0xec, 0xec, 0x2a, 0x59, 0xe9, 0xf6, 0x7f, 0x5f, 0x83, 0xf9, - 0x8e, 0xfa, 0x68, 0x80, 0x3f, 0x84, 0xf2, 0xa3, 0x09, 0x3d, 0xc4, 0x57, 0x13, 0xee, 0xfc, 0x22, - 0x74, 0x19, 0x51, 0x49, 0xa2, 0xaf, 0x4c, 0xc9, 0x25, 0xf9, 0xcc, 0x02, 0xbe, 0x0f, 0xb5, 0xd4, - 0x78, 0x83, 0x97, 0x33, 0x63, 0x5d, 0xb4, 0x7f, 0x75, 0xc6, 0xb0, 0x97, 0xf8, 0xb8, 0x89, 0xf0, - 0x2e, 0x2c, 0x08, 0x55, 0x34, 0xbe, 0x50, 0xfc, 0x46, 0xb4, 0x65, 0xd6, 0x5f, 0x08, 0xfd, 0xda, - 0x29, 0xda, 0x38, 0xac, 0x87, 0xd9, 0x7f, 0xf7, 0xfa, 0xac, 0x0f, 0x01, 0xf9, 0xe0, 0x66, 0xcc, - 0x03, 0x66, 0x01, 0xb7, 0x01, 0x92, 0xbe, 0x89, 0x5f, 0xcf, 0x18, 0xa7, 0x27, 0x00, 0x5d, 0x9f, - 0xa5, 0x8a, 0xdd, 0x6c, 0x42, 0x35, 0xae, 0xfe, 0xb8, 0x31, 0xa3, 0x21, 0x48, 0x27, 0xa7, 0xb7, - 0x0a, 0xb3, 0x80, 0x3f, 0x81, 0x7a, 0x6b, 0x34, 0xba, 0x88, 0x1b, 0x3d, 0xad, 0xa1, 0x79, 0x3f, - 
0xa3, 0xb8, 0x36, 0xe5, 0x4b, 0x2b, 0x7e, 0x2b, 0xe6, 0xf3, 0x99, 0x5d, 0x44, 0x7f, 0xfb, 0x5c, - 0xbb, 0xf8, 0xb4, 0x3d, 0xb8, 0x9c, 0xab, 0x84, 0xd8, 0xc8, 0xed, 0xce, 0x15, 0x65, 0x7d, 0xed, - 0x54, 0x7d, 0xec, 0xb5, 0xaf, 0xe6, 0xb7, 0xec, 0x87, 0x20, 0x6c, 0x4e, 0xbf, 0x84, 0xfc, 0xd7, - 0x2a, 0xfd, 0x47, 0x67, 0xda, 0xa4, 0x58, 0x79, 0x04, 0x57, 0x67, 0x7f, 0x2f, 0xc1, 0xd7, 0x67, - 0x70, 0x66, 0xfa, 0xdb, 0x8f, 0xfe, 0xd6, 0x79, 0x66, 0xa9, 0xc3, 0xba, 0x50, 0x4f, 0xd7, 0x77, - 0xbc, 0x7a, 0xc6, 0x5f, 0x48, 0xfd, 0x8d, 0xd9, 0xca, 0xc4, 0xdd, 0xe6, 0xc7, 0xcf, 0x5e, 0x18, - 0x85, 0xaf, 0x5f, 0x18, 0x85, 0x6f, 0x5e, 0x18, 0xe8, 0x77, 0x27, 0x06, 0xfa, 0xcb, 0x89, 0x81, - 0x9e, 0x9e, 0x18, 0xe8, 0xd9, 0x89, 0x81, 0xfe, 0x77, 0x62, 0xa0, 0xff, 0x9f, 0x18, 0x85, 0x6f, - 0x4e, 0x0c, 0xf4, 0xc7, 0x97, 0x46, 0xe1, 0xd9, 0x4b, 0xa3, 0xf0, 0xf5, 0x4b, 0xa3, 0xf0, 0xeb, - 0xca, 0x60, 0xe4, 0x12, 0x9f, 0xf5, 0x2b, 0xe2, 0xb3, 0xdf, 0x9d, 0x6f, 0x03, 0x00, 0x00, 0xff, - 0xff, 0xd2, 0xb1, 0xe6, 0x3a, 0x58, 0x14, 0x00, 0x00, -} +func (m *ResourceAttrFilter) Reset() { *m = ResourceAttrFilter{} } +func (m *ResourceAttrFilter) String() string { return fmt.Sprintf("ResourceAttrFilter{Key:%s, Value:%s}", m.Key, m.Value) } +func (*ResourceAttrFilter) ProtoMessage() {} -func (x CountMethod) String() string { - s, ok := CountMethod_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (x MatchType) String() string { - s, ok := MatchType_name[int32(x)] - if ok { - return s +func (m *ResourceAttrFilter) GetKey() string { + if m != nil { + return m.Key } - return strconv.Itoa(int(x)) + return "" } -func (x ActiveSeriesRequest_RequestType) String() string { - s, ok := ActiveSeriesRequest_RequestType_name[int32(x)] - if ok { - return s + +func (m *ResourceAttrFilter) GetValue() string { + if m != nil { + return m.Value } - return strconv.Itoa(int(x)) + return "" } -func (this *LabelNamesAndValuesRequest) Equal(that interface{}) bool { - if that == nil { - 
return this == nil - } - that1, ok := that.(*LabelNamesAndValuesRequest) - if !ok { - that2, ok := that.(LabelNamesAndValuesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false - } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } - } - if this.CountMethod != that1.CountMethod { - return false +func (m *ResourceAttrFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return true + return dAtA[:n], nil } -func (this *LabelNamesAndValuesResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*LabelNamesAndValuesResponse) - if !ok { - that2, ok := that.(LabelNamesAndValuesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Items) != len(that1.Items) { - return false - } - for i := range this.Items { - if !this.Items[i].Equal(that1.Items[i]) { - return false - } - } - return true +func (m *ResourceAttrFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (this *LabelValues) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*LabelValues) - if !ok { - that2, ok := that.(LabelValues) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.LabelName != that1.LabelName { - return false - } - if len(this.Values) != len(that1.Values) { - return false +func (m *ResourceAttrFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if len(m.Value) > 
0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintIngester(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 } - for i := range this.Values { - if this.Values[i] != that1.Values[i] { - return false - } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintIngester(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa } - return true + return len(dAtA) - i, nil } -func (this *LabelValuesCardinalityRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*LabelValuesCardinalityRequest) - if !ok { - that2, ok := that.(LabelValuesCardinalityRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelNames) != len(that1.LabelNames) { - return false - } - for i := range this.LabelNames { - if this.LabelNames[i] != that1.LabelNames[i] { - return false - } - } - if len(this.Matchers) != len(that1.Matchers) { - return false +func (m *ResourceAttrFilter) Size() (n int) { + if m == nil { + return 0 } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } + l := len(m.Key) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) } - if this.CountMethod != that1.CountMethod { - return false + l = len(m.Value) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) } - return true + return n } -func (this *LabelValuesCardinalityResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*LabelValuesCardinalityResponse) - if !ok { - that2, ok := that.(LabelValuesCardinalityResponse) - if ok { - that1 = &that2 - } else { - return false +func (m *ResourceAttrFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Items) != len(that1.Items) { - return false - } - for i := range this.Items { - if !this.Items[i].Equal(that1.Items[i]) { - return false + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttrFilter: wiretype end group for non-group") } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttrFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if iNdEx > l { + return io.ErrUnexpectedEOF } - return true -} -func (this *LabelValueSeriesCount) Equal(that interface{}) bool { - if that == nil { - return this == nil - } + return nil +} - that1, ok := that.(*LabelValueSeriesCount) - if !ok { - that2, ok := that.(LabelValueSeriesCount) - if ok { - that1 = &that2 - } else { - return false +// ResourceAttributesRequest queries OTel resource attributes for series matching matchers. +type ResourceAttributesRequest struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` + Limit int64 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` + ResourceAttrFilters []*ResourceAttrFilter `protobuf:"bytes,5,rep,name=resource_attr_filters,json=resourceAttrFilters,proto3" json:"resource_attr_filters,omitempty"` +} + +func (m *ResourceAttributesRequest) Reset() { *m = ResourceAttributesRequest{} } +func (*ResourceAttributesRequest) ProtoMessage() {} +func (*ResourceAttributesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_60f6df4f3586b478, []int{29} +} +func (m *ResourceAttributesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAttributesRequest.Marshal(b, m, deterministic) + } else { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if that1 == nil { - return this == nil - } else if this == nil { - return false +} +func (m *ResourceAttributesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributesRequest.Merge(m, src) +} +func (m *ResourceAttributesRequest) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributesRequest proto.InternalMessageInfo + +func (m *ResourceAttributesRequest) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs } - if this.LabelName != that1.LabelName { - return false + return 0 +} + +func (m *ResourceAttributesRequest) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs } - if len(this.LabelValueSeries) != len(that1.LabelValueSeries) { - return false + return 0 +} + +func (m *ResourceAttributesRequest) GetMatchers() []*LabelMatcher { + if m != nil { + return m.Matchers } - for i := range this.LabelValueSeries { - if this.LabelValueSeries[i] != that1.LabelValueSeries[i] { - return false - } + return nil +} + +func (m *ResourceAttributesRequest) GetLimit() int64 { + if m != nil { + return m.Limit } - return true + return 0 } -func (this *QueryRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil + +func (m *ResourceAttributesRequest) GetResourceAttrFilters() []*ResourceAttrFilter { + if m != nil { + return m.ResourceAttrFilters } + return nil +} - that1, ok := that.(*QueryRequest) - if !ok { - that2, ok := that.(QueryRequest) - if ok { - that1 = &that2 - } else { - return false +// ResourceAttributesResponse contains batches of series with their resource attributes. 
+type ResourceAttributesResponse struct { + Items []*SeriesResourceAttributes `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` +} + +func (m *ResourceAttributesResponse) Reset() { *m = ResourceAttributesResponse{} } +func (*ResourceAttributesResponse) ProtoMessage() {} +func (*ResourceAttributesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_60f6df4f3586b478, []int{30} +} +func (m *ResourceAttributesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAttributesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false +} +func (m *ResourceAttributesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributesResponse.Merge(m, src) +} +func (m *ResourceAttributesResponse) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributesResponse proto.InternalMessageInfo + +func (m *ResourceAttributesResponse) GetItems() []*SeriesResourceAttributes { + if m != nil { + return m.Items } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false + return nil +} + +// SeriesResourceAttributes contains resource data for a single series. +type SeriesResourceAttributes struct { + // The series labels. 
+ Labels []github_com_grafana_mimir_pkg_mimirpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/grafana/mimir/pkg/mimirpb.LabelAdapter" json:"labels"` + // Resource versions for this series. + Versions []*ResourceVersionData `protobuf:"bytes,2,rep,name=versions,proto3" json:"versions,omitempty"` +} + +func (m *SeriesResourceAttributes) Reset() { *m = SeriesResourceAttributes{} } +func (*SeriesResourceAttributes) ProtoMessage() {} +func (*SeriesResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_60f6df4f3586b478, []int{31} +} +func (m *SeriesResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesResourceAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if this.ProjectionInclude != that1.ProjectionInclude { - return false - } - if len(this.ProjectionLabels) != len(that1.ProjectionLabels) { - return false +} +func (m *SeriesResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesResourceAttributes.Merge(m, src) +} +func (m *SeriesResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *SeriesResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesResourceAttributes proto.InternalMessageInfo + +func (m *SeriesResourceAttributes) GetVersions() []*ResourceVersionData { + if m != nil { + return m.Versions } - for i := range this.ProjectionLabels { - if this.ProjectionLabels[i] != that1.ProjectionLabels[i] { - return false + return nil +} + +// ResourceVersionData represents a snapshot of resource data at a point in time. 
+type ResourceVersionData struct { + Identifying map[string]string `protobuf:"bytes,1,rep,name=identifying,proto3" json:"identifying,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Descriptive map[string]string `protobuf:"bytes,2,rep,name=descriptive,proto3" json:"descriptive,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Entities []*EntityData `protobuf:"bytes,3,rep,name=entities,proto3" json:"entities,omitempty"` + MinTimeMs int64 `protobuf:"varint,4,opt,name=min_time_ms,json=minTimeMs,proto3" json:"min_time_ms,omitempty"` + MaxTimeMs int64 `protobuf:"varint,5,opt,name=max_time_ms,json=maxTimeMs,proto3" json:"max_time_ms,omitempty"` +} + +func (m *ResourceVersionData) Reset() { *m = ResourceVersionData{} } +func (*ResourceVersionData) ProtoMessage() {} +func (*ResourceVersionData) Descriptor() ([]byte, []int) { + return fileDescriptor_60f6df4f3586b478, []int{32} +} +func (m *ResourceVersionData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceVersionData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceVersionData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if this.StreamingChunksBatchSize != that1.StreamingChunksBatchSize { - return false +} +func (m *ResourceVersionData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceVersionData.Merge(m, src) +} +func (m *ResourceVersionData) XXX_Size() int { + return m.Size() +} +func (m *ResourceVersionData) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceVersionData.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceVersionData proto.InternalMessageInfo + +func (m *ResourceVersionData) GetIdentifying() map[string]string { + if m != nil { + return m.Identifying } - return true + return nil } -func (this 
*ExemplarQueryRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil + +func (m *ResourceVersionData) GetDescriptive() map[string]string { + if m != nil { + return m.Descriptive } + return nil +} - that1, ok := that.(*ExemplarQueryRequest) - if !ok { - that2, ok := that.(ExemplarQueryRequest) - if ok { - that1 = &that2 - } else { - return false +func (m *ResourceVersionData) GetEntities() []*EntityData { + if m != nil { + return m.Entities + } + return nil +} + +func (m *ResourceVersionData) GetMinTimeMs() int64 { + if m != nil { + return m.MinTimeMs + } + return 0 +} + +func (m *ResourceVersionData) GetMaxTimeMs() int64 { + if m != nil { + return m.MaxTimeMs + } + return 0 +} + +// EntityData represents a typed OTel entity. +type EntityData struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Id map[string]string `protobuf:"bytes,2,rep,name=id,proto3" json:"id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Description map[string]string `protobuf:"bytes,3,rep,name=description,proto3" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *EntityData) Reset() { *m = EntityData{} } +func (*EntityData) ProtoMessage() {} +func (*EntityData) Descriptor() ([]byte, []int) { + return fileDescriptor_60f6df4f3586b478, []int{33} +} +func (m *EntityData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EntityData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EntityData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if that1 == nil { - return this == nil - } else if this == nil { - return false +} +func (m *EntityData) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityData.Merge(m, src) +} +func 
(m *EntityData) XXX_Size() int { + return m.Size() +} +func (m *EntityData) XXX_DiscardUnknown() { + xxx_messageInfo_EntityData.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityData proto.InternalMessageInfo + +func (m *EntityData) GetType() string { + if m != nil { + return m.Type } - if this.StartTimestampMs != that1.StartTimestampMs { - return false + return "" +} + +func (m *EntityData) GetId() map[string]string { + if m != nil { + return m.Id } - if this.EndTimestampMs != that1.EndTimestampMs { - return false + return nil +} + +func (m *EntityData) GetDescription() map[string]string { + if m != nil { + return m.Description } - if len(this.Matchers) != len(that1.Matchers) { - return false + return nil +} + +func init() { + proto.RegisterEnum("cortex.CountMethod", CountMethod_name, CountMethod_value) + proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value) + proto.RegisterEnum("cortex.ActiveSeriesRequest_RequestType", ActiveSeriesRequest_RequestType_name, ActiveSeriesRequest_RequestType_value) + proto.RegisterType((*LabelNamesAndValuesRequest)(nil), "cortex.LabelNamesAndValuesRequest") + proto.RegisterType((*LabelNamesAndValuesResponse)(nil), "cortex.LabelNamesAndValuesResponse") + proto.RegisterType((*LabelValues)(nil), "cortex.LabelValues") + proto.RegisterType((*LabelValuesCardinalityRequest)(nil), "cortex.LabelValuesCardinalityRequest") + proto.RegisterType((*LabelValuesCardinalityResponse)(nil), "cortex.LabelValuesCardinalityResponse") + proto.RegisterType((*LabelValueSeriesCount)(nil), "cortex.LabelValueSeriesCount") + proto.RegisterMapType((map[string]uint64)(nil), "cortex.LabelValueSeriesCount.LabelValueSeriesEntry") + proto.RegisterType((*QueryRequest)(nil), "cortex.QueryRequest") + proto.RegisterType((*ExemplarQueryRequest)(nil), "cortex.ExemplarQueryRequest") + proto.RegisterType((*ActiveSeriesRequest)(nil), "cortex.ActiveSeriesRequest") + proto.RegisterType((*QueryStreamResponse)(nil), "cortex.QueryStreamResponse") + 
proto.RegisterType((*QueryStreamSeries)(nil), "cortex.QueryStreamSeries") + proto.RegisterType((*QueryStreamSeriesChunks)(nil), "cortex.QueryStreamSeriesChunks") + proto.RegisterType((*ExemplarQueryResponse)(nil), "cortex.ExemplarQueryResponse") + proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest") + proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse") + proto.RegisterType((*LabelNamesRequest)(nil), "cortex.LabelNamesRequest") + proto.RegisterType((*LabelNamesResponse)(nil), "cortex.LabelNamesResponse") + proto.RegisterType((*UserStatsRequest)(nil), "cortex.UserStatsRequest") + proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse") + proto.RegisterType((*UserIDStatsResponse)(nil), "cortex.UserIDStatsResponse") + proto.RegisterType((*UsersStatsResponse)(nil), "cortex.UsersStatsResponse") + proto.RegisterType((*MetricsForLabelMatchersRequest)(nil), "cortex.MetricsForLabelMatchersRequest") + proto.RegisterType((*MetricsForLabelMatchersResponse)(nil), "cortex.MetricsForLabelMatchersResponse") + proto.RegisterType((*MetricsMetadataRequest)(nil), "cortex.MetricsMetadataRequest") + proto.RegisterType((*MetricsMetadataResponse)(nil), "cortex.MetricsMetadataResponse") + proto.RegisterType((*ActiveSeriesResponse)(nil), "cortex.ActiveSeriesResponse") + proto.RegisterType((*Chunk)(nil), "cortex.Chunk") + proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers") + proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher") + proto.RegisterType((*ResourceAttributesRequest)(nil), "cortex.ResourceAttributesRequest") + proto.RegisterType((*ResourceAttributesResponse)(nil), "cortex.ResourceAttributesResponse") + proto.RegisterType((*SeriesResourceAttributes)(nil), "cortex.SeriesResourceAttributes") + proto.RegisterType((*ResourceVersionData)(nil), "cortex.ResourceVersionData") + proto.RegisterMapType((map[string]string)(nil), "cortex.ResourceVersionData.DescriptiveEntry") + 
proto.RegisterMapType((map[string]string)(nil), "cortex.ResourceVersionData.IdentifyingEntry") + proto.RegisterType((*EntityData)(nil), "cortex.EntityData") + proto.RegisterMapType((map[string]string)(nil), "cortex.EntityData.DescriptionEntry") + proto.RegisterMapType((map[string]string)(nil), "cortex.EntityData.IdEntry") +} + +func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) } + +var fileDescriptor_60f6df4f3586b478 = []byte{ + // 1974 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x4b, 0x6f, 0x1b, 0xc9, + 0x11, 0xe6, 0x90, 0x94, 0x56, 0x2c, 0xd2, 0x32, 0xd5, 0x94, 0x2d, 0x7a, 0xb4, 0xa6, 0xe4, 0x59, + 0x78, 0x97, 0x71, 0x1c, 0xfa, 0xb9, 0x8e, 0x77, 0xb3, 0x09, 0x40, 0x49, 0x5c, 0x9b, 0xce, 0x52, + 0xf2, 0x0e, 0x65, 0xe7, 0x01, 0x2c, 0x06, 0x43, 0x4e, 0x4b, 0x6a, 0x88, 0x33, 0x64, 0x66, 0x9a, + 0x86, 0xb4, 0xa7, 0x20, 0x87, 0x1c, 0x72, 0xca, 0x0f, 0xc8, 0x25, 0x87, 0x00, 0x41, 0xce, 0x01, + 0x72, 0xd8, 0x20, 0x67, 0x5f, 0x02, 0xf8, 0x10, 0x20, 0x8b, 0x1c, 0x8c, 0x58, 0xbe, 0xe4, 0xb8, + 0x3f, 0x21, 0xe8, 0xc7, 0x3c, 0x39, 0xd4, 0xc3, 0x80, 0x7d, 0x12, 0xa7, 0xea, 0xab, 0xea, 0xaa, + 0xea, 0xaa, 0xea, 0xea, 0x16, 0xcc, 0x13, 0x67, 0x17, 0x7b, 0x14, 0xbb, 0x8d, 0x91, 0x3b, 0xa4, + 0x43, 0x34, 0xdb, 0x1f, 0xba, 0x14, 0x1f, 0xa8, 0x37, 0x77, 0x09, 0xdd, 0x1b, 0xf7, 0x1a, 0xfd, + 0xa1, 0x7d, 0x63, 0xd7, 0x35, 0x77, 0x4c, 0xc7, 0xbc, 0x61, 0x13, 0x9b, 0xb8, 0x37, 0x46, 0xfb, + 0xbb, 0xe2, 0xd7, 0xa8, 0x27, 0xfe, 0x0a, 0x49, 0x75, 0x71, 0x77, 0xb8, 0x3b, 0xe4, 0x3f, 0x6f, + 0xb0, 0x5f, 0x82, 0xaa, 0xfd, 0x56, 0x01, 0xf5, 0x0b, 0xb3, 0x87, 0x07, 0x9b, 0xa6, 0x8d, 0xbd, + 0xa6, 0x63, 0x3d, 0x35, 0x07, 0x63, 0xec, 0xe9, 0xf8, 0x57, 0x63, 0xec, 0x51, 0x74, 0x13, 0xe6, + 0x6c, 0x93, 0xf6, 0xf7, 0xb0, 0xeb, 0x55, 0x95, 0xd5, 0x5c, 0xbd, 0x78, 0x7b, 0xb1, 0x21, 0x2c, + 0x68, 0x70, 0xa9, 0x8e, 0x60, 0xea, 0x01, 0x0a, 0xdd, 0x83, 0x52, 0x7f, 0x38, 0x76, 0xa8, 0x61, + 0x63, 0xba, 
0x37, 0xb4, 0xaa, 0xd9, 0x55, 0xa5, 0x3e, 0x7f, 0xbb, 0xe2, 0x4b, 0xad, 0x33, 0x5e, + 0x87, 0xb3, 0xf4, 0x62, 0x3f, 0xfc, 0xd0, 0x1e, 0xc2, 0x72, 0xaa, 0x1d, 0xde, 0x68, 0xe8, 0x78, + 0x18, 0x7d, 0x0f, 0x66, 0x08, 0xc5, 0xb6, 0x6f, 0x45, 0x25, 0x66, 0x85, 0xc4, 0x0a, 0x84, 0xb6, + 0x01, 0xc5, 0x08, 0x15, 0x5d, 0x06, 0x18, 0xb0, 0x4f, 0xc3, 0x31, 0x6d, 0x5c, 0x55, 0x56, 0x95, + 0x7a, 0x41, 0x2f, 0x0c, 0xfc, 0xa5, 0xd0, 0x45, 0x98, 0x7d, 0xc6, 0x81, 0xd5, 0xec, 0x6a, 0xae, + 0x5e, 0xd0, 0xe5, 0x97, 0xf6, 0x17, 0x05, 0x2e, 0x47, 0xd4, 0xac, 0x9b, 0xae, 0x45, 0x1c, 0x73, + 0x40, 0xe8, 0xa1, 0x1f, 0x9b, 0x15, 0x28, 0x86, 0x8a, 0x85, 0x61, 0x05, 0x1d, 0x02, 0xcd, 0x5e, + 0x2c, 0x78, 0xd9, 0x37, 0x0a, 0x5e, 0xee, 0x94, 0xc1, 0x7b, 0x02, 0xb5, 0x69, 0xb6, 0xca, 0xf8, + 0xdd, 0x89, 0xc7, 0xef, 0xf2, 0x64, 0xfc, 0xba, 0xd8, 0x25, 0xd8, 0xe3, 0x4b, 0xf8, 0x91, 0x7c, + 0xa9, 0xc0, 0x85, 0x54, 0xc0, 0x49, 0x41, 0x35, 0x01, 0x09, 0x36, 0x0f, 0xa6, 0xe1, 0x71, 0x49, + 0x19, 0x83, 0x3b, 0xc7, 0x2e, 0x3d, 0x41, 0x6d, 0x39, 0xd4, 0x3d, 0xd4, 0xcb, 0x83, 0x04, 0x59, + 0x5d, 0x9f, 0x34, 0x8d, 0x43, 0x51, 0x19, 0x72, 0xfb, 0xf8, 0x50, 0xda, 0xc4, 0x7e, 0xa2, 0x45, + 0x98, 0xe1, 0x76, 0xf0, 0x5c, 0xcc, 0xeb, 0xe2, 0xe3, 0xd3, 0xec, 0x7d, 0x45, 0xfb, 0xb7, 0x02, + 0xa5, 0x2f, 0xc7, 0xd8, 0x0d, 0xf6, 0xf4, 0x3a, 0x20, 0x8f, 0x9a, 0x2e, 0x35, 0x28, 0xb1, 0xb1, + 0x47, 0x4d, 0x7b, 0x64, 0xf0, 0x98, 0x29, 0xf5, 0x9c, 0x5e, 0xe6, 0x9c, 0x6d, 0x9f, 0xd1, 0xf1, + 0x50, 0x1d, 0xca, 0xd8, 0xb1, 0xe2, 0xd8, 0x2c, 0xc7, 0xce, 0x63, 0xc7, 0x8a, 0x22, 0xa3, 0xa9, + 0x90, 0x3b, 0x55, 0x2a, 0xfc, 0x18, 0x96, 0x3d, 0xea, 0x62, 0xd3, 0x26, 0xce, 0xae, 0xd1, 0xdf, + 0x1b, 0x3b, 0xfb, 0x9e, 0xd1, 0x63, 0x4c, 0xc3, 0x23, 0x5f, 0xe3, 0xaa, 0xc5, 0x5d, 0xa9, 0x06, + 0x90, 0x75, 0x8e, 0x58, 0x63, 0x80, 0x2e, 0xf9, 0x1a, 0x6b, 0x7f, 0x54, 0x60, 0xb1, 0x75, 0x80, + 0xed, 0xd1, 0xc0, 0x74, 0xdf, 0x89, 0x87, 0xb7, 0x26, 0x3c, 0xbc, 0x90, 0xe6, 0xa1, 0x17, 0xba, + 0xa8, 0xfd, 0x5d, 0x81, 0x4a, 0xb3, 0x4f, 0xc9, 
0x33, 0xb9, 0x7f, 0x6f, 0xde, 0x74, 0x7e, 0x04, + 0x79, 0x7a, 0x38, 0xc2, 0xb2, 0xd9, 0x7c, 0xe4, 0xa3, 0x53, 0x94, 0x37, 0xe4, 0xdf, 0xed, 0xc3, + 0x11, 0xd6, 0xb9, 0x90, 0x76, 0x0f, 0x8a, 0x11, 0x22, 0x02, 0x98, 0xed, 0xb6, 0xf4, 0x76, 0xab, + 0x5b, 0xce, 0xa0, 0x65, 0x58, 0xda, 0x6c, 0x6e, 0xb7, 0x9f, 0xb6, 0x8c, 0x87, 0xed, 0xee, 0xf6, + 0xd6, 0x03, 0xbd, 0xd9, 0x31, 0x24, 0x53, 0xd1, 0x7e, 0x93, 0x85, 0x0a, 0x0f, 0x6d, 0x97, 0x6f, + 0x42, 0x50, 0x6a, 0x8f, 0xa0, 0x1c, 0xee, 0x9c, 0x4c, 0x7d, 0x11, 0x91, 0x4b, 0xbe, 0x61, 0x11, + 0x31, 0x61, 0xdd, 0x5a, 0xfe, 0xf9, 0xcb, 0x95, 0x8c, 0x7e, 0x3e, 0x10, 0x14, 0x64, 0x74, 0x17, + 0x96, 0x88, 0x67, 0xb0, 0x2d, 0x18, 0xee, 0x48, 0x5d, 0x86, 0xc0, 0x54, 0xf3, 0xab, 0x4a, 0x7d, + 0x4e, 0xaf, 0x10, 0xaf, 0xe5, 0x58, 0x5b, 0x3b, 0x02, 0x2f, 0x54, 0xa2, 0xaf, 0x60, 0x29, 0x69, + 0x81, 0x4c, 0xa1, 0xea, 0x0c, 0x37, 0x64, 0x65, 0xaa, 0x21, 0x32, 0x8f, 0x84, 0x39, 0x17, 0x12, + 0xe6, 0x08, 0xe6, 0xa3, 0xfc, 0x9c, 0x52, 0xce, 0x3e, 0xca, 0xcf, 0x65, 0xcb, 0x39, 0xed, 0x0f, + 0x0a, 0x2c, 0x4c, 0x28, 0x41, 0x3b, 0x30, 0xcb, 0x0b, 0x36, 0xd9, 0xae, 0x47, 0x3d, 0xb1, 0x83, + 0x8f, 0x4d, 0xe2, 0xae, 0x7d, 0xc2, 0xd6, 0xf8, 0xcf, 0xcb, 0x95, 0x5b, 0xa7, 0x39, 0xca, 0x84, + 0x5c, 0xd3, 0x32, 0x47, 0x14, 0xbb, 0xba, 0xd4, 0xce, 0x5a, 0x30, 0xf7, 0xcb, 0xe0, 0xcd, 0x50, + 0x66, 0x26, 0x70, 0x12, 0xef, 0x26, 0x1a, 0x81, 0xa5, 0x29, 0x2e, 0xa2, 0x2b, 0x50, 0x92, 0xa1, + 0x21, 0x8e, 0x85, 0x0f, 0x78, 0x09, 0xe4, 0xf5, 0xa2, 0xa0, 0xb5, 0x19, 0x09, 0x7d, 0x1f, 0x66, + 0x65, 0xd8, 0x44, 0xeb, 0x3a, 0x17, 0x34, 0x62, 0x46, 0x95, 0x41, 0x92, 0x10, 0xad, 0x0b, 0x17, + 0x12, 0x05, 0x27, 0xf3, 0xe1, 0x53, 0x00, 0x5e, 0x3f, 0x22, 0x13, 0xe2, 0x09, 0x3d, 0xea, 0x35, + 0x58, 0x11, 0xc5, 0x92, 0x20, 0x82, 0xd6, 0xfe, 0xa5, 0x00, 0x8a, 0x1e, 0x71, 0xb2, 0x42, 0x4e, + 0x68, 0xbf, 0xe9, 0x35, 0x9e, 0x3d, 0x43, 0x8d, 0xe7, 0x4e, 0xac, 0x71, 0x96, 0x7e, 0x27, 0xd7, + 0x38, 0xeb, 0xbd, 0x03, 0x62, 0x13, 0x5a, 0x9d, 0xe1, 0x1a, 0xc5, 0x87, 0x76, 0x1f, 
0x2a, 0x31, + 0xaf, 0x64, 0xa4, 0xae, 0x40, 0x29, 0x72, 0x6c, 0xf8, 0x47, 0x6a, 0x31, 0xec, 0xfd, 0x9e, 0xf6, + 0x57, 0x05, 0x16, 0xc2, 0x39, 0xe1, 0xdd, 0x36, 0xb5, 0xb3, 0x39, 0x9c, 0x8f, 0x3a, 0xfc, 0xb1, + 0xdc, 0x46, 0x69, 0xb5, 0xf4, 0xf7, 0xa4, 0x09, 0x42, 0x7b, 0x04, 0xe5, 0x27, 0x1e, 0x76, 0xbb, + 0xd4, 0xa4, 0x81, 0xaf, 0xc9, 0x19, 0x41, 0x39, 0xe5, 0x8c, 0xf0, 0x37, 0x05, 0x16, 0x22, 0xca, + 0xa4, 0x09, 0x57, 0xfd, 0x09, 0x93, 0x0c, 0x1d, 0xc3, 0x35, 0xa9, 0xc8, 0x26, 0x45, 0x3f, 0x17, + 0x50, 0x75, 0x93, 0x62, 0x96, 0x70, 0xce, 0xd8, 0x0e, 0x0f, 0x72, 0x56, 0x2a, 0x05, 0x67, 0xec, + 0xd7, 0xfb, 0x75, 0x40, 0xe6, 0x88, 0x18, 0x09, 0x4d, 0x39, 0xae, 0xa9, 0x6c, 0x8e, 0x48, 0x3b, + 0xa6, 0xac, 0x01, 0x15, 0x77, 0x3c, 0xc0, 0x49, 0x78, 0x9e, 0xc3, 0x17, 0x18, 0x2b, 0x86, 0xd7, + 0xbe, 0x82, 0x0a, 0x33, 0xbc, 0xbd, 0x11, 0x37, 0x7d, 0x09, 0xde, 0x1b, 0x7b, 0xd8, 0x35, 0x88, + 0x25, 0x2b, 0x60, 0x96, 0x7d, 0xb6, 0x2d, 0xf4, 0x03, 0xc8, 0x5b, 0x26, 0x35, 0xb9, 0x99, 0x91, + 0xa6, 0x3b, 0xe1, 0xbc, 0xce, 0x61, 0xda, 0x03, 0x40, 0x8c, 0xe5, 0xc5, 0xb5, 0xdf, 0x82, 0x19, + 0x8f, 0x11, 0x64, 0xc1, 0x2e, 0x47, 0xb5, 0x24, 0x2c, 0xd1, 0x05, 0x52, 0x7b, 0xae, 0x40, 0xad, + 0x83, 0xa9, 0x4b, 0xfa, 0xde, 0xe7, 0x43, 0x37, 0x9e, 0x20, 0x6f, 0x39, 0x51, 0xef, 0x43, 0xc9, + 0xcf, 0x40, 0xc3, 0xc3, 0xf4, 0xf8, 0x13, 0xb8, 0xe8, 0x43, 0xbb, 0x98, 0x4e, 0xc9, 0xd7, 0x9f, + 0xc2, 0xca, 0x54, 0x4f, 0x64, 0x80, 0xea, 0x30, 0x6b, 0x73, 0x88, 0x8c, 0x50, 0x39, 0x6c, 0x69, + 0x42, 0x54, 0x97, 0x7c, 0x6d, 0x04, 0x17, 0xa5, 0xb2, 0x0e, 0xa6, 0x26, 0x8b, 0xb9, 0x1f, 0x8e, + 0x60, 0x71, 0x16, 0x81, 0x05, 0xb9, 0x38, 0x73, 0x9b, 0xff, 0x30, 0x46, 0xd8, 0x35, 0xe4, 0x1a, + 0x59, 0x0e, 0x98, 0xe7, 0xf4, 0xc7, 0xd8, 0x15, 0xfa, 0xd8, 0xf0, 0x2e, 0xf9, 0x39, 0x91, 0x01, + 0x72, 0xc5, 0x2d, 0x58, 0x9a, 0x58, 0x51, 0x9a, 0x7d, 0x17, 0xe6, 0x6c, 0x49, 0x93, 0x86, 0x57, + 0x93, 0x86, 0x07, 0x32, 0x01, 0x52, 0xeb, 0xc3, 0x62, 0x7c, 0x98, 0x38, 0x6b, 0x10, 0x58, 0x6f, + 0xeb, 0x8d, 0xfb, 0xfb, 
0x98, 0x06, 0x67, 0x55, 0x8e, 0x1d, 0x37, 0x82, 0x26, 0x0e, 0xab, 0x7f, + 0x2a, 0x30, 0xc3, 0x4f, 0x96, 0xb7, 0x96, 0x26, 0x2a, 0xcc, 0x61, 0xa7, 0x3f, 0xb4, 0x88, 0xb3, + 0xcb, 0x23, 0x36, 0xa3, 0x07, 0xdf, 0xe8, 0xb1, 0xac, 0x1a, 0x96, 0x07, 0xa5, 0xb5, 0xcf, 0xe4, + 0xe1, 0x7c, 0xf7, 0x54, 0x87, 0xf3, 0x13, 0xc7, 0x33, 0x77, 0xf0, 0xda, 0x21, 0xc5, 0xdd, 0x01, + 0xe9, 0xfb, 0x85, 0xd5, 0x84, 0x73, 0xb1, 0xd4, 0x39, 0xfb, 0x60, 0xa7, 0x19, 0x50, 0x8a, 0x72, + 0xd0, 0x55, 0x39, 0xe8, 0x89, 0xa6, 0xb7, 0xe0, 0x4b, 0x73, 0x76, 0x38, 0xd2, 0x21, 0x04, 0x79, + 0x7e, 0x32, 0x66, 0x79, 0x56, 0xf0, 0xdf, 0xe1, 0x2d, 0x40, 0xa4, 0x8a, 0xf8, 0xd0, 0xbe, 0x51, + 0xe0, 0x92, 0x8e, 0xbd, 0xe1, 0xd8, 0xed, 0xe3, 0x26, 0xa5, 0x2e, 0xe9, 0x8d, 0xe9, 0xdb, 0x3f, + 0x57, 0xce, 0x7e, 0x1d, 0x48, 0x2f, 0xd3, 0x6d, 0x50, 0xd3, 0x8c, 0x97, 0xc9, 0x79, 0x2f, 0x7e, + 0xe7, 0x5b, 0xf5, 0x97, 0x08, 0x72, 0x38, 0x29, 0x28, 0xaf, 0x7d, 0xdf, 0x28, 0x50, 0x9d, 0x86, + 0x79, 0x67, 0xa3, 0xdd, 0x0f, 0x61, 0xee, 0x19, 0x76, 0x3d, 0x32, 0x74, 0xfc, 0xe9, 0x2b, 0x68, + 0xc1, 0xbe, 0x55, 0x4f, 0x05, 0x7f, 0x83, 0x97, 0xaa, 0x0f, 0xd6, 0xfe, 0x94, 0x83, 0x4a, 0x0a, + 0x02, 0x6d, 0x42, 0x91, 0x58, 0xd8, 0xa1, 0x64, 0xe7, 0x90, 0xa5, 0xbf, 0xb0, 0xfe, 0xfa, 0x31, + 0x3a, 0x1b, 0xed, 0x10, 0x2e, 0x6e, 0xa1, 0x51, 0x05, 0x4c, 0x9f, 0x85, 0xbd, 0xbe, 0x4b, 0x46, + 0xac, 0x2f, 0x48, 0x1b, 0x8f, 0xd5, 0xb7, 0x11, 0xc2, 0xa5, 0xbe, 0x88, 0x02, 0xd4, 0x60, 0xb5, + 0x49, 0x09, 0x0d, 0xaf, 0x0b, 0xc8, 0x57, 0xd6, 0x62, 0xf4, 0x43, 0xe1, 0xa7, 0x8f, 0x41, 0x35, + 0x28, 0xda, 0xc4, 0xe1, 0xd9, 0xc6, 0x12, 0x4d, 0xe4, 0x45, 0xc1, 0x26, 0x0e, 0x4b, 0xb4, 0x8e, + 0xe0, 0x9b, 0x07, 0x01, 0x7f, 0x46, 0xf2, 0xcd, 0x03, 0xc1, 0x57, 0x7f, 0x02, 0xe5, 0xa4, 0x83, + 0x27, 0xdd, 0x9d, 0x0b, 0x91, 0xbb, 0x33, 0x93, 0x4f, 0x3a, 0x74, 0x16, 0x79, 0xed, 0x77, 0x59, + 0x80, 0xd0, 0x31, 0x56, 0xb2, 0x41, 0x65, 0x17, 0x64, 0x19, 0x5f, 0x83, 0x2c, 0xb1, 0x64, 0x64, + 0xd5, 0xc9, 0x60, 0x34, 0xda, 0x96, 0x88, 0x63, 0x96, 0x58, 
0xa8, 0x15, 0xd9, 0x8e, 0xa1, 0x23, + 0x23, 0xf8, 0x41, 0x8a, 0xd0, 0x46, 0x88, 0x4a, 0xee, 0xc2, 0xd0, 0x51, 0x3f, 0x86, 0xf7, 0xa4, + 0xd6, 0x37, 0x0e, 0x86, 0xd4, 0x7b, 0x16, 0xf9, 0x6b, 0x75, 0x28, 0x46, 0x06, 0x37, 0x74, 0x0e, + 0x0a, 0xed, 0x4d, 0xa3, 0xd3, 0xea, 0x6c, 0xe9, 0xbf, 0x28, 0x67, 0xd8, 0x95, 0xb4, 0xb9, 0xce, + 0xae, 0xa1, 0x65, 0xe5, 0xda, 0x23, 0x28, 0x04, 0xdd, 0x0e, 0x15, 0x60, 0xa6, 0xf5, 0xe5, 0x93, + 0xe6, 0x17, 0xe5, 0x0c, 0x13, 0xd9, 0xdc, 0xda, 0x36, 0xc4, 0xa7, 0x82, 0xce, 0x43, 0x51, 0x6f, + 0x3d, 0x68, 0xfd, 0xdc, 0xe8, 0x34, 0xb7, 0xd7, 0x1f, 0x96, 0xb3, 0x08, 0xc1, 0xbc, 0x20, 0x6c, + 0x6e, 0x49, 0x5a, 0xee, 0xf6, 0x3f, 0xe6, 0x60, 0xae, 0x2d, 0xdf, 0x17, 0xd1, 0x27, 0x90, 0x7f, + 0x3c, 0xf6, 0xf6, 0xd0, 0xc5, 0xb0, 0xa0, 0x7f, 0xe6, 0x12, 0x8a, 0x65, 0x2f, 0x54, 0x97, 0x26, + 0xe8, 0xa2, 0xcd, 0x68, 0x19, 0xb4, 0x01, 0xc5, 0xc8, 0x2d, 0x0b, 0x2d, 0xc6, 0x6e, 0x97, 0xbe, + 0xfc, 0x72, 0xca, 0x9d, 0x33, 0xd4, 0x71, 0x53, 0x41, 0x5b, 0x30, 0xcf, 0x59, 0xfe, 0x2d, 0xca, + 0x43, 0xef, 0x07, 0xdb, 0x97, 0xf2, 0x92, 0xa1, 0x5e, 0x9e, 0xc2, 0x0d, 0xcc, 0x7a, 0x18, 0x7f, + 0x08, 0x54, 0xd3, 0xde, 0x0c, 0x93, 0xc6, 0xa5, 0x5c, 0x4b, 0xb4, 0x0c, 0x6a, 0x01, 0x84, 0xe3, + 0x3b, 0xba, 0x14, 0x03, 0x47, 0x2f, 0x22, 0xaa, 0x9a, 0xc6, 0x0a, 0xd4, 0xac, 0x41, 0x21, 0x18, + 0x42, 0x51, 0x35, 0x65, 0x2e, 0x15, 0x4a, 0xa6, 0x4f, 0xac, 0x5a, 0x06, 0x7d, 0x0e, 0xa5, 0xe6, + 0x60, 0x70, 0x1a, 0x35, 0x6a, 0x94, 0xe3, 0x25, 0xf5, 0x0c, 0x82, 0x11, 0x29, 0x39, 0xe1, 0xa1, + 0x0f, 0x83, 0x63, 0xf5, 0xd8, 0x61, 0x56, 0xfd, 0xe8, 0x44, 0x5c, 0xb0, 0xda, 0x36, 0x9c, 0x4f, + 0x0c, 0x64, 0xa8, 0x96, 0x90, 0x4e, 0xcc, 0x86, 0xea, 0xca, 0x54, 0x7e, 0xa0, 0xb5, 0x27, 0xaf, + 0x91, 0xf1, 0x37, 0x63, 0xa4, 0x4d, 0x6e, 0x42, 0xf2, 0x61, 0x5b, 0xfd, 0xe0, 0x58, 0x4c, 0x24, + 0x2b, 0xf7, 0xe1, 0x62, 0xfa, 0xd3, 0x2a, 0xba, 0x9a, 0x92, 0x33, 0x93, 0xcf, 0xc4, 0xea, 0x87, + 0x27, 0xc1, 0x22, 0x8b, 0x75, 0xa0, 0x14, 0x1d, 0x33, 0xd1, 0xf2, 0x31, 0x2f, 0x59, 0xea, 0xfb, + 
0xe9, 0xcc, 0x88, 0x3a, 0x03, 0x50, 0xca, 0x09, 0x7e, 0x25, 0x79, 0x46, 0x4d, 0xcc, 0x3d, 0xaa, + 0x76, 0x1c, 0x24, 0x5c, 0x60, 0xed, 0xb3, 0x17, 0xaf, 0x6a, 0x99, 0x6f, 0x5f, 0xd5, 0x32, 0xdf, + 0xbd, 0xaa, 0x29, 0xbf, 0x3e, 0xaa, 0x29, 0x7f, 0x3e, 0xaa, 0x29, 0xcf, 0x8f, 0x6a, 0xca, 0x8b, + 0xa3, 0x9a, 0xf2, 0xdf, 0xa3, 0x9a, 0xf2, 0xbf, 0xa3, 0x5a, 0xe6, 0xbb, 0xa3, 0x9a, 0xf2, 0xfb, + 0xd7, 0xb5, 0xcc, 0x8b, 0xd7, 0xb5, 0xcc, 0xb7, 0xaf, 0x6b, 0x99, 0x5f, 0xce, 0xf6, 0x07, 0x04, + 0x3b, 0xb4, 0x37, 0xcb, 0xff, 0x05, 0x71, 0xe7, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0xc8, + 0x10, 0x10, 0xe4, 0x18, 0x00, 0x00, +} + +func (x CountMethod) String() string { + s, ok := CountMethod_name[int32(x)] + if ok { + return s } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } + return strconv.Itoa(int(x)) +} +func (x MatchType) String() string { + s, ok := MatchType_name[int32(x)] + if ok { + return s } - return true + return strconv.Itoa(int(x)) } -func (this *ActiveSeriesRequest) Equal(that interface{}) bool { +func (x ActiveSeriesRequest_RequestType) String() string { + s, ok := ActiveSeriesRequest_RequestType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *LabelNamesAndValuesRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*ActiveSeriesRequest) + that1, ok := that.(*LabelNamesAndValuesRequest) if !ok { - that2, ok := that.(ActiveSeriesRequest) + that2, ok := that.(LabelNamesAndValuesRequest) if ok { that1 = &that2 } else { @@ -2147,19 +2383,19 @@ func (this *ActiveSeriesRequest) Equal(that interface{}) bool { return false } } - if this.Type != that1.Type { + if this.CountMethod != that1.CountMethod { return false } return true } -func (this *QueryStreamResponse) Equal(that interface{}) bool { +func (this *LabelNamesAndValuesResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := 
that.(*QueryStreamResponse) + that1, ok := that.(*LabelNamesAndValuesResponse) if !ok { - that2, ok := that.(QueryStreamResponse) + that2, ok := that.(LabelNamesAndValuesResponse) if ok { that1 = &that2 } else { @@ -2171,35 +2407,24 @@ func (this *QueryStreamResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.StreamingSeries) != len(that1.StreamingSeries) { - return false - } - for i := range this.StreamingSeries { - if !this.StreamingSeries[i].Equal(&that1.StreamingSeries[i]) { - return false - } - } - if this.IsEndOfSeriesStream != that1.IsEndOfSeriesStream { - return false - } - if len(this.StreamingSeriesChunks) != len(that1.StreamingSeriesChunks) { + if len(this.Items) != len(that1.Items) { return false } - for i := range this.StreamingSeriesChunks { - if !this.StreamingSeriesChunks[i].Equal(&that1.StreamingSeriesChunks[i]) { + for i := range this.Items { + if !this.Items[i].Equal(that1.Items[i]) { return false } } return true } -func (this *QueryStreamSeries) Equal(that interface{}) bool { +func (this *LabelValues) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*QueryStreamSeries) + that1, ok := that.(*LabelValues) if !ok { - that2, ok := that.(QueryStreamSeries) + that2, ok := that.(LabelValues) if ok { that1 = &that2 } else { @@ -2211,27 +2436,27 @@ func (this *QueryStreamSeries) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Labels) != len(that1.Labels) { + if this.LabelName != that1.LabelName { return false } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if this.Values[i] != that1.Values[i] { return false } } - if this.ChunkCount != that1.ChunkCount { - return false - } return true } -func (this *QueryStreamSeriesChunks) Equal(that interface{}) bool { +func (this *LabelValuesCardinalityRequest) Equal(that 
interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*QueryStreamSeriesChunks) + that1, ok := that.(*LabelValuesCardinalityRequest) if !ok { - that2, ok := that.(QueryStreamSeriesChunks) + that2, ok := that.(LabelValuesCardinalityRequest) if ok { that1 = &that2 } else { @@ -2243,27 +2468,35 @@ func (this *QueryStreamSeriesChunks) Equal(that interface{}) bool { } else if this == nil { return false } - if this.SeriesIndex != that1.SeriesIndex { + if len(this.LabelNames) != len(that1.LabelNames) { return false } - if len(this.Chunks) != len(that1.Chunks) { + for i := range this.LabelNames { + if this.LabelNames[i] != that1.LabelNames[i] { + return false + } + } + if len(this.Matchers) != len(that1.Matchers) { return false } - for i := range this.Chunks { - if !this.Chunks[i].Equal(&that1.Chunks[i]) { + for i := range this.Matchers { + if !this.Matchers[i].Equal(that1.Matchers[i]) { return false } } + if this.CountMethod != that1.CountMethod { + return false + } return true } -func (this *ExemplarQueryResponse) Equal(that interface{}) bool { +func (this *LabelValuesCardinalityResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*ExemplarQueryResponse) + that1, ok := that.(*LabelValuesCardinalityResponse) if !ok { - that2, ok := that.(ExemplarQueryResponse) + that2, ok := that.(LabelValuesCardinalityResponse) if ok { that1 = &that2 } else { @@ -2275,24 +2508,24 @@ func (this *ExemplarQueryResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Timeseries) != len(that1.Timeseries) { + if len(this.Items) != len(that1.Items) { return false } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { + for i := range this.Items { + if !this.Items[i].Equal(that1.Items[i]) { return false } } return true } -func (this *LabelValuesRequest) Equal(that interface{}) bool { +func (this *LabelValueSeriesCount) Equal(that interface{}) bool { if 
that == nil { return this == nil } - that1, ok := that.(*LabelValuesRequest) + that1, ok := that.(*LabelValueSeriesCount) if !ok { - that2, ok := that.(LabelValuesRequest) + that2, ok := that.(LabelValueSeriesCount) if ok { that1 = &that2 } else { @@ -2307,28 +2540,24 @@ func (this *LabelValuesRequest) Equal(that interface{}) bool { if this.LabelName != that1.LabelName { return false } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if !this.Matchers.Equal(that1.Matchers) { + if len(this.LabelValueSeries) != len(that1.LabelValueSeries) { return false } - if this.Limit != that1.Limit { - return false + for i := range this.LabelValueSeries { + if this.LabelValueSeries[i] != that1.LabelValueSeries[i] { + return false + } } return true } -func (this *LabelValuesResponse) Equal(that interface{}) bool { +func (this *QueryRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*LabelValuesResponse) + that1, ok := that.(*QueryRequest) if !ok { - that2, ok := that.(LabelValuesResponse) + that2, ok := that.(QueryRequest) if ok { that1 = &that2 } else { @@ -2340,24 +2569,33 @@ func (this *LabelValuesResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.LabelValues) != len(that1.LabelValues) { + if this.StartTimestampMs != that1.StartTimestampMs { return false } - for i := range this.LabelValues { - if this.LabelValues[i] != that1.LabelValues[i] { + if this.EndTimestampMs != that1.EndTimestampMs { + return false + } + if len(this.Matchers) != len(that1.Matchers) { + return false + } + for i := range this.Matchers { + if !this.Matchers[i].Equal(that1.Matchers[i]) { return false } } + if this.StreamingChunksBatchSize != that1.StreamingChunksBatchSize { + return false + } return true } -func (this *LabelNamesRequest) Equal(that interface{}) bool { +func (this *ExemplarQueryRequest) Equal(that 
interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*LabelNamesRequest) + that1, ok := that.(*ExemplarQueryRequest) if !ok { - that2, ok := that.(LabelNamesRequest) + that2, ok := that.(ExemplarQueryRequest) if ok { that1 = &that2 } else { @@ -2375,22 +2613,24 @@ func (this *LabelNamesRequest) Equal(that interface{}) bool { if this.EndTimestampMs != that1.EndTimestampMs { return false } - if !this.Matchers.Equal(that1.Matchers) { + if len(this.Matchers) != len(that1.Matchers) { return false } - if this.Limit != that1.Limit { - return false + for i := range this.Matchers { + if !this.Matchers[i].Equal(that1.Matchers[i]) { + return false + } } return true } -func (this *LabelNamesResponse) Equal(that interface{}) bool { +func (this *ActiveSeriesRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*LabelNamesResponse) + that1, ok := that.(*ActiveSeriesRequest) if !ok { - that2, ok := that.(LabelNamesResponse) + that2, ok := that.(ActiveSeriesRequest) if ok { that1 = &that2 } else { @@ -2402,24 +2642,27 @@ func (this *LabelNamesResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.LabelNames) != len(that1.LabelNames) { + if len(this.Matchers) != len(that1.Matchers) { return false } - for i := range this.LabelNames { - if this.LabelNames[i] != that1.LabelNames[i] { + for i := range this.Matchers { + if !this.Matchers[i].Equal(that1.Matchers[i]) { return false } } + if this.Type != that1.Type { + return false + } return true } -func (this *UserStatsRequest) Equal(that interface{}) bool { +func (this *QueryStreamResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*UserStatsRequest) + that1, ok := that.(*QueryStreamResponse) if !ok { - that2, ok := that.(UserStatsRequest) + that2, ok := that.(QueryStreamResponse) if ok { that1 = &that2 } else { @@ -2431,19 +2674,35 @@ func (this *UserStatsRequest) Equal(that 
interface{}) bool { } else if this == nil { return false } - if this.CountMethod != that1.CountMethod { + if len(this.StreamingSeries) != len(that1.StreamingSeries) { + return false + } + for i := range this.StreamingSeries { + if !this.StreamingSeries[i].Equal(&that1.StreamingSeries[i]) { + return false + } + } + if this.IsEndOfSeriesStream != that1.IsEndOfSeriesStream { + return false + } + if len(this.StreamingSeriesChunks) != len(that1.StreamingSeriesChunks) { return false } + for i := range this.StreamingSeriesChunks { + if !this.StreamingSeriesChunks[i].Equal(&that1.StreamingSeriesChunks[i]) { + return false + } + } return true } -func (this *UserStatsResponse) Equal(that interface{}) bool { +func (this *QueryStreamSeries) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*UserStatsResponse) + that1, ok := that.(*QueryStreamSeries) if !ok { - that2, ok := that.(UserStatsResponse) + that2, ok := that.(QueryStreamSeries) if ok { that1 = &that2 } else { @@ -2455,28 +2714,27 @@ func (this *UserStatsResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if this.IngestionRate != that1.IngestionRate { - return false - } - if this.NumSeries != that1.NumSeries { + if len(this.Labels) != len(that1.Labels) { return false } - if this.ApiIngestionRate != that1.ApiIngestionRate { - return false + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } } - if this.RuleIngestionRate != that1.RuleIngestionRate { + if this.ChunkCount != that1.ChunkCount { return false } return true } -func (this *UserIDStatsResponse) Equal(that interface{}) bool { +func (this *QueryStreamSeriesChunks) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*UserIDStatsResponse) + that1, ok := that.(*QueryStreamSeriesChunks) if !ok { - that2, ok := that.(UserIDStatsResponse) + that2, ok := that.(QueryStreamSeriesChunks) if ok { that1 = &that2 } else { @@ 
-2488,22 +2746,27 @@ func (this *UserIDStatsResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if this.UserId != that1.UserId { + if this.SeriesIndex != that1.SeriesIndex { return false } - if !this.Data.Equal(that1.Data) { + if len(this.Chunks) != len(that1.Chunks) { return false } + for i := range this.Chunks { + if !this.Chunks[i].Equal(&that1.Chunks[i]) { + return false + } + } return true } -func (this *UsersStatsResponse) Equal(that interface{}) bool { +func (this *ExemplarQueryResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*UsersStatsResponse) + that1, ok := that.(*ExemplarQueryResponse) if !ok { - that2, ok := that.(UsersStatsResponse) + that2, ok := that.(ExemplarQueryResponse) if ok { that1 = &that2 } else { @@ -2515,24 +2778,24 @@ func (this *UsersStatsResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Stats) != len(that1.Stats) { + if len(this.Timeseries) != len(that1.Timeseries) { return false } - for i := range this.Stats { - if !this.Stats[i].Equal(that1.Stats[i]) { + for i := range this.Timeseries { + if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { return false } } return true } -func (this *MetricsForLabelMatchersRequest) Equal(that interface{}) bool { +func (this *LabelValuesRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*MetricsForLabelMatchersRequest) + that1, ok := that.(*LabelValuesRequest) if !ok { - that2, ok := that.(MetricsForLabelMatchersRequest) + that2, ok := that.(LabelValuesRequest) if ok { that1 = &that2 } else { @@ -2544,33 +2807,31 @@ func (this *MetricsForLabelMatchersRequest) Equal(that interface{}) bool { } else if this == nil { return false } + if this.LabelName != that1.LabelName { + return false + } if this.StartTimestampMs != that1.StartTimestampMs { return false } if this.EndTimestampMs != that1.EndTimestampMs { return false } - if 
len(this.MatchersSet) != len(that1.MatchersSet) { + if !this.Matchers.Equal(that1.Matchers) { return false } - for i := range this.MatchersSet { - if !this.MatchersSet[i].Equal(that1.MatchersSet[i]) { - return false - } - } if this.Limit != that1.Limit { return false } return true } -func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { +func (this *LabelValuesResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*MetricsForLabelMatchersResponse) + that1, ok := that.(*LabelValuesResponse) if !ok { - that2, ok := that.(MetricsForLabelMatchersResponse) + that2, ok := that.(LabelValuesResponse) if ok { that1 = &that2 } else { @@ -2582,24 +2843,24 @@ func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Metric) != len(that1.Metric) { + if len(this.LabelValues) != len(that1.LabelValues) { return false } - for i := range this.Metric { - if !this.Metric[i].Equal(that1.Metric[i]) { + for i := range this.LabelValues { + if this.LabelValues[i] != that1.LabelValues[i] { return false } } return true } -func (this *MetricsMetadataRequest) Equal(that interface{}) bool { +func (this *LabelNamesRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*MetricsMetadataRequest) + that1, ok := that.(*LabelNamesRequest) if !ok { - that2, ok := that.(MetricsMetadataRequest) + that2, ok := that.(LabelNamesRequest) if ok { that1 = &that2 } else { @@ -2611,25 +2872,28 @@ func (this *MetricsMetadataRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if this.Limit != that1.Limit { + if this.StartTimestampMs != that1.StartTimestampMs { return false } - if this.LimitPerMetric != that1.LimitPerMetric { + if this.EndTimestampMs != that1.EndTimestampMs { return false } - if this.Metric != that1.Metric { + if !this.Matchers.Equal(that1.Matchers) { + return false + } + if this.Limit != 
that1.Limit { return false } return true } -func (this *MetricsMetadataResponse) Equal(that interface{}) bool { +func (this *LabelNamesResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*MetricsMetadataResponse) + that1, ok := that.(*LabelNamesResponse) if !ok { - that2, ok := that.(MetricsMetadataResponse) + that2, ok := that.(LabelNamesResponse) if ok { that1 = &that2 } else { @@ -2641,24 +2905,24 @@ func (this *MetricsMetadataResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Metadata) != len(that1.Metadata) { + if len(this.LabelNames) != len(that1.LabelNames) { return false } - for i := range this.Metadata { - if !this.Metadata[i].Equal(that1.Metadata[i]) { + for i := range this.LabelNames { + if this.LabelNames[i] != that1.LabelNames[i] { return false } } return true } -func (this *ActiveSeriesResponse) Equal(that interface{}) bool { +func (this *UserStatsRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*ActiveSeriesResponse) + that1, ok := that.(*UserStatsRequest) if !ok { - that2, ok := that.(ActiveSeriesResponse) + that2, ok := that.(UserStatsRequest) if ok { that1 = &that2 } else { @@ -2670,32 +2934,52 @@ func (this *ActiveSeriesResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Metric) != len(that1.Metric) { + if this.CountMethod != that1.CountMethod { return false } - for i := range this.Metric { - if !this.Metric[i].Equal(that1.Metric[i]) { + return true +} +func (this *UserStatsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UserStatsResponse) + if !ok { + that2, ok := that.(UserStatsResponse) + if ok { + that1 = &that2 + } else { return false } } - if len(this.BucketCount) != len(that1.BucketCount) { + if that1 == nil { + return this == nil + } else if this == nil { return false } - for i := range 
this.BucketCount { - if this.BucketCount[i] != that1.BucketCount[i] { - return false - } + if this.IngestionRate != that1.IngestionRate { + return false + } + if this.NumSeries != that1.NumSeries { + return false + } + if this.ApiIngestionRate != that1.ApiIngestionRate { + return false + } + if this.RuleIngestionRate != that1.RuleIngestionRate { + return false } return true } -func (this *Chunk) Equal(that interface{}) bool { +func (this *UserIDStatsResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*Chunk) + that1, ok := that.(*UserIDStatsResponse) if !ok { - that2, ok := that.(Chunk) + that2, ok := that.(UserIDStatsResponse) if ok { that1 = &that2 } else { @@ -2707,13 +2991,7 @@ func (this *Chunk) Equal(that interface{}) bool { } else if this == nil { return false } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if this.Encoding != that1.Encoding { + if this.UserId != that1.UserId { return false } if !this.Data.Equal(that1.Data) { @@ -2721,14 +2999,14 @@ func (this *Chunk) Equal(that interface{}) bool { } return true } -func (this *LabelMatchers) Equal(that interface{}) bool { +func (this *UsersStatsResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*LabelMatchers) + that1, ok := that.(*UsersStatsResponse) if !ok { - that2, ok := that.(LabelMatchers) + that2, ok := that.(UsersStatsResponse) if ok { that1 = &that2 } else { @@ -2740,24 +3018,24 @@ func (this *LabelMatchers) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Matchers) != len(that1.Matchers) { + if len(this.Stats) != len(that1.Stats) { return false } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { + for i := range this.Stats { + if !this.Stats[i].Equal(that1.Stats[i]) { return false } } return true } -func (this *LabelMatcher) Equal(that 
interface{}) bool { +func (this *MetricsForLabelMatchersRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*LabelMatcher) + that1, ok := that.(*MetricsForLabelMatchersRequest) if !ok { - that2, ok := that.(LabelMatcher) + that2, ok := that.(MetricsForLabelMatchersRequest) if ok { that1 = &that2 } else { @@ -2769,2147 +3047,1646 @@ func (this *LabelMatcher) Equal(that interface{}) bool { } else if this == nil { return false } - if this.Type != that1.Type { + if this.StartTimestampMs != that1.StartTimestampMs { return false } - if this.Name != that1.Name { + if this.EndTimestampMs != that1.EndTimestampMs { return false } - if this.Value != that1.Value { + if len(this.MatchersSet) != len(that1.MatchersSet) { return false } - return true -} -func (this *LabelNamesAndValuesRequest) GoString() string { - if this == nil { - return "nil" + for i := range this.MatchersSet { + if !this.MatchersSet[i].Equal(that1.MatchersSet[i]) { + return false + } } - s := make([]string, 0, 6) - s = append(s, "&client.LabelNamesAndValuesRequest{") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + if this.Limit != that1.Limit { + return false } - s = append(s, "CountMethod: "+fmt.Sprintf("%#v", this.CountMethod)+",\n") - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *LabelNamesAndValuesResponse) GoString() string { - if this == nil { - return "nil" +func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 5) - s = append(s, "&client.LabelNamesAndValuesResponse{") - if this.Items != nil { - s = append(s, "Items: "+fmt.Sprintf("%#v", this.Items)+",\n") + + that1, ok := that.(*MetricsForLabelMatchersResponse) + if !ok { + that2, ok := that.(MetricsForLabelMatchersResponse) + if ok { + that1 = &that2 + } else { + return false + } } - s = append(s, "}") - return strings.Join(s, "") -} 
-func (this *LabelValues) GoString() string { - if this == nil { - return "nil" + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s := make([]string, 0, 6) - s = append(s, "&client.LabelValues{") - s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") - s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesCardinalityRequest) GoString() string { - if this == nil { - return "nil" + if len(this.Metric) != len(that1.Metric) { + return false } - s := make([]string, 0, 7) - s = append(s, "&client.LabelValuesCardinalityRequest{") - s = append(s, "LabelNames: "+fmt.Sprintf("%#v", this.LabelNames)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + for i := range this.Metric { + if !this.Metric[i].Equal(that1.Metric[i]) { + return false + } } - s = append(s, "CountMethod: "+fmt.Sprintf("%#v", this.CountMethod)+",\n") - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *LabelValuesCardinalityResponse) GoString() string { - if this == nil { - return "nil" +func (this *MetricsMetadataRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 5) - s = append(s, "&client.LabelValuesCardinalityResponse{") - if this.Items != nil { - s = append(s, "Items: "+fmt.Sprintf("%#v", this.Items)+",\n") + + that1, ok := that.(*MetricsMetadataRequest) + if !ok { + that2, ok := that.(MetricsMetadataRequest) + if ok { + that1 = &that2 + } else { + return false + } } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValueSeriesCount) GoString() string { - if this == nil { - return "nil" + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s := make([]string, 0, 6) - s = append(s, "&client.LabelValueSeriesCount{") - s = append(s, "LabelName: "+fmt.Sprintf("%#v", 
this.LabelName)+",\n") - keysForLabelValueSeries := make([]string, 0, len(this.LabelValueSeries)) - for k, _ := range this.LabelValueSeries { - keysForLabelValueSeries = append(keysForLabelValueSeries, k) + if this.Limit != that1.Limit { + return false } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabelValueSeries) - mapStringForLabelValueSeries := "map[string]uint64{" - for _, k := range keysForLabelValueSeries { - mapStringForLabelValueSeries += fmt.Sprintf("%#v: %#v,", k, this.LabelValueSeries[k]) + if this.LimitPerMetric != that1.LimitPerMetric { + return false } - mapStringForLabelValueSeries += "}" - if this.LabelValueSeries != nil { - s = append(s, "LabelValueSeries: "+mapStringForLabelValueSeries+",\n") + if this.Metric != that1.Metric { + return false } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *QueryRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&client.QueryRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") +func (this *MetricsMetadataResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s = append(s, "ProjectionInclude: "+fmt.Sprintf("%#v", this.ProjectionInclude)+",\n") - s = append(s, "ProjectionLabels: "+fmt.Sprintf("%#v", this.ProjectionLabels)+",\n") - s = append(s, "StreamingChunksBatchSize: "+fmt.Sprintf("%#v", this.StreamingChunksBatchSize)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ExemplarQueryRequest) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*MetricsMetadataResponse) + if !ok { + that2, ok := that.(MetricsMetadataResponse) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 7) - s = 
append(s, "&client.ExemplarQueryRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ActiveSeriesRequest) GoString() string { - if this == nil { - return "nil" + if len(this.Metadata) != len(that1.Metadata) { + return false } - s := make([]string, 0, 6) - s = append(s, "&client.ActiveSeriesRequest{") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + for i := range this.Metadata { + if !this.Metadata[i].Equal(that1.Metadata[i]) { + return false + } } - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *QueryStreamResponse) GoString() string { - if this == nil { - return "nil" +func (this *ActiveSeriesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 7) - s = append(s, "&client.QueryStreamResponse{") - if this.StreamingSeries != nil { - vs := make([]QueryStreamSeries, len(this.StreamingSeries)) - for i := range vs { - vs[i] = this.StreamingSeries[i] + + that1, ok := that.(*ActiveSeriesResponse) + if !ok { + that2, ok := that.(ActiveSeriesResponse) + if ok { + that1 = &that2 + } else { + return false } - s = append(s, "StreamingSeries: "+fmt.Sprintf("%#v", vs)+",\n") } - s = append(s, "IsEndOfSeriesStream: "+fmt.Sprintf("%#v", this.IsEndOfSeriesStream)+",\n") - if this.StreamingSeriesChunks != nil { - vs := make([]QueryStreamSeriesChunks, len(this.StreamingSeriesChunks)) - for i := range vs { - vs[i] = this.StreamingSeriesChunks[i] - } - s = append(s, "StreamingSeriesChunks: "+fmt.Sprintf("%#v", vs)+",\n") + 
if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryStreamSeries) GoString() string { - if this == nil { - return "nil" + if len(this.Metric) != len(that1.Metric) { + return false } - s := make([]string, 0, 6) - s = append(s, "&client.QueryStreamSeries{") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - s = append(s, "ChunkCount: "+fmt.Sprintf("%#v", this.ChunkCount)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryStreamSeriesChunks) GoString() string { - if this == nil { - return "nil" + for i := range this.Metric { + if !this.Metric[i].Equal(that1.Metric[i]) { + return false + } } - s := make([]string, 0, 6) - s = append(s, "&client.QueryStreamSeriesChunks{") - s = append(s, "SeriesIndex: "+fmt.Sprintf("%#v", this.SeriesIndex)+",\n") - if this.Chunks != nil { - vs := make([]Chunk, len(this.Chunks)) - for i := range vs { - vs[i] = this.Chunks[i] + if len(this.BucketCount) != len(that1.BucketCount) { + return false + } + for i := range this.BucketCount { + if this.BucketCount[i] != that1.BucketCount[i] { + return false } - s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *ExemplarQueryResponse) GoString() string { - if this == nil { - return "nil" +func (this *Chunk) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 5) - s = append(s, "&client.ExemplarQueryResponse{") - if this.Timeseries != nil { - vs := make([]mimirpb.TimeSeries, len(this.Timeseries)) - for i := range vs { - vs[i] = this.Timeseries[i] + + that1, ok := that.(*Chunk) + if !ok { + that2, ok := that.(Chunk) + if ok { + that1 = &that2 + } else { + return false } - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesRequest) GoString() 
string { - if this == nil { - return "nil" + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s := make([]string, 0, 9) - s = append(s, "&client.LabelValuesRequest{") - s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + if this.StartTimestampMs != that1.StartTimestampMs { + return false } - s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesResponse) GoString() string { - if this == nil { - return "nil" + if this.EndTimestampMs != that1.EndTimestampMs { + return false } - s := make([]string, 0, 5) - s = append(s, "&client.LabelValuesResponse{") - s = append(s, "LabelValues: "+fmt.Sprintf("%#v", this.LabelValues)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelNamesRequest) GoString() string { - if this == nil { - return "nil" + if this.Encoding != that1.Encoding { + return false } - s := make([]string, 0, 8) - s = append(s, "&client.LabelNamesRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + if !this.Data.Equal(that1.Data) { + return false } - s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *LabelNamesResponse) GoString() string { - if this == nil { - return "nil" +func (this *LabelMatchers) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 5) - s = append(s, 
"&client.LabelNamesResponse{") - s = append(s, "LabelNames: "+fmt.Sprintf("%#v", this.LabelNames)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserStatsRequest) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*LabelMatchers) + if !ok { + that2, ok := that.(LabelMatchers) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 5) - s = append(s, "&client.UserStatsRequest{") - s = append(s, "CountMethod: "+fmt.Sprintf("%#v", this.CountMethod)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserStatsResponse) GoString() string { - if this == nil { - return "nil" + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s := make([]string, 0, 8) - s = append(s, "&client.UserStatsResponse{") - s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n") - s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n") - s = append(s, "ApiIngestionRate: "+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n") - s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserIDStatsResponse) GoString() string { - if this == nil { - return "nil" + if len(this.Matchers) != len(that1.Matchers) { + return false } - s := make([]string, 0, 6) - s = append(s, "&client.UserIDStatsResponse{") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - if this.Data != nil { - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + for i := range this.Matchers { + if !this.Matchers[i].Equal(that1.Matchers[i]) { + return false + } } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *UsersStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.UsersStatsResponse{") - if this.Stats != nil { - s = append(s, "Stats: 
"+fmt.Sprintf("%#v", this.Stats)+",\n") +func (this *LabelMatcher) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsForLabelMatchersRequest) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*LabelMatcher) + if !ok { + that2, ok := that.(LabelMatcher) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 8) - s = append(s, "&client.MetricsForLabelMatchersRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.MatchersSet != nil { - s = append(s, "MatchersSet: "+fmt.Sprintf("%#v", this.MatchersSet)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsForLabelMatchersResponse) GoString() string { - if this == nil { - return "nil" + if this.Type != that1.Type { + return false } - s := make([]string, 0, 5) - s = append(s, "&client.MetricsForLabelMatchersResponse{") - if this.Metric != nil { - s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") + if this.Name != that1.Name { + return false } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsMetadataRequest) GoString() string { - if this == nil { - return "nil" + if this.Value != that1.Value { + return false } - s := make([]string, 0, 7) - s = append(s, "&client.MetricsMetadataRequest{") - s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") - s = append(s, "LimitPerMetric: "+fmt.Sprintf("%#v", this.LimitPerMetric)+",\n") - s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *MetricsMetadataResponse) GoString() string { - if this == nil 
{ - return "nil" +func (this *ResourceAttributesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 5) - s = append(s, "&client.MetricsMetadataResponse{") - if this.Metadata != nil { - s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n") + + that1, ok := that.(*ResourceAttributesRequest) + if !ok { + that2, ok := that.(ResourceAttributesRequest) + if ok { + that1 = &that2 + } else { + return false + } } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ActiveSeriesResponse) GoString() string { - if this == nil { - return "nil" + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s := make([]string, 0, 6) - s = append(s, "&client.ActiveSeriesResponse{") - if this.Metric != nil { - s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") + if this.StartTimestampMs != that1.StartTimestampMs { + return false } - s = append(s, "BucketCount: "+fmt.Sprintf("%#v", this.BucketCount)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Chunk) GoString() string { - if this == nil { - return "nil" + if this.EndTimestampMs != that1.EndTimestampMs { + return false } - s := make([]string, 0, 8) - s = append(s, "&client.Chunk{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - s = append(s, "Encoding: "+fmt.Sprintf("%#v", this.Encoding)+",\n") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelMatchers) GoString() string { - if this == nil { - return "nil" + if len(this.Matchers) != len(that1.Matchers) { + return false } - s := make([]string, 0, 5) - s = append(s, "&client.LabelMatchers{") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + for i := range this.Matchers { + if 
!this.Matchers[i].Equal(that1.Matchers[i]) { + return false + } } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelMatcher) GoString() string { - if this == nil { - return "nil" + if this.Limit != that1.Limit { + return false } - s := make([]string, 0, 7) - s = append(s, "&client.LabelMatcher{") - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") + return true } -func valueToGoStringIngester(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" +func (this *ResourceAttributesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IngesterClient is the client API for Ingester service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type IngesterClient interface { - Push(ctx context.Context, in *mimirpb.WriteRequest, opts ...grpc.CallOption) (*mimirpb.WriteResponse, error) - QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) - QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error) - LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) - LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) - UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) - AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) - MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) - MetricsMetadata(ctx context.Context, in *MetricsMetadataRequest, opts ...grpc.CallOption) (*MetricsMetadataResponse, error) - // LabelNamesAndValues provides all values for each label that matches the matchers. - // The order of the labels and values is not guaranteed. - LabelNamesAndValues(ctx context.Context, in *LabelNamesAndValuesRequest, opts ...grpc.CallOption) (Ingester_LabelNamesAndValuesClient, error) - // LabelValuesCardinality returns all values and series total count for label_names labels - // that match the matchers. - // The listing order of the labels is not guaranteed. 
- LabelValuesCardinality(ctx context.Context, in *LabelValuesCardinalityRequest, opts ...grpc.CallOption) (Ingester_LabelValuesCardinalityClient, error) - ActiveSeries(ctx context.Context, in *ActiveSeriesRequest, opts ...grpc.CallOption) (Ingester_ActiveSeriesClient, error) -} - -type ingesterClient struct { - cc *grpc.ClientConn -} - -func NewIngesterClient(cc *grpc.ClientConn) IngesterClient { - return &ingesterClient{cc} -} -func (c *ingesterClient) Push(ctx context.Context, in *mimirpb.WriteRequest, opts ...grpc.CallOption) (*mimirpb.WriteResponse, error) { - out := new(mimirpb.WriteResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/Push", in, out, opts...) - if err != nil { - return nil, err + that1, ok := that.(*ResourceAttributesResponse) + if !ok { + that2, ok := that.(ResourceAttributesResponse) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil -} - -func (c *ingesterClient) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/QueryStream", opts...) 
- if err != nil { - return nil, err + if that1 == nil { + return this == nil + } else if this == nil { + return false } - x := &ingesterQueryStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err + if len(this.Items) != len(that1.Items) { + return false } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err + for i := range this.Items { + if !this.Items[i].Equal(that1.Items[i]) { + return false + } } - return x, nil -} - -type Ingester_QueryStreamClient interface { - Recv() (*QueryStreamResponse, error) - grpc.ClientStream -} - -type ingesterQueryStreamClient struct { - grpc.ClientStream + return true } - -func (x *ingesterQueryStreamClient) Recv() (*QueryStreamResponse, error) { - m := new(QueryStreamResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (this *SeriesResourceAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil } - return m, nil -} -func (c *ingesterClient) QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error) { - out := new(ExemplarQueryResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/QueryExemplars", in, out, opts...) - if err != nil { - return nil, err + that1, ok := that.(*SeriesResourceAttributes) + if !ok { + that2, ok := that.(SeriesResourceAttributes) + if ok { + that1 = &that2 + } else { + return false + } } - return out, nil -} - -func (c *ingesterClient) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) { - out := new(LabelValuesResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelValues", in, out, opts...) 
- if err != nil { - return nil, err + if that1 == nil { + return this == nil + } else if this == nil { + return false } - return out, nil -} - -func (c *ingesterClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) { - out := new(LabelNamesResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelNames", in, out, opts...) - if err != nil { - return nil, err + if len(this.Labels) != len(that1.Labels) { + return false } - return out, nil -} - -func (c *ingesterClient) UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) { - out := new(UserStatsResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/UserStats", in, out, opts...) - if err != nil { - return nil, err + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } } - return out, nil -} - -func (c *ingesterClient) AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) { - out := new(UsersStatsResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/AllUserStats", in, out, opts...) - if err != nil { - return nil, err + if len(this.Versions) != len(that1.Versions) { + return false } - return out, nil -} - -func (c *ingesterClient) MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) { - out := new(MetricsForLabelMatchersResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsForLabelMatchers", in, out, opts...) 
- if err != nil { - return nil, err + for i := range this.Versions { + if !this.Versions[i].Equal(that1.Versions[i]) { + return false + } } - return out, nil + return true } - -func (c *ingesterClient) MetricsMetadata(ctx context.Context, in *MetricsMetadataRequest, opts ...grpc.CallOption) (*MetricsMetadataResponse, error) { - out := new(MetricsMetadataResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsMetadata", in, out, opts...) - if err != nil { - return nil, err +func (this *ResourceVersionData) Equal(that interface{}) bool { + if that == nil { + return this == nil } - return out, nil -} -func (c *ingesterClient) LabelNamesAndValues(ctx context.Context, in *LabelNamesAndValuesRequest, opts ...grpc.CallOption) (Ingester_LabelNamesAndValuesClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/LabelNamesAndValues", opts...) - if err != nil { - return nil, err + that1, ok := that.(*ResourceVersionData) + if !ok { + that2, ok := that.(ResourceVersionData) + if ok { + that1 = &that2 + } else { + return false + } } - x := &ingesterLabelNamesAndValuesClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err + if that1 == nil { + return this == nil + } else if this == nil { + return false } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err + if len(this.Identifying) != len(that1.Identifying) { + return false } - return x, nil -} - -type Ingester_LabelNamesAndValuesClient interface { - Recv() (*LabelNamesAndValuesResponse, error) - grpc.ClientStream -} - -type ingesterLabelNamesAndValuesClient struct { - grpc.ClientStream -} - -func (x *ingesterLabelNamesAndValuesClient) Recv() (*LabelNamesAndValuesResponse, error) { - m := new(LabelNamesAndValuesResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err + for i := range this.Identifying { + if this.Identifying[i] != that1.Identifying[i] { + return false + } } - return m, nil -} - -func (c 
*ingesterClient) LabelValuesCardinality(ctx context.Context, in *LabelValuesCardinalityRequest, opts ...grpc.CallOption) (Ingester_LabelValuesCardinalityClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/LabelValuesCardinality", opts...) - if err != nil { - return nil, err + if len(this.Descriptive) != len(that1.Descriptive) { + return false } - x := &ingesterLabelValuesCardinalityClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err + for i := range this.Descriptive { + if this.Descriptive[i] != that1.Descriptive[i] { + return false + } } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err + if len(this.Entities) != len(that1.Entities) { + return false } - return x, nil -} - -type Ingester_LabelValuesCardinalityClient interface { - Recv() (*LabelValuesCardinalityResponse, error) - grpc.ClientStream -} - -type ingesterLabelValuesCardinalityClient struct { - grpc.ClientStream -} - -func (x *ingesterLabelValuesCardinalityClient) Recv() (*LabelValuesCardinalityResponse, error) { - m := new(LabelValuesCardinalityResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err + for i := range this.Entities { + if !this.Entities[i].Equal(that1.Entities[i]) { + return false + } } - return m, nil -} - -func (c *ingesterClient) ActiveSeries(ctx context.Context, in *ActiveSeriesRequest, opts ...grpc.CallOption) (Ingester_ActiveSeriesClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[3], "/cortex.Ingester/ActiveSeries", opts...) 
- if err != nil { - return nil, err + if this.MinTimeMs != that1.MinTimeMs { + return false } - x := &ingesterActiveSeriesClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err + if this.MaxTimeMs != that1.MaxTimeMs { + return false } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Ingester_ActiveSeriesClient interface { - Recv() (*ActiveSeriesResponse, error) - grpc.ClientStream -} - -type ingesterActiveSeriesClient struct { - grpc.ClientStream + return true } - -func (x *ingesterActiveSeriesClient) Recv() (*ActiveSeriesResponse, error) { - m := new(ActiveSeriesResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (this *EntityData) Equal(that interface{}) bool { + if that == nil { + return this == nil } - return m, nil -} - -// IngesterServer is the server API for Ingester service. -type IngesterServer interface { - Push(context.Context, *mimirpb.WriteRequest) (*mimirpb.WriteResponse, error) - QueryStream(*QueryRequest, Ingester_QueryStreamServer) error - QueryExemplars(context.Context, *ExemplarQueryRequest) (*ExemplarQueryResponse, error) - LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) - LabelNames(context.Context, *LabelNamesRequest) (*LabelNamesResponse, error) - UserStats(context.Context, *UserStatsRequest) (*UserStatsResponse, error) - AllUserStats(context.Context, *UserStatsRequest) (*UsersStatsResponse, error) - MetricsForLabelMatchers(context.Context, *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) - MetricsMetadata(context.Context, *MetricsMetadataRequest) (*MetricsMetadataResponse, error) - // LabelNamesAndValues provides all values for each label that matches the matchers. - // The order of the labels and values is not guaranteed. 
- LabelNamesAndValues(*LabelNamesAndValuesRequest, Ingester_LabelNamesAndValuesServer) error - // LabelValuesCardinality returns all values and series total count for label_names labels - // that match the matchers. - // The listing order of the labels is not guaranteed. - LabelValuesCardinality(*LabelValuesCardinalityRequest, Ingester_LabelValuesCardinalityServer) error - ActiveSeries(*ActiveSeriesRequest, Ingester_ActiveSeriesServer) error -} - -// UnimplementedIngesterServer can be embedded to have forward compatible implementations. -type UnimplementedIngesterServer struct { -} - -func (*UnimplementedIngesterServer) Push(ctx context.Context, req *mimirpb.WriteRequest) (*mimirpb.WriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") -} -func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error { - return status.Errorf(codes.Unimplemented, "method QueryStream not implemented") -} -func (*UnimplementedIngesterServer) QueryExemplars(ctx context.Context, req *ExemplarQueryRequest) (*ExemplarQueryResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryExemplars not implemented") -} -func (*UnimplementedIngesterServer) LabelValues(ctx context.Context, req *LabelValuesRequest) (*LabelValuesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") -} -func (*UnimplementedIngesterServer) LabelNames(ctx context.Context, req *LabelNamesRequest) (*LabelNamesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") -} -func (*UnimplementedIngesterServer) UserStats(ctx context.Context, req *UserStatsRequest) (*UserStatsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserStats not implemented") -} -func (*UnimplementedIngesterServer) AllUserStats(ctx context.Context, req *UserStatsRequest) (*UsersStatsResponse, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method AllUserStats not implemented") -} -func (*UnimplementedIngesterServer) MetricsForLabelMatchers(ctx context.Context, req *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MetricsForLabelMatchers not implemented") -} -func (*UnimplementedIngesterServer) MetricsMetadata(ctx context.Context, req *MetricsMetadataRequest) (*MetricsMetadataResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MetricsMetadata not implemented") -} -func (*UnimplementedIngesterServer) LabelNamesAndValues(req *LabelNamesAndValuesRequest, srv Ingester_LabelNamesAndValuesServer) error { - return status.Errorf(codes.Unimplemented, "method LabelNamesAndValues not implemented") -} -func (*UnimplementedIngesterServer) LabelValuesCardinality(req *LabelValuesCardinalityRequest, srv Ingester_LabelValuesCardinalityServer) error { - return status.Errorf(codes.Unimplemented, "method LabelValuesCardinality not implemented") -} -func (*UnimplementedIngesterServer) ActiveSeries(req *ActiveSeriesRequest, srv Ingester_ActiveSeriesServer) error { - return status.Errorf(codes.Unimplemented, "method ActiveSeries not implemented") -} - -func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { - s.RegisterService(&_Ingester_serviceDesc, srv) -} -func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(mimirpb.WriteRequest) - if err := dec(in); err != nil { - return nil, err + that1, ok := that.(*EntityData) + if !ok { + that2, ok := that.(EntityData) + if ok { + that1 = &that2 + } else { + return false + } } - if interceptor == nil { - return srv.(IngesterServer).Push(ctx, in) + if that1 == nil { + return this == nil + } else if this == nil { + return false } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/Push", + 
if this.Type != that1.Type { + return false } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).Push(ctx, req.(*mimirpb.WriteRequest)) + if len(this.Id) != len(that1.Id) { + return false } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_QueryStream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(QueryRequest) - if err := stream.RecvMsg(m); err != nil { - return err + for i := range this.Id { + if this.Id[i] != that1.Id[i] { + return false + } } - return srv.(IngesterServer).QueryStream(m, &ingesterQueryStreamServer{stream}) -} - -type Ingester_QueryStreamServer interface { - Send(*QueryStreamResponse) error - grpc.ServerStream -} - -type ingesterQueryStreamServer struct { - grpc.ServerStream -} - -func (x *ingesterQueryStreamServer) Send(m *QueryStreamResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Ingester_QueryExemplars_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExemplarQueryRequest) - if err := dec(in); err != nil { - return nil, err + if len(this.Description) != len(that1.Description) { + return false } - if interceptor == nil { - return srv.(IngesterServer).QueryExemplars(ctx, in) + for i := range this.Description { + if this.Description[i] != that1.Description[i] { + return false + } } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/QueryExemplars", + return true +} +func (this *LabelNamesAndValuesRequest) GoString() string { + if this == nil { + return "nil" } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).QueryExemplars(ctx, req.(*ExemplarQueryRequest)) + s := make([]string, 0, 6) + s = append(s, "&client.LabelNamesAndValuesRequest{") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") } - return 
interceptor(ctx, in, info, handler) + s = append(s, "CountMethod: "+fmt.Sprintf("%#v", this.CountMethod)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func _Ingester_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LabelValuesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).LabelValues(ctx, in) +func (this *LabelNamesAndValuesResponse) GoString() string { + if this == nil { + return "nil" } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/LabelValues", + s := make([]string, 0, 5) + s = append(s, "&client.LabelNamesAndValuesResponse{") + if this.Items != nil { + s = append(s, "Items: "+fmt.Sprintf("%#v", this.Items)+",\n") } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).LabelValues(ctx, req.(*LabelValuesRequest)) + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelValues) GoString() string { + if this == nil { + return "nil" } - return interceptor(ctx, in, info, handler) + s := make([]string, 0, 6) + s = append(s, "&client.LabelValues{") + s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func _Ingester_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LabelNamesRequest) - if err := dec(in); err != nil { - return nil, err +func (this *LabelValuesCardinalityRequest) GoString() string { + if this == nil { + return "nil" } - if interceptor == nil { - return srv.(IngesterServer).LabelNames(ctx, in) + s := make([]string, 0, 7) + s = append(s, "&client.LabelValuesCardinalityRequest{") + s = append(s, "LabelNames: 
"+fmt.Sprintf("%#v", this.LabelNames)+",\n") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/LabelNames", + s = append(s, "CountMethod: "+fmt.Sprintf("%#v", this.CountMethod)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelValuesCardinalityResponse) GoString() string { + if this == nil { + return "nil" } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).LabelNames(ctx, req.(*LabelNamesRequest)) + s := make([]string, 0, 5) + s = append(s, "&client.LabelValuesCardinalityResponse{") + if this.Items != nil { + s = append(s, "Items: "+fmt.Sprintf("%#v", this.Items)+",\n") } - return interceptor(ctx, in, info, handler) + s = append(s, "}") + return strings.Join(s, "") } - -func _Ingester_UserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserStatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).UserStats(ctx, in) +func (this *LabelValueSeriesCount) GoString() string { + if this == nil { + return "nil" } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/UserStats", + s := make([]string, 0, 6) + s = append(s, "&client.LabelValueSeriesCount{") + s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") + keysForLabelValueSeries := make([]string, 0, len(this.LabelValueSeries)) + for k, _ := range this.LabelValueSeries { + keysForLabelValueSeries = append(keysForLabelValueSeries, k) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).UserStats(ctx, req.(*UserStatsRequest)) + github_com_gogo_protobuf_sortkeys.Strings(keysForLabelValueSeries) + mapStringForLabelValueSeries := 
"map[string]uint64{" + for _, k := range keysForLabelValueSeries { + mapStringForLabelValueSeries += fmt.Sprintf("%#v: %#v,", k, this.LabelValueSeries[k]) } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_AllUserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserStatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).AllUserStats(ctx, in) + mapStringForLabelValueSeries += "}" + if this.LabelValueSeries != nil { + s = append(s, "LabelValueSeries: "+mapStringForLabelValueSeries+",\n") } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/AllUserStats", + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryRequest) GoString() string { + if this == nil { + return "nil" } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).AllUserStats(ctx, req.(*UserStatsRequest)) + s := make([]string, 0, 8) + s = append(s, "&client.QueryRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") } - return interceptor(ctx, in, info, handler) + s = append(s, "StreamingChunksBatchSize: "+fmt.Sprintf("%#v", this.StreamingChunksBatchSize)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func _Ingester_MetricsForLabelMatchers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MetricsForLabelMatchersRequest) - if err := dec(in); err != nil { - return nil, err +func (this *ExemplarQueryRequest) GoString() string { + if this == nil { + return "nil" } - if interceptor == nil 
{ - return srv.(IngesterServer).MetricsForLabelMatchers(ctx, in) + s := make([]string, 0, 7) + s = append(s, "&client.ExemplarQueryRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/MetricsForLabelMatchers", + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ActiveSeriesRequest) GoString() string { + if this == nil { + return "nil" } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).MetricsForLabelMatchers(ctx, req.(*MetricsForLabelMatchersRequest)) + s := make([]string, 0, 6) + s = append(s, "&client.ActiveSeriesRequest{") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") } - return interceptor(ctx, in, info, handler) + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func _Ingester_MetricsMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MetricsMetadataRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).MetricsMetadata(ctx, in) +func (this *QueryStreamResponse) GoString() string { + if this == nil { + return "nil" } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/MetricsMetadata", + s := make([]string, 0, 7) + s = append(s, "&client.QueryStreamResponse{") + if this.StreamingSeries != nil { + vs := make([]QueryStreamSeries, len(this.StreamingSeries)) + for i := range vs { + vs[i] = this.StreamingSeries[i] + } + s = append(s, "StreamingSeries: "+fmt.Sprintf("%#v", 
vs)+",\n") } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).MetricsMetadata(ctx, req.(*MetricsMetadataRequest)) + s = append(s, "IsEndOfSeriesStream: "+fmt.Sprintf("%#v", this.IsEndOfSeriesStream)+",\n") + if this.StreamingSeriesChunks != nil { + vs := make([]QueryStreamSeriesChunks, len(this.StreamingSeriesChunks)) + for i := range vs { + vs[i] = this.StreamingSeriesChunks[i] + } + s = append(s, "StreamingSeriesChunks: "+fmt.Sprintf("%#v", vs)+",\n") } - return interceptor(ctx, in, info, handler) + s = append(s, "}") + return strings.Join(s, "") } - -func _Ingester_LabelNamesAndValues_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(LabelNamesAndValuesRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (this *QueryStreamSeries) GoString() string { + if this == nil { + return "nil" } - return srv.(IngesterServer).LabelNamesAndValues(m, &ingesterLabelNamesAndValuesServer{stream}) -} - -type Ingester_LabelNamesAndValuesServer interface { - Send(*LabelNamesAndValuesResponse) error - grpc.ServerStream -} - -type ingesterLabelNamesAndValuesServer struct { - grpc.ServerStream + s := make([]string, 0, 6) + s = append(s, "&client.QueryStreamSeries{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + s = append(s, "ChunkCount: "+fmt.Sprintf("%#v", this.ChunkCount)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (x *ingesterLabelNamesAndValuesServer) Send(m *LabelNamesAndValuesResponse) error { - return x.ServerStream.SendMsg(m) +func (this *QueryStreamSeriesChunks) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&client.QueryStreamSeriesChunks{") + s = append(s, "SeriesIndex: "+fmt.Sprintf("%#v", this.SeriesIndex)+",\n") + if this.Chunks != nil { + vs := make([]Chunk, len(this.Chunks)) + for i := range vs { + vs[i] = this.Chunks[i] + } + s = append(s, "Chunks: "+fmt.Sprintf("%#v", 
vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func _Ingester_LabelValuesCardinality_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(LabelValuesCardinalityRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (this *ExemplarQueryResponse) GoString() string { + if this == nil { + return "nil" } - return srv.(IngesterServer).LabelValuesCardinality(m, &ingesterLabelValuesCardinalityServer{stream}) + s := make([]string, 0, 5) + s = append(s, "&client.ExemplarQueryResponse{") + if this.Timeseries != nil { + vs := make([]mimirpb.TimeSeries, len(this.Timeseries)) + for i := range vs { + vs[i] = this.Timeseries[i] + } + s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -type Ingester_LabelValuesCardinalityServer interface { - Send(*LabelValuesCardinalityResponse) error - grpc.ServerStream +func (this *LabelValuesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&client.LabelValuesRequest{") + s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + } + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -type ingesterLabelValuesCardinalityServer struct { - grpc.ServerStream +func (this *LabelValuesResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.LabelValuesResponse{") + s = append(s, "LabelValues: "+fmt.Sprintf("%#v", this.LabelValues)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (x *ingesterLabelValuesCardinalityServer) Send(m 
*LabelValuesCardinalityResponse) error { - return x.ServerStream.SendMsg(m) +func (this *LabelNamesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.LabelNamesRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + } + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func _Ingester_ActiveSeries_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ActiveSeriesRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (this *LabelNamesResponse) GoString() string { + if this == nil { + return "nil" } - return srv.(IngesterServer).ActiveSeries(m, &ingesterActiveSeriesServer{stream}) + s := make([]string, 0, 5) + s = append(s, "&client.LabelNamesResponse{") + s = append(s, "LabelNames: "+fmt.Sprintf("%#v", this.LabelNames)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -type Ingester_ActiveSeriesServer interface { - Send(*ActiveSeriesResponse) error - grpc.ServerStream +func (this *UserStatsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&client.UserStatsRequest{") + s = append(s, "CountMethod: "+fmt.Sprintf("%#v", this.CountMethod)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -type ingesterActiveSeriesServer struct { - grpc.ServerStream +func (this *UserStatsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.UserStatsResponse{") + s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n") + s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n") + s = append(s, "ApiIngestionRate: 
"+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n") + s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (x *ingesterActiveSeriesServer) Send(m *ActiveSeriesResponse) error { - return x.ServerStream.SendMsg(m) -} - -var _Ingester_serviceDesc = grpc.ServiceDesc{ - ServiceName: "cortex.Ingester", - HandlerType: (*IngesterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Push", - Handler: _Ingester_Push_Handler, - }, - { - MethodName: "QueryExemplars", - Handler: _Ingester_QueryExemplars_Handler, - }, - { - MethodName: "LabelValues", - Handler: _Ingester_LabelValues_Handler, - }, - { - MethodName: "LabelNames", - Handler: _Ingester_LabelNames_Handler, - }, - { - MethodName: "UserStats", - Handler: _Ingester_UserStats_Handler, - }, - { - MethodName: "AllUserStats", - Handler: _Ingester_AllUserStats_Handler, - }, - { - MethodName: "MetricsForLabelMatchers", - Handler: _Ingester_MetricsForLabelMatchers_Handler, - }, - { - MethodName: "MetricsMetadata", - Handler: _Ingester_MetricsMetadata_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "QueryStream", - Handler: _Ingester_QueryStream_Handler, - ServerStreams: true, - }, - { - StreamName: "LabelNamesAndValues", - Handler: _Ingester_LabelNamesAndValues_Handler, - ServerStreams: true, - }, - { - StreamName: "LabelValuesCardinality", - Handler: _Ingester_LabelValuesCardinality_Handler, - ServerStreams: true, - }, - { - StreamName: "ActiveSeries", - Handler: _Ingester_ActiveSeries_Handler, - ServerStreams: true, - }, - }, - Metadata: "ingester.proto", -} - -func (m *LabelNamesAndValuesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (this *UserIDStatsResponse) GoString() string { + if this == nil { + return "nil" } - return dAtA[:n], nil -} - -func (m 
*LabelNamesAndValuesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + s := make([]string, 0, 6) + s = append(s, "&client.UserIDStatsResponse{") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + if this.Data != nil { + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelNamesAndValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CountMethod != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.CountMethod)) - i-- - dAtA[i] = 0x10 +func (this *UsersStatsResponse) GoString() string { + if this == nil { + return "nil" } - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + s := make([]string, 0, 5) + s = append(s, "&client.UsersStatsResponse{") + if this.Stats != nil { + s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") } - return len(dAtA) - i, nil + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelNamesAndValuesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (this *MetricsForLabelMatchersRequest) GoString() string { + if this == nil { + return "nil" } - return dAtA[:n], nil -} - -func (m *LabelNamesAndValuesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelNamesAndValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + s := make([]string, 0, 8) + s = append(s, "&client.MetricsForLabelMatchersRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.MatchersSet != nil { + s = append(s, "MatchersSet: "+fmt.Sprintf("%#v", this.MatchersSet)+",\n") } - return len(dAtA) - i, nil + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelValues) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (this *MetricsForLabelMatchersResponse) GoString() string { + if this == nil { + return "nil" } - return dAtA[:n], nil + s := make([]string, 0, 5) + s = append(s, "&client.MetricsForLabelMatchersResponse{") + if this.Metric != nil { + s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelValues) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (this *MetricsMetadataRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&client.MetricsMetadataRequest{") + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "LimitPerMetric: "+fmt.Sprintf("%#v", this.LimitPerMetric)+",\n") + s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelValues) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; 
iNdEx-- { - i -= len(m.Values[iNdEx]) - copy(dAtA[i:], m.Values[iNdEx]) - i = encodeVarintIngester(dAtA, i, uint64(len(m.Values[iNdEx]))) - i-- - dAtA[i] = 0x12 - } +func (this *MetricsMetadataResponse) GoString() string { + if this == nil { + return "nil" } - if len(m.LabelName) > 0 { - i -= len(m.LabelName) - copy(dAtA[i:], m.LabelName) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelName))) - i-- - dAtA[i] = 0xa + s := make([]string, 0, 5) + s = append(s, "&client.MetricsMetadataResponse{") + if this.Metadata != nil { + s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n") } - return len(dAtA) - i, nil + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelValuesCardinalityRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (this *ActiveSeriesResponse) GoString() string { + if this == nil { + return "nil" } - return dAtA[:n], nil -} - -func (m *LabelValuesCardinalityRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + s := make([]string, 0, 6) + s = append(s, "&client.ActiveSeriesResponse{") + if this.Metric != nil { + s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") + } + s = append(s, "BucketCount: "+fmt.Sprintf("%#v", this.BucketCount)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelValuesCardinalityRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CountMethod != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.CountMethod)) - i-- - dAtA[i] = 0x18 +func (this *Chunk) GoString() string { + if this == nil { + return "nil" } - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + s := make([]string, 0, 8) + s = append(s, "&client.Chunk{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + s = append(s, "Encoding: "+fmt.Sprintf("%#v", this.Encoding)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelMatchers) GoString() string { + if this == nil { + return "nil" } - if len(m.LabelNames) > 0 { - for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LabelNames[iNdEx]) - copy(dAtA[i:], m.LabelNames[iNdEx]) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } + s := make([]string, 0, 5) + s = append(s, "&client.LabelMatchers{") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") } - return len(dAtA) - i, nil + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelValuesCardinalityResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (this *LabelMatcher) GoString() string { + if this == nil { + return "nil" } - return dAtA[:n], nil -} - -func (m *LabelValuesCardinalityResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + s := make([]string, 0, 7) + s = append(s, "&client.LabelMatcher{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelValuesCardinalityResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if 
len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +func (this *ResourceAttributesRequest) GoString() string { + if this == nil { + return "nil" } - return len(dAtA) - i, nil -} - -func (m *LabelValueSeriesCount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + s := make([]string, 0, 8) + s = append(s, "&client.ResourceAttributesRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") + if this.Matchers != nil { + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") } - return dAtA[:n], nil -} - -func (m *LabelValueSeriesCount) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (m *LabelValueSeriesCount) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelValueSeries) > 0 { - for k := range m.LabelValueSeries { - v := m.LabelValueSeries[k] - baseI := i - i = encodeVarintIngester(dAtA, i, uint64(v)) - i-- - dAtA[i] = 0x10 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintIngester(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintIngester(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } +func (this *ResourceAttributesResponse) GoString() string { + if this == nil { + return "nil" } - if len(m.LabelName) > 0 { - i -= len(m.LabelName) - copy(dAtA[i:], m.LabelName) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelName))) - i-- - dAtA[i] = 0xa + s := 
make([]string, 0, 5) + s = append(s, "&client.ResourceAttributesResponse{") + if this.Items != nil { + s = append(s, "Items: "+fmt.Sprintf("%#v", this.Items)+",\n") } - return len(dAtA) - i, nil + s = append(s, "}") + return strings.Join(s, "") } - -func (m *QueryRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (this *SeriesResourceAttributes) GoString() string { + if this == nil { + return "nil" } - return dAtA[:n], nil + s := make([]string, 0, 6) + s = append(s, "&client.SeriesResourceAttributes{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Versions != nil { + s = append(s, "Versions: "+fmt.Sprintf("%#v", this.Versions)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (this *ResourceVersionData) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&client.ResourceVersionData{") + keysForIdentifying := make([]string, 0, len(this.Identifying)) + for k, _ := range this.Identifying { + keysForIdentifying = append(keysForIdentifying, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForIdentifying) + mapStringForIdentifying := "map[string]string{" + for _, k := range keysForIdentifying { + mapStringForIdentifying += fmt.Sprintf("%#v: %#v,", k, this.Identifying[k]) + } + mapStringForIdentifying += "}" + if this.Identifying != nil { + s = append(s, "Identifying: "+mapStringForIdentifying+",\n") + } + keysForDescriptive := make([]string, 0, len(this.Descriptive)) + for k, _ := range this.Descriptive { + keysForDescriptive = append(keysForDescriptive, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDescriptive) + mapStringForDescriptive := "map[string]string{" + for _, k := range keysForDescriptive 
{ + mapStringForDescriptive += fmt.Sprintf("%#v: %#v,", k, this.Descriptive[k]) + } + mapStringForDescriptive += "}" + if this.Descriptive != nil { + s = append(s, "Descriptive: "+mapStringForDescriptive+",\n") + } + if this.Entities != nil { + s = append(s, "Entities: "+fmt.Sprintf("%#v", this.Entities)+",\n") + } + s = append(s, "MinTimeMs: "+fmt.Sprintf("%#v", this.MinTimeMs)+",\n") + s = append(s, "MaxTimeMs: "+fmt.Sprintf("%#v", this.MaxTimeMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StreamingChunksBatchSize != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StreamingChunksBatchSize)) - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xa0 +func (this *EntityData) GoString() string { + if this == nil { + return "nil" } - if len(m.ProjectionLabels) > 0 { - for iNdEx := len(m.ProjectionLabels) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ProjectionLabels[iNdEx]) - copy(dAtA[i:], m.ProjectionLabels[iNdEx]) - i = encodeVarintIngester(dAtA, i, uint64(len(m.ProjectionLabels[iNdEx]))) - i-- - dAtA[i] = 0x2a - } + s := make([]string, 0, 7) + s = append(s, "&client.EntityData{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + keysForId := make([]string, 0, len(this.Id)) + for k, _ := range this.Id { + keysForId = append(keysForId, k) } - if m.ProjectionInclude { - i-- - if m.ProjectionInclude { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + github_com_gogo_protobuf_sortkeys.Strings(keysForId) + mapStringForId := "map[string]string{" + for _, k := range keysForId { + mapStringForId += fmt.Sprintf("%#v: %#v,", k, this.Id[k]) } - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } 
+ mapStringForId += "}" + if this.Id != nil { + s = append(s, "Id: "+mapStringForId+",\n") } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 + keysForDescription := make([]string, 0, len(this.Description)) + for k, _ := range this.Description { + keysForDescription = append(keysForDescription, k) } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 + github_com_gogo_protobuf_sortkeys.Strings(keysForDescription) + mapStringForDescription := "map[string]string{" + for _, k := range keysForDescription { + mapStringForDescription += fmt.Sprintf("%#v: %#v,", k, this.Description[k]) } - return len(dAtA) - i, nil + mapStringForDescription += "}" + if this.Description != nil { + s = append(s, "Description: "+mapStringForDescription+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func (m *ExemplarQueryRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func valueToGoStringIngester(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" } - return dAtA[:n], nil + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } -func (m *ExemplarQueryRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IngesterClient is the client API for Ingester service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IngesterClient interface { + Push(ctx context.Context, in *mimirpb.WriteRequest, opts ...grpc.CallOption) (*mimirpb.WriteResponse, error) + QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) + QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error) + LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) + LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) + UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) + AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) + MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) + MetricsMetadata(ctx context.Context, in *MetricsMetadataRequest, opts ...grpc.CallOption) (*MetricsMetadataResponse, error) + // LabelNamesAndValues provides all values for each label that matches the matchers. + // The order of the labels and values is not guaranteed. + LabelNamesAndValues(ctx context.Context, in *LabelNamesAndValuesRequest, opts ...grpc.CallOption) (Ingester_LabelNamesAndValuesClient, error) + // LabelValuesCardinality returns all values and series total count for label_names labels + // that match the matchers. + // The listing order of the labels is not guaranteed. 
+ LabelValuesCardinality(ctx context.Context, in *LabelValuesCardinalityRequest, opts ...grpc.CallOption) (Ingester_LabelValuesCardinalityClient, error) + ActiveSeries(ctx context.Context, in *ActiveSeriesRequest, opts ...grpc.CallOption) (Ingester_ActiveSeriesClient, error) + // ResourceAttributes returns OTel resource attributes for series matching the matchers. + ResourceAttributes(ctx context.Context, in *ResourceAttributesRequest, opts ...grpc.CallOption) (Ingester_ResourceAttributesClient, error) } -func (m *ExemplarQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 +type ingesterClient struct { + cc *grpc.ClientConn +} + +func NewIngesterClient(cc *grpc.ClientConn) IngesterClient { + return &ingesterClient{cc} +} + +func (c *ingesterClient) Push(ctx context.Context, in *mimirpb.WriteRequest, opts ...grpc.CallOption) (*mimirpb.WriteResponse, error) { + out := new(mimirpb.WriteResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/Push", in, out, opts...) 
+ if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *ActiveSeriesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *ingesterClient) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/QueryStream", opts...) if err != nil { return nil, err } - return dAtA[:n], nil + x := &ingesterQueryStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } -func (m *ActiveSeriesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Ingester_QueryStreamClient interface { + Recv() (*QueryStreamResponse, error) + grpc.ClientStream } -func (m *ActiveSeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Type != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x10 - } - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +type ingesterQueryStreamClient struct { + grpc.ClientStream +} + +func (x *ingesterQueryStreamClient) Recv() (*QueryStreamResponse, error) { + m := new(QueryStreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err } - return len(dAtA) - i, nil + return m, nil } -func (m *QueryStreamResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *ingesterClient) 
QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error) { + out := new(ExemplarQueryResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/QueryExemplars", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *QueryStreamResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *ingesterClient) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) { + out := new(LabelValuesResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelValues", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *QueryStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.StreamingSeriesChunks) > 0 { - for iNdEx := len(m.StreamingSeriesChunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.StreamingSeriesChunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.IsEndOfSeriesStream { - i-- - if m.IsEndOfSeriesStream { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.StreamingSeries) > 0 { - for iNdEx := len(m.StreamingSeries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.StreamingSeries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } +func (c *ingesterClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) { + out := new(LabelNamesResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelNames", in, out, opts...) 
+ if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *QueryStreamSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *ingesterClient) UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) { + out := new(UserStatsResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/UserStats", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *QueryStreamSeries) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *ingesterClient) AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) { + out := new(UsersStatsResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/AllUserStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *QueryStreamSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ChunkCount != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.ChunkCount)) - i-- - dAtA[i] = 0x10 - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +func (c *ingesterClient) MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) { + out := new(MetricsForLabelMatchersResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsForLabelMatchers", in, out, opts...) 
+ if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *QueryStreamSeriesChunks) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *ingesterClient) MetricsMetadata(ctx context.Context, in *MetricsMetadataRequest, opts ...grpc.CallOption) (*MetricsMetadataResponse, error) { + out := new(MetricsMetadataResponse) + err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsMetadata", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *QueryStreamSeriesChunks) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryStreamSeriesChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } +func (c *ingesterClient) LabelNamesAndValues(ctx context.Context, in *LabelNamesAndValuesRequest, opts ...grpc.CallOption) (Ingester_LabelNamesAndValuesClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/LabelNamesAndValues", opts...) 
+ if err != nil { + return nil, err } - if m.SeriesIndex != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.SeriesIndex)) - i-- - dAtA[i] = 0x8 + x := &ingesterLabelNamesAndValuesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err } - return len(dAtA) - i, nil -} - -func (m *ExemplarQueryResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { + if err := x.ClientStream.CloseSend(); err != nil { return nil, err } - return dAtA[:n], nil + return x, nil } -func (m *ExemplarQueryResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Ingester_LabelNamesAndValuesClient interface { + Recv() (*LabelNamesAndValuesResponse, error) + grpc.ClientStream } -func (m *ExemplarQueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Timeseries) > 0 { - for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +type ingesterLabelNamesAndValuesClient struct { + grpc.ClientStream +} + +func (x *ingesterLabelNamesAndValuesClient) Recv() (*LabelNamesAndValuesResponse, error) { + m := new(LabelNamesAndValuesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err } - return len(dAtA) - i, nil + return m, nil } -func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *ingesterClient) LabelValuesCardinality(ctx context.Context, in *LabelValuesCardinalityRequest, opts ...grpc.CallOption) (Ingester_LabelValuesCardinalityClient, error) { + stream, err := c.cc.NewStream(ctx, 
&_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/LabelValuesCardinality", opts...) if err != nil { return nil, err } - return dAtA[:n], nil + x := &ingesterLabelValuesCardinalityClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } -func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Ingester_LabelValuesCardinalityClient interface { + Recv() (*LabelValuesCardinalityResponse, error) + grpc.ClientStream } -func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Limit != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x28 - } - if m.Matchers != nil { - { - size, err := m.Matchers.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x18 - } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if len(m.LabelName) > 0 { - i -= len(m.LabelName) - copy(dAtA[i:], m.LabelName) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelName))) - i-- - dAtA[i] = 0xa +type ingesterLabelValuesCardinalityClient struct { + grpc.ClientStream +} + +func (x *ingesterLabelValuesCardinalityClient) Recv() (*LabelValuesCardinalityResponse, error) { + m := new(LabelValuesCardinalityResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err } - return len(dAtA) - i, nil + return m, nil } -func (m *LabelValuesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *ingesterClient) 
ActiveSeries(ctx context.Context, in *ActiveSeriesRequest, opts ...grpc.CallOption) (Ingester_ActiveSeriesClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[3], "/cortex.Ingester/ActiveSeries", opts...) if err != nil { return nil, err } - return dAtA[:n], nil + x := &ingesterActiveSeriesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } -func (m *LabelValuesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Ingester_ActiveSeriesClient interface { + Recv() (*ActiveSeriesResponse, error) + grpc.ClientStream } -func (m *LabelValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelValues) > 0 { - for iNdEx := len(m.LabelValues) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LabelValues[iNdEx]) - copy(dAtA[i:], m.LabelValues[iNdEx]) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelValues[iNdEx]))) - i-- - dAtA[i] = 0xa - } +type ingesterActiveSeriesClient struct { + grpc.ClientStream +} + +func (x *ingesterActiveSeriesClient) Recv() (*ActiveSeriesResponse, error) { + m := new(ActiveSeriesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err } - return len(dAtA) - i, nil + return m, nil } -func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *ingesterClient) ResourceAttributes(ctx context.Context, in *ResourceAttributesRequest, opts ...grpc.CallOption) (Ingester_ResourceAttributesClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[4], "/cortex.Ingester/ResourceAttributes", opts...) 
if err != nil { return nil, err } - return dAtA[:n], nil + x := &ingesterResourceAttributesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } -func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Ingester_ResourceAttributesClient interface { + Recv() (*ResourceAttributesResponse, error) + grpc.ClientStream } -func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Limit != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x20 - } - if m.Matchers != nil { - { - size, err := m.Matchers.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil +type ingesterResourceAttributesClient struct { + grpc.ClientStream } -func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func (x *ingesterResourceAttributesClient) Recv() (*ResourceAttributesResponse, error) { + m := new(ResourceAttributesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } - return dAtA[:n], nil -} - -func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return m, nil } -func (m *LabelNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if 
len(m.LabelNames) > 0 { - for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LabelNames[iNdEx]) - copy(dAtA[i:], m.LabelNames[iNdEx]) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil +// IngesterServer is the server API for Ingester service. +type IngesterServer interface { + Push(context.Context, *mimirpb.WriteRequest) (*mimirpb.WriteResponse, error) + QueryStream(*QueryRequest, Ingester_QueryStreamServer) error + QueryExemplars(context.Context, *ExemplarQueryRequest) (*ExemplarQueryResponse, error) + LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) + LabelNames(context.Context, *LabelNamesRequest) (*LabelNamesResponse, error) + UserStats(context.Context, *UserStatsRequest) (*UserStatsResponse, error) + AllUserStats(context.Context, *UserStatsRequest) (*UsersStatsResponse, error) + MetricsForLabelMatchers(context.Context, *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) + MetricsMetadata(context.Context, *MetricsMetadataRequest) (*MetricsMetadataResponse, error) + // LabelNamesAndValues provides all values for each label that matches the matchers. + // The order of the labels and values is not guaranteed. + LabelNamesAndValues(*LabelNamesAndValuesRequest, Ingester_LabelNamesAndValuesServer) error + // LabelValuesCardinality returns all values and series total count for label_names labels + // that match the matchers. + // The listing order of the labels is not guaranteed. + LabelValuesCardinality(*LabelValuesCardinalityRequest, Ingester_LabelValuesCardinalityServer) error + ActiveSeries(*ActiveSeriesRequest, Ingester_ActiveSeriesServer) error + // ResourceAttributes returns OTel resource attributes for series matching the matchers. 
+ ResourceAttributes(*ResourceAttributesRequest, Ingester_ResourceAttributesServer) error } -func (m *UserStatsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// UnimplementedIngesterServer can be embedded to have forward compatible implementations. +type UnimplementedIngesterServer struct { } -func (m *UserStatsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (*UnimplementedIngesterServer) Push(ctx context.Context, req *mimirpb.WriteRequest) (*mimirpb.WriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") } - -func (m *UserStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CountMethod != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.CountMethod)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil +func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error { + return status.Errorf(codes.Unimplemented, "method QueryStream not implemented") } - -func (m *UserStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (*UnimplementedIngesterServer) QueryExemplars(ctx context.Context, req *ExemplarQueryRequest) (*ExemplarQueryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryExemplars not implemented") +} +func (*UnimplementedIngesterServer) LabelValues(ctx context.Context, req *LabelValuesRequest) (*LabelValuesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") +} +func (*UnimplementedIngesterServer) LabelNames(ctx context.Context, req 
*LabelNamesRequest) (*LabelNamesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") +} +func (*UnimplementedIngesterServer) UserStats(ctx context.Context, req *UserStatsRequest) (*UserStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserStats not implemented") +} +func (*UnimplementedIngesterServer) AllUserStats(ctx context.Context, req *UserStatsRequest) (*UsersStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllUserStats not implemented") +} +func (*UnimplementedIngesterServer) MetricsForLabelMatchers(ctx context.Context, req *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MetricsForLabelMatchers not implemented") +} +func (*UnimplementedIngesterServer) MetricsMetadata(ctx context.Context, req *MetricsMetadataRequest) (*MetricsMetadataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MetricsMetadata not implemented") +} +func (*UnimplementedIngesterServer) LabelNamesAndValues(req *LabelNamesAndValuesRequest, srv Ingester_LabelNamesAndValuesServer) error { + return status.Errorf(codes.Unimplemented, "method LabelNamesAndValues not implemented") +} +func (*UnimplementedIngesterServer) LabelValuesCardinality(req *LabelValuesCardinalityRequest, srv Ingester_LabelValuesCardinalityServer) error { + return status.Errorf(codes.Unimplemented, "method LabelValuesCardinality not implemented") +} +func (*UnimplementedIngesterServer) ActiveSeries(req *ActiveSeriesRequest, srv Ingester_ActiveSeriesServer) error { + return status.Errorf(codes.Unimplemented, "method ActiveSeries not implemented") +} +func (*UnimplementedIngesterServer) ResourceAttributes(req *ResourceAttributesRequest, srv Ingester_ResourceAttributesServer) error { + return status.Errorf(codes.Unimplemented, "method ResourceAttributes not implemented") } -func (m *UserStatsResponse) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { + s.RegisterService(&_Ingester_serviceDesc, srv) } -func (m *UserStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RuleIngestionRate != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) - i-- - dAtA[i] = 0x21 +func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(mimirpb.WriteRequest) + if err := dec(in); err != nil { + return nil, err } - if m.ApiIngestionRate != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ApiIngestionRate)))) - i-- - dAtA[i] = 0x19 + if interceptor == nil { + return srv.(IngesterServer).Push(ctx, in) } - if m.NumSeries != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.NumSeries)) - i-- - dAtA[i] = 0x10 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/Push", } - if m.IngestionRate != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.IngestionRate)))) - i-- - dAtA[i] = 0x9 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).Push(ctx, req.(*mimirpb.WriteRequest)) } - return len(dAtA) - i, nil + return interceptor(ctx, in, info, handler) } -func (m *UserIDStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func _Ingester_QueryStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(QueryRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - return dAtA[:n], nil -} - -func (m 
*UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return srv.(IngesterServer).QueryStream(m, &ingesterQueryStreamServer{stream}) } -func (m *UserIDStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Data != nil { - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintIngester(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +type Ingester_QueryStreamServer interface { + Send(*QueryStreamResponse) error + grpc.ServerStream } -func (m *UsersStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +type ingesterQueryStreamServer struct { + grpc.ServerStream } -func (m *UsersStatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *ingesterQueryStreamServer) Send(m *QueryStreamResponse) error { + return x.ServerStream.SendMsg(m) } -func (m *UsersStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Stats) > 0 { - for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Stats[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +func _Ingester_QueryExemplars_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExemplarQueryRequest) + if err := dec(in); err != nil { + 
return nil, err } - return len(dAtA) - i, nil + if interceptor == nil { + return srv.(IngesterServer).QueryExemplars(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/QueryExemplars", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).QueryExemplars(ctx, req.(*ExemplarQueryRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *MetricsForLabelMatchersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func _Ingester_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LabelValuesRequest) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(IngesterServer).LabelValues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/LabelValues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).LabelValues(ctx, req.(*LabelValuesRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *MetricsForLabelMatchersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func _Ingester_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LabelNamesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).LabelNames(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/LabelNames", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).LabelNames(ctx, 
req.(*LabelNamesRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *MetricsForLabelMatchersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Limit != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x20 +func _Ingester_UserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UserStatsRequest) + if err := dec(in); err != nil { + return nil, err } - if len(m.MatchersSet) > 0 { - for iNdEx := len(m.MatchersSet) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchersSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + if interceptor == nil { + return srv.(IngesterServer).UserStats(ctx, in) } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/UserStats", } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).UserStats(ctx, req.(*UserStatsRequest)) } - return len(dAtA) - i, nil + return interceptor(ctx, in, info, handler) } -func (m *MetricsForLabelMatchersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func _Ingester_AllUserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UserStatsRequest) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return 
srv.(IngesterServer).AllUserStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/AllUserStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).AllUserStats(ctx, req.(*UserStatsRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *MetricsForLabelMatchersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func _Ingester_MetricsForLabelMatchers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricsForLabelMatchersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngesterServer).MetricsForLabelMatchers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/MetricsForLabelMatchers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).MetricsForLabelMatchers(ctx, req.(*MetricsForLabelMatchersRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *MetricsForLabelMatchersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metric) > 0 { - for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +func _Ingester_MetricsMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricsMetadataRequest) + if err := dec(in); err != nil { + return nil, err } - return len(dAtA) - i, nil + if interceptor == nil { + return srv.(IngesterServer).MetricsMetadata(ctx, in) + } + info 
:= &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.Ingester/MetricsMetadata", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngesterServer).MetricsMetadata(ctx, req.(*MetricsMetadataRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *MetricsMetadataRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func _Ingester_LabelNamesAndValues_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(LabelNamesAndValuesRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - return dAtA[:n], nil + return srv.(IngesterServer).LabelNamesAndValues(m, &ingesterLabelNamesAndValuesServer{stream}) } -func (m *MetricsMetadataRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Ingester_LabelNamesAndValuesServer interface { + Send(*LabelNamesAndValuesResponse) error + grpc.ServerStream } -func (m *MetricsMetadataRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metric) > 0 { - i -= len(m.Metric) - copy(dAtA[i:], m.Metric) - i = encodeVarintIngester(dAtA, i, uint64(len(m.Metric))) - i-- - dAtA[i] = 0x1a - } - if m.LimitPerMetric != 0 { - i = encodeVarintIngester(dAtA, i, uint64((uint32(m.LimitPerMetric)<<1)^uint32((m.LimitPerMetric>>31)))) - i-- - dAtA[i] = 0x10 - } - if m.Limit != 0 { - i = encodeVarintIngester(dAtA, i, uint64((uint32(m.Limit)<<1)^uint32((m.Limit>>31)))) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil +type ingesterLabelNamesAndValuesServer struct { + grpc.ServerStream } -func (m *MetricsMetadataResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x 
*ingesterLabelNamesAndValuesServer) Send(m *LabelNamesAndValuesResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Ingester_LabelValuesCardinality_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(LabelValuesCardinalityRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - return dAtA[:n], nil + return srv.(IngesterServer).LabelValuesCardinality(m, &ingesterLabelValuesCardinalityServer{stream}) } -func (m *MetricsMetadataResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Ingester_LabelValuesCardinalityServer interface { + Send(*LabelValuesCardinalityResponse) error + grpc.ServerStream } -func (m *MetricsMetadataResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metadata) > 0 { - for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +type ingesterLabelValuesCardinalityServer struct { + grpc.ServerStream +} + +func (x *ingesterLabelValuesCardinalityServer) Send(m *LabelValuesCardinalityResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Ingester_ActiveSeries_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ActiveSeriesRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - return len(dAtA) - i, nil + return srv.(IngesterServer).ActiveSeries(m, &ingesterActiveSeriesServer{stream}) } -func (m *ActiveSeriesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +type Ingester_ActiveSeriesServer interface { + Send(*ActiveSeriesResponse) error + grpc.ServerStream +} + +type ingesterActiveSeriesServer struct { + grpc.ServerStream +} + 
+func (x *ingesterActiveSeriesServer) Send(m *ActiveSeriesResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Ingester_ResourceAttributes_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ResourceAttributesRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - return dAtA[:n], nil + return srv.(IngesterServer).ResourceAttributes(m, &ingesterResourceAttributesServer{stream}) } -func (m *ActiveSeriesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Ingester_ResourceAttributesServer interface { + Send(*ResourceAttributesResponse) error + grpc.ServerStream } -func (m *ActiveSeriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +type ingesterResourceAttributesServer struct { + grpc.ServerStream +} + +func (x *ingesterResourceAttributesServer) Send(m *ResourceAttributesResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Ingester_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cortex.Ingester", + HandlerType: (*IngesterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Push", + Handler: _Ingester_Push_Handler, + }, + { + MethodName: "QueryExemplars", + Handler: _Ingester_QueryExemplars_Handler, + }, + { + MethodName: "LabelValues", + Handler: _Ingester_LabelValues_Handler, + }, + { + MethodName: "LabelNames", + Handler: _Ingester_LabelNames_Handler, + }, + { + MethodName: "UserStats", + Handler: _Ingester_UserStats_Handler, + }, + { + MethodName: "AllUserStats", + Handler: _Ingester_AllUserStats_Handler, + }, + { + MethodName: "MetricsForLabelMatchers", + Handler: _Ingester_MetricsForLabelMatchers_Handler, + }, + { + MethodName: "MetricsMetadata", + Handler: _Ingester_MetricsMetadata_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "QueryStream", + Handler: _Ingester_QueryStream_Handler, + ServerStreams: true, + }, + { + StreamName: "LabelNamesAndValues", + Handler: _Ingester_LabelNamesAndValues_Handler, + 
ServerStreams: true, + }, + { + StreamName: "LabelValuesCardinality", + Handler: _Ingester_LabelValuesCardinality_Handler, + ServerStreams: true, + }, + { + StreamName: "ActiveSeries", + Handler: _Ingester_ActiveSeries_Handler, + ServerStreams: true, + }, + { + StreamName: "ResourceAttributes", + Handler: _Ingester_ResourceAttributes_Handler, + ServerStreams: true, + }, + }, + Metadata: "ingester.proto", +} + +func (m *LabelNamesAndValuesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesAndValuesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesAndValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.BucketCount) > 0 { - dAtA5 := make([]byte, len(m.BucketCount)*10) - var j4 int - for _, num := range m.BucketCount { - for num >= 1<<7 { - dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j4++ - } - dAtA5[j4] = uint8(num) - j4++ - } - i -= j4 - copy(dAtA[i:], dAtA5[:j4]) - i = encodeVarintIngester(dAtA, i, uint64(j4)) + if m.CountMethod != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.CountMethod)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if len(m.Metric) > 0 { - for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4923,7 +4700,7 @@ func (m *ActiveSeriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Chunk) Marshal() (dAtA []byte, err error) { +func (m *LabelNamesAndValuesResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4933,45 +4710,73 @@ func (m *Chunk) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { +func (m *LabelNamesAndValuesResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *LabelNamesAndValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size := m.Data.Size() - i -= size - if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i = encodeVarintIngester(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x22 - if m.Encoding != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.Encoding)) - i-- - dAtA[i] = 0x18 + return len(dAtA) - i, nil +} + +func (m *LabelValues) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 + return dAtA[:n], nil +} + +func (m *LabelValues) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValues) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintIngester(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - if m.StartTimestampMs != 0 { - i 
= encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) + if len(m.LabelName) > 0 { + i -= len(m.LabelName) + copy(dAtA[i:], m.LabelName) + i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelName))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { +func (m *LabelValuesCardinalityRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4981,16 +4786,21 @@ func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LabelMatchers) MarshalTo(dAtA []byte) (int, error) { +func (m *LabelValuesCardinalityRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LabelMatchers) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *LabelValuesCardinalityRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.CountMethod != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.CountMethod)) + i-- + dAtA[i] = 0x18 + } if len(m.Matchers) > 0 { for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { { @@ -5002,13 +4812,22 @@ func (m *LabelMatchers) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintIngester(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + } + } + if len(m.LabelNames) > 0 { + for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelNames[iNdEx]) + copy(dAtA[i:], m.LabelNames[iNdEx]) + i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) + i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { +func (m *LabelValuesCardinalityResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5018,1040 +4837,3370 @@ func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { return dAtA[:n], 
nil } -func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { +func (m *LabelValuesCardinalityResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *LabelValuesCardinalityResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintIngester(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintIngester(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func encodeVarintIngester(dAtA []byte, offset int, v uint64) int { - offset -= sovIngester(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *LabelValueSeriesCount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *LabelNamesAndValuesRequest) Size() (n int) { - if m == nil { - return 0 - } + +func (m *LabelValueSeriesCount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValueSeriesCount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - 
n += 1 + l + sovIngester(uint64(l)) + if len(m.LabelValueSeries) > 0 { + for k := range m.LabelValueSeries { + v := m.LabelValueSeries[k] + baseI := i + i = encodeVarintIngester(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintIngester(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintIngester(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if m.CountMethod != 0 { - n += 1 + sovIngester(uint64(m.CountMethod)) + if len(m.LabelName) > 0 { + i -= len(m.LabelName) + copy(dAtA[i:], m.LabelName) + i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelName))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *LabelNamesAndValuesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } +func (m *QueryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LabelValues) Size() (n int) { - if m == nil { - return 0 - } +func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.LabelName) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) + if m.StreamingChunksBatchSize != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.StreamingChunksBatchSize)) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa0 } - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovIngester(uint64(l)) + if len(m.ProjectionLabels) > 0 { + for iNdEx := len(m.ProjectionLabels) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ProjectionLabels[iNdEx]) + copy(dAtA[i:], m.ProjectionLabels[iNdEx]) + i = 
encodeVarintIngester(dAtA, i, uint64(len(m.ProjectionLabels[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - return n -} - -func (m *LabelValuesCardinalityRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelNames) > 0 { - for _, s := range m.LabelNames { - l = len(s) - n += 1 + l + sovIngester(uint64(l)) + if m.ProjectionInclude { + i-- + if m.ProjectionInclude { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if m.CountMethod != 0 { - n += 1 + sovIngester(uint64(m.CountMethod)) + if m.EndTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 } - return n + if m.StartTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *LabelValuesCardinalityResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } +func (m *ExemplarQueryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LabelValueSeriesCount) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.LabelName) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - if len(m.LabelValueSeries) > 0 { - for k, v := range m.LabelValueSeries { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovIngester(uint64(len(k))) + 1 + sovIngester(uint64(v)) 
- n += mapEntrySize + 1 + sovIngester(uint64(mapEntrySize)) - } - } - return n +func (m *ExemplarQueryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryRequest) Size() (n int) { - if m == nil { - return 0 - } +func (m *ExemplarQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) - } if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if m.ProjectionInclude { - n += 2 - } - if len(m.ProjectionLabels) > 0 { - for _, s := range m.ProjectionLabels { - l = len(s) - n += 1 + l + sovIngester(uint64(l)) - } + if m.EndTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 } - if m.StreamingChunksBatchSize != 0 { - n += 2 + sovIngester(uint64(m.StreamingChunksBatchSize)) + if m.StartTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *ExemplarQueryRequest) Size() (n int) { - if m == nil { - return 0 +func (m *ActiveSeriesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ActiveSeriesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActiveSeriesRequest) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) + if m.Type != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 } if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *ActiveSeriesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - if m.Type != 0 { - n += 1 + sovIngester(uint64(m.Type)) +func (m *QueryStreamResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *QueryStreamResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *QueryStreamResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.StreamingSeries) > 0 { - for _, e := range m.StreamingSeries { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + if len(m.StreamingSeriesChunks) > 0 { + for iNdEx := len(m.StreamingSeriesChunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.StreamingSeriesChunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0x2a } } if m.IsEndOfSeriesStream { - n += 2 + i-- + if m.IsEndOfSeriesStream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if len(m.StreamingSeriesChunks) > 0 { - for _, e := range m.StreamingSeriesChunks { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + if len(m.StreamingSeries) > 0 { + for iNdEx := len(m.StreamingSeries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.StreamingSeries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - return n + return len(dAtA) - i, nil } -func (m *QueryStreamSeries) Size() (n int) { - if m == nil { - return 0 +func (m *QueryStreamSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *QueryStreamSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStreamSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l + if m.ChunkCount != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.ChunkCount)) + i-- + dAtA[i] = 0x10 + } if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - if m.ChunkCount != 0 { - n += 1 + sovIngester(uint64(m.ChunkCount)) - } - return n + return len(dAtA) - i, nil } -func (m *QueryStreamSeriesChunks) Size() (n int) { - if m == nil { - return 0 +func (m *QueryStreamSeriesChunks) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *QueryStreamSeriesChunks) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStreamSeriesChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.SeriesIndex != 0 { - n += 1 + sovIngester(uint64(m.SeriesIndex)) - } if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + if m.SeriesIndex != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.SeriesIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *ExemplarQueryResponse) Size() (n int) { - if m == nil { - return 0 +func (m *ExemplarQueryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ExemplarQueryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemplarQueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *LabelValuesRequest) Size() 
(n int) { - if m == nil { - return 0 +func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.LabelName) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) + if m.Limit != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x28 } if m.Matchers != nil { - l = m.Matchers.Size() - n += 1 + l + sovIngester(uint64(l)) - } - if m.Limit != 0 { - n += 1 + sovIngester(uint64(m.Limit)) + { + size, err := m.Matchers.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - return n + if m.EndTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x18 + } + if m.StartTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x10 + } + if len(m.LabelName) > 0 { + i -= len(m.LabelName) + copy(dAtA[i:], m.LabelName) + i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *LabelValuesResponse) Size() (n int) { - if m == nil { - return 0 +func (m *LabelValuesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *LabelValuesResponse) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if len(m.LabelValues) > 0 { - for _, s := range m.LabelValues { - l = len(s) - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.LabelValues) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelValues[iNdEx]) + copy(dAtA[i:], m.LabelValues[iNdEx]) + i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelValues[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *LabelNamesRequest) Size() (n int) { - if m == nil { - return 0 +func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) + if m.Limit != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x20 } if m.Matchers != nil { - l = m.Matchers.Size() - n += 1 + l + sovIngester(uint64(l)) + { + size, err := m.Matchers.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if m.Limit != 0 { - n += 1 + sovIngester(uint64(m.Limit)) + if m.EndTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 } - return n + if m.StartTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) + i-- + 
dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *LabelNamesResponse) Size() (n int) { - if m == nil { - return 0 +func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if len(m.LabelNames) > 0 { - for _, s := range m.LabelNames { - l = len(s) - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelNames[iNdEx]) + copy(dAtA[i:], m.LabelNames[iNdEx]) + i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *UserStatsRequest) Size() (n int) { - if m == nil { - return 0 +func (m *UserStatsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *UserStatsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if m.CountMethod != 0 { - n += 1 + sovIngester(uint64(m.CountMethod)) + i = encodeVarintIngester(dAtA, i, uint64(m.CountMethod)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *UserStatsResponse) Size() (n int) { - if m == nil { - return 0 +func (m *UserStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err } + return dAtA[:n], nil +} + +func (m *UserStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.IngestionRate != 0 { - n += 9 - } - if m.NumSeries != 0 { - n += 1 + sovIngester(uint64(m.NumSeries)) + if m.RuleIngestionRate != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) + i-- + dAtA[i] = 0x21 } if m.ApiIngestionRate != 0 { - n += 9 + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ApiIngestionRate)))) + i-- + dAtA[i] = 0x19 } - if m.RuleIngestionRate != 0 { - n += 9 + if m.NumSeries != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.NumSeries)) + i-- + dAtA[i] = 0x10 } - return n + if m.IngestionRate != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.IngestionRate)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil } -func (m *UserIDStatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - if m.Data != nil { - l = m.Data.Size() - n += 1 + l + sovIngester(uint64(l)) +func (m *UserIDStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *UsersStatsResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserIDStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() 
- n += 1 + l + sovIngester(uint64(l)) + if m.Data != nil { + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return n + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintIngester(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *MetricsForLabelMatchersRequest) Size() (n int) { - if m == nil { - return 0 +func (m *UsersStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *UsersStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UsersStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stats[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) + return len(dAtA) - i, nil +} + +func (m *MetricsForLabelMatchersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsForLabelMatchersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsForLabelMatchersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l 
int + _ = l + if m.Limit != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x20 } if len(m.MatchersSet) > 0 { - for _, e := range m.MatchersSet { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.MatchersSet) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchersSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if m.Limit != 0 { - n += 1 + sovIngester(uint64(m.Limit)) + if m.EndTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 } - return n + if m.StartTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *MetricsForLabelMatchersResponse) Size() (n int) { - if m == nil { - return 0 +func (m *MetricsForLabelMatchersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *MetricsForLabelMatchersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsForLabelMatchersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if len(m.Metric) > 0 { - for _, e := range m.Metric { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *MetricsMetadataRequest) Size() (n int) { - if m == nil { - return 0 +func (m *MetricsMetadataRequest) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *MetricsMetadataRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsMetadataRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Limit != 0 { - n += 1 + sozIngester(uint64(m.Limit)) + if len(m.Metric) > 0 { + i -= len(m.Metric) + copy(dAtA[i:], m.Metric) + i = encodeVarintIngester(dAtA, i, uint64(len(m.Metric))) + i-- + dAtA[i] = 0x1a } if m.LimitPerMetric != 0 { - n += 1 + sozIngester(uint64(m.LimitPerMetric)) + i = encodeVarintIngester(dAtA, i, uint64((uint32(m.LimitPerMetric)<<1)^uint32((m.LimitPerMetric>>31)))) + i-- + dAtA[i] = 0x10 } - l = len(m.Metric) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) + if m.Limit != 0 { + i = encodeVarintIngester(dAtA, i, uint64((uint32(m.Limit)<<1)^uint32((m.Limit>>31)))) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *MetricsMetadataResponse) Size() (n int) { - if m == nil { - return 0 +func (m *MetricsMetadataResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *MetricsMetadataResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsMetadataResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if len(m.Metadata) > 0 { - for _, e := range m.Metadata { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *ActiveSeriesResponse) Size() (n int) { - if m == nil { - return 0 +func (m *ActiveSeriesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ActiveSeriesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ActiveSeriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Metric) > 0 { - for _, e := range m.Metric { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } if len(m.BucketCount) > 0 { - l = 0 - for _, e := range m.BucketCount { - l += sovIngester(uint64(e)) - } - n += 1 + sovIngester(uint64(l)) + l + dAtA5 := make([]byte, len(m.BucketCount)*10) + var j4 int + for _, num := range m.BucketCount { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + i -= j4 + copy(dAtA[i:], dAtA5[:j4]) + i = encodeVarintIngester(dAtA, i, uint64(j4)) + i-- + dAtA[i] = 0x12 } - return n + if len(m.Metric) > 0 { + for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *Chunk) Size() (n int) { - if m == nil { - return 0 +func (m *Chunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Chunk) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) + { + size := m.Data.Size() + i -= size + if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Encoding != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Encoding)) + i-- + dAtA[i] = 0x18 } if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) + i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 } - if m.Encoding != 0 { - n += 1 + sovIngester(uint64(m.Encoding)) + if m.StartTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 } - l = m.Data.Size() - n += 1 + l + sovIngester(uint64(l)) - return n + return len(dAtA) - i, nil } -func (m *LabelMatchers) Size() (n int) { - if m == nil { - return 0 +func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *LabelMatchers) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelMatchers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *LabelMatcher) Size() (n int) { - if m == nil { - return 0 +func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Type != 0 { - n += 1 + sovIngester(uint64(m.Type)) + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintIngester(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintIngester(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) + if m.Type != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func sovIngester(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +func (m *ResourceAttributesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func sozIngester(x uint64) (n int) { - return sovIngester(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +func (m *ResourceAttributesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (this *LabelNamesAndValuesRequest) String() string { - if this == nil { - return "nil" + +func (m *ResourceAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceAttrFilters) > 0 { + for iNdEx := len(m.ResourceAttrFilters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceAttrFilters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } } - repeatedStringForMatchers := "[]*LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + if m.Limit != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x20 } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&LabelNamesAndValuesRequest{`, - `Matchers:` + repeatedStringForMatchers + `,`, - `CountMethod:` + fmt.Sprintf("%v", this.CountMethod) + `,`, - `}`, - }, "") - return s -} -func (this *LabelNamesAndValuesResponse) String() string { - if this == nil { - return "nil" + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - repeatedStringForItems := "[]*LabelValues{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(f.String(), "LabelValues", "LabelValues", 1) + "," + if m.EndTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 } - repeatedStringForItems += "}" - s := strings.Join([]string{`&LabelNamesAndValuesResponse{`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *LabelValues) String() string { - if this == nil { - return "nil" + if m.StartTimestampMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 } - s := strings.Join([]string{`&LabelValues{`, - `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s + return len(dAtA) - i, nil } -func (this *LabelValuesCardinalityRequest) String() string { - if this == nil { - return "nil" - } - 
repeatedStringForMatchers := "[]*LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + +func (m *ResourceAttributesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&LabelValuesCardinalityRequest{`, - `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, - `Matchers:` + repeatedStringForMatchers + `,`, - `CountMethod:` + fmt.Sprintf("%v", this.CountMethod) + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *LabelValuesCardinalityResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]*LabelValueSeriesCount{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(f.String(), "LabelValueSeriesCount", "LabelValueSeriesCount", 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&LabelValuesCardinalityResponse{`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + +func (m *ResourceAttributesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (this *LabelValueSeriesCount) String() string { - if this == nil { - return "nil" - } - keysForLabelValueSeries := make([]string, 0, len(this.LabelValueSeries)) - for k, _ := range this.LabelValueSeries { - keysForLabelValueSeries = append(keysForLabelValueSeries, k) + +func (m *ResourceAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0xa + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabelValueSeries) - mapStringForLabelValueSeries := "map[string]uint64{" - for _, k := range keysForLabelValueSeries { - mapStringForLabelValueSeries += fmt.Sprintf("%v: %v,", k, this.LabelValueSeries[k]) + return len(dAtA) - i, nil +} + +func (m *SeriesResourceAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - mapStringForLabelValueSeries += "}" - s := strings.Join([]string{`&LabelValueSeriesCount{`, - `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, - `LabelValueSeries:` + mapStringForLabelValueSeries + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *QueryRequest) String() string { - if this == nil { - return "nil" + +func (m *SeriesResourceAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeriesResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - repeatedStringForMatchers := "[]*LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&QueryRequest{`, - 
`StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + repeatedStringForMatchers + `,`, - `ProjectionInclude:` + fmt.Sprintf("%v", this.ProjectionInclude) + `,`, - `ProjectionLabels:` + fmt.Sprintf("%v", this.ProjectionLabels) + `,`, - `StreamingChunksBatchSize:` + fmt.Sprintf("%v", this.StreamingChunksBatchSize) + `,`, - `}`, - }, "") - return s + return len(dAtA) - i, nil } -func (this *ExemplarQueryRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchers := "[]*LabelMatchers{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," + +func (m *ResourceVersionData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&ExemplarQueryRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + repeatedStringForMatchers + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *ActiveSeriesRequest) String() string { - if this == nil { - return "nil" + +func (m *ResourceVersionData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceVersionData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxTimeMs != 0 { + i = encodeVarintIngester(dAtA, i, uint64(m.MaxTimeMs)) + i-- + dAtA[i] = 0x28 } - repeatedStringForMatchers := "[]*LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + if m.MinTimeMs != 0 { + i = encodeVarintIngester(dAtA, i, 
uint64(m.MinTimeMs)) + i-- + dAtA[i] = 0x20 } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&ActiveSeriesRequest{`, - `Matchers:` + repeatedStringForMatchers + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `}`, - }, "") - return s -} -func (this *QueryStreamResponse) String() string { - if this == nil { - return "nil" + if len(m.Entities) > 0 { + for iNdEx := len(m.Entities) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entities[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintIngester(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - repeatedStringForStreamingSeries := "[]QueryStreamSeries{" - for _, f := range this.StreamingSeries { - repeatedStringForStreamingSeries += strings.Replace(strings.Replace(f.String(), "QueryStreamSeries", "QueryStreamSeries", 1), `&`, ``, 1) + "," + if len(m.Descriptive) > 0 { + for k := range m.Descriptive { + v := m.Descriptive[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintIngester(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintIngester(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintIngester(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } - repeatedStringForStreamingSeries += "}" - repeatedStringForStreamingSeriesChunks := "[]QueryStreamSeriesChunks{" - for _, f := range this.StreamingSeriesChunks { - repeatedStringForStreamingSeriesChunks += strings.Replace(strings.Replace(f.String(), "QueryStreamSeriesChunks", "QueryStreamSeriesChunks", 1), `&`, ``, 1) + "," + if len(m.Identifying) > 0 { + for k := range m.Identifying { + v := m.Identifying[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintIngester(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintIngester(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintIngester(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 
0xa + } } - repeatedStringForStreamingSeriesChunks += "}" - s := strings.Join([]string{`&QueryStreamResponse{`, - `StreamingSeries:` + repeatedStringForStreamingSeries + `,`, - `IsEndOfSeriesStream:` + fmt.Sprintf("%v", this.IsEndOfSeriesStream) + `,`, - `StreamingSeriesChunks:` + repeatedStringForStreamingSeriesChunks + `,`, - `}`, - }, "") - return s + return len(dAtA) - i, nil } -func (this *QueryStreamSeries) String() string { - if this == nil { - return "nil" + +func (m *EntityData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - s := strings.Join([]string{`&QueryStreamSeries{`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `ChunkCount:` + fmt.Sprintf("%v", this.ChunkCount) + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *QueryStreamSeriesChunks) String() string { - if this == nil { - return "nil" - } - repeatedStringForChunks := "[]Chunk{" - for _, f := range this.Chunks { - repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + "," - } - repeatedStringForChunks += "}" - s := strings.Join([]string{`&QueryStreamSeriesChunks{`, - `SeriesIndex:` + fmt.Sprintf("%v", this.SeriesIndex) + `,`, - `Chunks:` + repeatedStringForChunks + `,`, - `}`, - }, "") - return s + +func (m *EntityData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (this *ExemplarQueryResponse) String() string { - if this == nil { - return "nil" + +func (m *EntityData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Description) > 0 { + for k := range m.Description { + v := m.Description[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintIngester(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintIngester(dAtA, 
i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintIngester(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } } - repeatedStringForTimeseries := "[]TimeSeries{" - for _, f := range this.Timeseries { - repeatedStringForTimeseries += fmt.Sprintf("%v", f) + "," + if len(m.Id) > 0 { + for k := range m.Id { + v := m.Id[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintIngester(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintIngester(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintIngester(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } - repeatedStringForTimeseries += "}" - s := strings.Join([]string{`&ExemplarQueryResponse{`, - `Timeseries:` + repeatedStringForTimeseries + `,`, - `}`, - }, "") - return s -} -func (this *LabelValuesRequest) String() string { - if this == nil { - return "nil" + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintIngester(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa } - s := strings.Join([]string{`&LabelValuesRequest{`, - `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + strings.Replace(this.Matchers.String(), "LabelMatchers", "LabelMatchers", 1) + `,`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `}`, - }, "") - return s + return len(dAtA) - i, nil } -func (this *LabelValuesResponse) String() string { - if this == nil { - return "nil" + +func encodeVarintIngester(dAtA []byte, offset int, v uint64) int { + offset -= sovIngester(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - s := strings.Join([]string{`&LabelValuesResponse{`, - `LabelValues:` + fmt.Sprintf("%v", this.LabelValues) + `,`, - `}`, - }, "") - return s + dAtA[offset] = uint8(v) + return base } -func (this 
*LabelNamesRequest) String() string { - if this == nil { - return "nil" +func (m *LabelNamesAndValuesRequest) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&LabelNamesRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + strings.Replace(this.Matchers.String(), "LabelMatchers", "LabelMatchers", 1) + `,`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `}`, - }, "") - return s -} -func (this *LabelNamesResponse) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } } - s := strings.Join([]string{`&LabelNamesResponse{`, - `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, - `}`, - }, "") - return s -} -func (this *UserStatsRequest) String() string { - if this == nil { - return "nil" + if m.CountMethod != 0 { + n += 1 + sovIngester(uint64(m.CountMethod)) } - s := strings.Join([]string{`&UserStatsRequest{`, - `CountMethod:` + fmt.Sprintf("%v", this.CountMethod) + `,`, - `}`, - }, "") - return s + return n } -func (this *UserStatsResponse) String() string { - if this == nil { - return "nil" + +func (m *LabelNamesAndValuesResponse) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&UserStatsResponse{`, - `IngestionRate:` + fmt.Sprintf("%v", this.IngestionRate) + `,`, - `NumSeries:` + fmt.Sprintf("%v", this.NumSeries) + `,`, - `ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`, - `RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`, - `}`, - }, "") - return s -} -func (this *UserIDStatsResponse) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } } - s := 
strings.Join([]string{`&UserIDStatsResponse{`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Data:` + strings.Replace(this.Data.String(), "UserStatsResponse", "UserStatsResponse", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *UsersStatsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForStats := "[]*UserIDStatsResponse{" - for _, f := range this.Stats { - repeatedStringForStats += strings.Replace(f.String(), "UserIDStatsResponse", "UserIDStatsResponse", 1) + "," + +func (m *LabelValues) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForStats += "}" - s := strings.Join([]string{`&UsersStatsResponse{`, - `Stats:` + repeatedStringForStats + `,`, - `}`, - }, "") - return s -} -func (this *MetricsForLabelMatchersRequest) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.LabelName) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) } - repeatedStringForMatchersSet := "[]*LabelMatchers{" - for _, f := range this.MatchersSet { - repeatedStringForMatchersSet += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovIngester(uint64(l)) + } } - repeatedStringForMatchersSet += "}" - s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `MatchersSet:` + repeatedStringForMatchersSet + `,`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `}`, - }, "") - return s + return n } -func (this *MetricsForLabelMatchersResponse) String() string { - if this == nil { - return "nil" + +func (m *LabelValuesCardinalityRequest) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForMetric := "[]*Metric{" - for _, f := range this.Metric { - repeatedStringForMetric += strings.Replace(fmt.Sprintf("%v", f), "Metric", 
"mimirpb.Metric", 1) + "," + var l int + _ = l + if len(m.LabelNames) > 0 { + for _, s := range m.LabelNames { + l = len(s) + n += 1 + l + sovIngester(uint64(l)) + } } - repeatedStringForMetric += "}" - s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`, - `Metric:` + repeatedStringForMetric + `,`, - `}`, - }, "") - return s -} -func (this *MetricsMetadataRequest) String() string { - if this == nil { - return "nil" + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } } - s := strings.Join([]string{`&MetricsMetadataRequest{`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `LimitPerMetric:` + fmt.Sprintf("%v", this.LimitPerMetric) + `,`, - `Metric:` + fmt.Sprintf("%v", this.Metric) + `,`, - `}`, - }, "") - return s + if m.CountMethod != 0 { + n += 1 + sovIngester(uint64(m.CountMethod)) + } + return n } -func (this *MetricsMetadataResponse) String() string { - if this == nil { - return "nil" + +func (m *LabelValuesCardinalityResponse) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForMetadata := "[]*MetricMetadata{" - for _, f := range this.Metadata { - repeatedStringForMetadata += strings.Replace(fmt.Sprintf("%v", f), "MetricMetadata", "mimirpb.MetricMetadata", 1) + "," + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } } - repeatedStringForMetadata += "}" - s := strings.Join([]string{`&MetricsMetadataResponse{`, - `Metadata:` + repeatedStringForMetadata + `,`, - `}`, - }, "") - return s + return n } -func (this *ActiveSeriesResponse) String() string { - if this == nil { - return "nil" + +func (m *LabelValueSeriesCount) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForMetric := "[]*Metric{" - for _, f := range this.Metric { - repeatedStringForMetric += strings.Replace(fmt.Sprintf("%v", f), "Metric", "mimirpb.Metric", 1) + "," + var l int + _ = l + l = len(m.LabelName) + if l > 0 
{ + n += 1 + l + sovIngester(uint64(l)) } - repeatedStringForMetric += "}" - s := strings.Join([]string{`&ActiveSeriesResponse{`, - `Metric:` + repeatedStringForMetric + `,`, - `BucketCount:` + fmt.Sprintf("%v", this.BucketCount) + `,`, - `}`, - }, "") - return s + if len(m.LabelValueSeries) > 0 { + for k, v := range m.LabelValueSeries { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovIngester(uint64(len(k))) + 1 + sovIngester(uint64(v)) + n += mapEntrySize + 1 + sovIngester(uint64(mapEntrySize)) + } + } + return n } -func (this *Chunk) String() string { - if this == nil { - return "nil" + +func (m *QueryRequest) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Chunk{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Encoding:` + fmt.Sprintf("%v", this.Encoding) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + if m.ProjectionInclude { + n += 1 + 1 + } + if len(m.ProjectionLabels) > 0 { + for _, s := range m.ProjectionLabels { + l = len(s) + n += 1 + l + sovIngester(uint64(l)) + } + } + if m.StreamingChunksBatchSize != 0 { + n += 2 + sovIngester(uint64(m.StreamingChunksBatchSize)) + } + return n } -func (this *LabelMatchers) String() string { - if this == nil { - return "nil" + +func (m *ExemplarQueryRequest) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForMatchers := "[]*LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + 
sovIngester(uint64(m.StartTimestampMs)) } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&LabelMatchers{`, - `Matchers:` + repeatedStringForMatchers + `,`, - `}`, - }, "") - return s + if m.EndTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n } -func (this *LabelMatcher) String() string { - if this == nil { - return "nil" + +func (m *ActiveSeriesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + if m.Type != 0 { + n += 1 + sovIngester(uint64(m.Type)) + } + return n +} + +func (m *QueryStreamResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StreamingSeries) > 0 { + for _, e := range m.StreamingSeries { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + if m.IsEndOfSeriesStream { + n += 2 + } + if len(m.StreamingSeriesChunks) > 0 { + for _, e := range m.StreamingSeriesChunks { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *QueryStreamSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + if m.ChunkCount != 0 { + n += 1 + sovIngester(uint64(m.ChunkCount)) + } + return n +} + +func (m *QueryStreamSeriesChunks) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SeriesIndex != 0 { + n += 1 + sovIngester(uint64(m.SeriesIndex)) + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *ExemplarQueryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := 
range m.Timeseries { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *LabelValuesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LabelName) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) + } + if m.StartTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.EndTimestampMs)) + } + if m.Matchers != nil { + l = m.Matchers.Size() + n += 1 + l + sovIngester(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovIngester(uint64(m.Limit)) + } + return n +} + +func (m *LabelValuesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelValues) > 0 { + for _, s := range m.LabelValues { + l = len(s) + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *LabelNamesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.EndTimestampMs)) + } + if m.Matchers != nil { + l = m.Matchers.Size() + n += 1 + l + sovIngester(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovIngester(uint64(m.Limit)) + } + return n +} + +func (m *LabelNamesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelNames) > 0 { + for _, s := range m.LabelNames { + l = len(s) + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *UserStatsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CountMethod != 0 { + n += 1 + sovIngester(uint64(m.CountMethod)) + } + return n +} + +func (m *UserStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IngestionRate != 0 { + n += 9 + } + if m.NumSeries != 0 { + n += 1 + sovIngester(uint64(m.NumSeries)) + } + if m.ApiIngestionRate != 0 { + n += 9 + } + if m.RuleIngestionRate != 0 { 
+ n += 9 + } + return n +} + +func (m *UserIDStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) + } + if m.Data != nil { + l = m.Data.Size() + n += 1 + l + sovIngester(uint64(l)) + } + return n +} + +func (m *UsersStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *MetricsForLabelMatchersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.EndTimestampMs)) + } + if len(m.MatchersSet) > 0 { + for _, e := range m.MatchersSet { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + if m.Limit != 0 { + n += 1 + sovIngester(uint64(m.Limit)) + } + return n +} + +func (m *MetricsForLabelMatchersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Metric) > 0 { + for _, e := range m.Metric { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *MetricsMetadataRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Limit != 0 { + n += 1 + sozIngester(uint64(m.Limit)) + } + if m.LimitPerMetric != 0 { + n += 1 + sozIngester(uint64(m.LimitPerMetric)) + } + l = len(m.Metric) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) + } + return n +} + +func (m *MetricsMetadataResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Metadata) > 0 { + for _, e := range m.Metadata { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *ActiveSeriesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Metric) > 0 { + for _, e := range m.Metric { + l 
= e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + if len(m.BucketCount) > 0 { + l = 0 + for _, e := range m.BucketCount { + l += sovIngester(uint64(e)) + } + n += 1 + sovIngester(uint64(l)) + l + } + return n +} + +func (m *Chunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.EndTimestampMs)) + } + if m.Encoding != 0 { + n += 1 + sovIngester(uint64(m.Encoding)) + } + l = m.Data.Size() + n += 1 + l + sovIngester(uint64(l)) + return n +} + +func (m *LabelMatchers) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *LabelMatcher) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovIngester(uint64(m.Type)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) + } + return n +} + +func (m *ResourceAttributesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovIngester(uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + if m.Limit != 0 { + n += 1 + sovIngester(uint64(m.Limit)) + } + if len(m.ResourceAttrFilters) > 0 { + for _, e := range m.ResourceAttrFilters { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *ResourceAttributesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + 
} + } + return n +} + +func (m *SeriesResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + return n +} + +func (m *ResourceVersionData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Identifying) > 0 { + for k, v := range m.Identifying { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovIngester(uint64(len(k))) + 1 + len(v) + sovIngester(uint64(len(v))) + n += mapEntrySize + 1 + sovIngester(uint64(mapEntrySize)) + } + } + if len(m.Descriptive) > 0 { + for k, v := range m.Descriptive { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovIngester(uint64(len(k))) + 1 + len(v) + sovIngester(uint64(len(v))) + n += mapEntrySize + 1 + sovIngester(uint64(mapEntrySize)) + } + } + if len(m.Entities) > 0 { + for _, e := range m.Entities { + l = e.Size() + n += 1 + l + sovIngester(uint64(l)) + } + } + if m.MinTimeMs != 0 { + n += 1 + sovIngester(uint64(m.MinTimeMs)) + } + if m.MaxTimeMs != 0 { + n += 1 + sovIngester(uint64(m.MaxTimeMs)) + } + return n +} + +func (m *EntityData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovIngester(uint64(l)) + } + if len(m.Id) > 0 { + for k, v := range m.Id { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovIngester(uint64(len(k))) + 1 + len(v) + sovIngester(uint64(len(v))) + n += mapEntrySize + 1 + sovIngester(uint64(mapEntrySize)) + } + } + if len(m.Description) > 0 { + for k, v := range m.Description { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovIngester(uint64(len(k))) + 1 + len(v) + sovIngester(uint64(len(v))) + n += mapEntrySize + 1 + sovIngester(uint64(mapEntrySize)) + } + } + return n +} + +func sovIngester(x uint64) (n int) { + return (math_bits.Len64(x|1) + 
6) / 7 +} +func sozIngester(x uint64) (n int) { + return sovIngester(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LabelNamesAndValuesRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&LabelNamesAndValuesRequest{`, + `Matchers:` + repeatedStringForMatchers + `,`, + `CountMethod:` + fmt.Sprintf("%v", this.CountMethod) + `,`, + `}`, + }, "") + return s +} +func (this *LabelNamesAndValuesResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]*LabelValues{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(f.String(), "LabelValues", "LabelValues", 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LabelNamesAndValuesResponse{`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LabelValues) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelValues{`, + `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *LabelValuesCardinalityRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&LabelValuesCardinalityRequest{`, + `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, + `CountMethod:` + fmt.Sprintf("%v", this.CountMethod) + `,`, + `}`, + }, "") + return s +} +func (this *LabelValuesCardinalityResponse) String() 
string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]*LabelValueSeriesCount{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(f.String(), "LabelValueSeriesCount", "LabelValueSeriesCount", 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LabelValuesCardinalityResponse{`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LabelValueSeriesCount) String() string { + if this == nil { + return "nil" + } + keysForLabelValueSeries := make([]string, 0, len(this.LabelValueSeries)) + for k, _ := range this.LabelValueSeries { + keysForLabelValueSeries = append(keysForLabelValueSeries, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabelValueSeries) + mapStringForLabelValueSeries := "map[string]uint64{" + for _, k := range keysForLabelValueSeries { + mapStringForLabelValueSeries += fmt.Sprintf("%v: %v,", k, this.LabelValueSeries[k]) + } + mapStringForLabelValueSeries += "}" + s := strings.Join([]string{`&LabelValueSeriesCount{`, + `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, + `LabelValueSeries:` + mapStringForLabelValueSeries + `,`, + `}`, + }, "") + return s +} +func (this *QueryRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&QueryRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, + `StreamingChunksBatchSize:` + fmt.Sprintf("%v", this.StreamingChunksBatchSize) + `,`, + `}`, + }, "") + return s +} +func (this *ExemplarQueryRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := 
"[]*LabelMatchers{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&ExemplarQueryRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, + `}`, + }, "") + return s +} +func (this *ActiveSeriesRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&ActiveSeriesRequest{`, + `Matchers:` + repeatedStringForMatchers + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *QueryStreamResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForStreamingSeries := "[]QueryStreamSeries{" + for _, f := range this.StreamingSeries { + repeatedStringForStreamingSeries += strings.Replace(strings.Replace(f.String(), "QueryStreamSeries", "QueryStreamSeries", 1), `&`, ``, 1) + "," + } + repeatedStringForStreamingSeries += "}" + repeatedStringForStreamingSeriesChunks := "[]QueryStreamSeriesChunks{" + for _, f := range this.StreamingSeriesChunks { + repeatedStringForStreamingSeriesChunks += strings.Replace(strings.Replace(f.String(), "QueryStreamSeriesChunks", "QueryStreamSeriesChunks", 1), `&`, ``, 1) + "," + } + repeatedStringForStreamingSeriesChunks += "}" + s := strings.Join([]string{`&QueryStreamResponse{`, + `StreamingSeries:` + repeatedStringForStreamingSeries + `,`, + `IsEndOfSeriesStream:` + fmt.Sprintf("%v", this.IsEndOfSeriesStream) + `,`, + `StreamingSeriesChunks:` + repeatedStringForStreamingSeriesChunks + `,`, + `}`, + }, "") + return s +} +func 
(this *QueryStreamSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryStreamSeries{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `ChunkCount:` + fmt.Sprintf("%v", this.ChunkCount) + `,`, + `}`, + }, "") + return s +} +func (this *QueryStreamSeriesChunks) String() string { + if this == nil { + return "nil" + } + repeatedStringForChunks := "[]Chunk{" + for _, f := range this.Chunks { + repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + "," + } + repeatedStringForChunks += "}" + s := strings.Join([]string{`&QueryStreamSeriesChunks{`, + `SeriesIndex:` + fmt.Sprintf("%v", this.SeriesIndex) + `,`, + `Chunks:` + repeatedStringForChunks + `,`, + `}`, + }, "") + return s +} +func (this *ExemplarQueryResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForTimeseries := "[]TimeSeries{" + for _, f := range this.Timeseries { + repeatedStringForTimeseries += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTimeseries += "}" + s := strings.Join([]string{`&ExemplarQueryResponse{`, + `Timeseries:` + repeatedStringForTimeseries + `,`, + `}`, + }, "") + return s +} +func (this *LabelValuesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelValuesRequest{`, + `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Matchers:` + strings.Replace(this.Matchers.String(), "LabelMatchers", "LabelMatchers", 1) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `}`, + }, "") + return s +} +func (this *LabelValuesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelValuesResponse{`, + `LabelValues:` + fmt.Sprintf("%v", this.LabelValues) + `,`, + `}`, + }, "") + return s +} +func (this *LabelNamesRequest) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelNamesRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Matchers:` + strings.Replace(this.Matchers.String(), "LabelMatchers", "LabelMatchers", 1) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `}`, + }, "") + return s +} +func (this *LabelNamesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelNamesResponse{`, + `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, + `}`, + }, "") + return s +} +func (this *UserStatsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserStatsRequest{`, + `CountMethod:` + fmt.Sprintf("%v", this.CountMethod) + `,`, + `}`, + }, "") + return s +} +func (this *UserStatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserStatsResponse{`, + `IngestionRate:` + fmt.Sprintf("%v", this.IngestionRate) + `,`, + `NumSeries:` + fmt.Sprintf("%v", this.NumSeries) + `,`, + `ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`, + `RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`, + `}`, + }, "") + return s +} +func (this *UserIDStatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserIDStatsResponse{`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Data:` + strings.Replace(this.Data.String(), "UserStatsResponse", "UserStatsResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UsersStatsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForStats := "[]*UserIDStatsResponse{" + for _, f := range this.Stats { + repeatedStringForStats += strings.Replace(f.String(), "UserIDStatsResponse", "UserIDStatsResponse", 1) + "," + } + repeatedStringForStats += "}" + s := 
strings.Join([]string{`&UsersStatsResponse{`, + `Stats:` + repeatedStringForStats + `,`, + `}`, + }, "") + return s +} +func (this *MetricsForLabelMatchersRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchersSet := "[]*LabelMatchers{" + for _, f := range this.MatchersSet { + repeatedStringForMatchersSet += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," + } + repeatedStringForMatchersSet += "}" + s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `MatchersSet:` + repeatedStringForMatchersSet + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `}`, + }, "") + return s +} +func (this *MetricsForLabelMatchersResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetric := "[]*Metric{" + for _, f := range this.Metric { + repeatedStringForMetric += strings.Replace(fmt.Sprintf("%v", f), "Metric", "mimirpb.Metric", 1) + "," + } + repeatedStringForMetric += "}" + s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`, + `Metric:` + repeatedStringForMetric + `,`, + `}`, + }, "") + return s +} +func (this *MetricsMetadataRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricsMetadataRequest{`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `LimitPerMetric:` + fmt.Sprintf("%v", this.LimitPerMetric) + `,`, + `Metric:` + fmt.Sprintf("%v", this.Metric) + `,`, + `}`, + }, "") + return s +} +func (this *MetricsMetadataResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetadata := "[]*MetricMetadata{" + for _, f := range this.Metadata { + repeatedStringForMetadata += strings.Replace(fmt.Sprintf("%v", f), "MetricMetadata", "mimirpb.MetricMetadata", 1) + "," + } + repeatedStringForMetadata += "}" + s := 
strings.Join([]string{`&MetricsMetadataResponse{`, + `Metadata:` + repeatedStringForMetadata + `,`, + `}`, + }, "") + return s +} +func (this *ActiveSeriesResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetric := "[]*Metric{" + for _, f := range this.Metric { + repeatedStringForMetric += strings.Replace(fmt.Sprintf("%v", f), "Metric", "mimirpb.Metric", 1) + "," + } + repeatedStringForMetric += "}" + s := strings.Join([]string{`&ActiveSeriesResponse{`, + `Metric:` + repeatedStringForMetric + `,`, + `BucketCount:` + fmt.Sprintf("%v", this.BucketCount) + `,`, + `}`, + }, "") + return s +} +func (this *Chunk) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Chunk{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Encoding:` + fmt.Sprintf("%v", this.Encoding) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *LabelMatchers) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&LabelMatchers{`, + `Matchers:` + repeatedStringForMatchers + `,`, + `}`, + }, "") + return s +} +func (this *LabelMatcher) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelMatcher{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributesRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]*LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += 
strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&ResourceAttributesRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributesResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]*SeriesResourceAttributes{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(f.String(), "SeriesResourceAttributes", "SeriesResourceAttributes", 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceAttributesResponse{`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SeriesResourceAttributes) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]*ResourceVersionData{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(f.String(), "ResourceVersionData", "ResourceVersionData", 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&SeriesResourceAttributes{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `}`, + }, "") + return s +} +func (this *ResourceVersionData) String() string { + if this == nil { + return "nil" + } + repeatedStringForEntities := "[]*EntityData{" + for _, f := range this.Entities { + repeatedStringForEntities += strings.Replace(f.String(), "EntityData", "EntityData", 1) + "," + } + repeatedStringForEntities += "}" + keysForIdentifying := make([]string, 0, len(this.Identifying)) + for k, _ := range this.Identifying { + keysForIdentifying = append(keysForIdentifying, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForIdentifying) + 
mapStringForIdentifying := "map[string]string{" + for _, k := range keysForIdentifying { + mapStringForIdentifying += fmt.Sprintf("%v: %v,", k, this.Identifying[k]) + } + mapStringForIdentifying += "}" + keysForDescriptive := make([]string, 0, len(this.Descriptive)) + for k, _ := range this.Descriptive { + keysForDescriptive = append(keysForDescriptive, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDescriptive) + mapStringForDescriptive := "map[string]string{" + for _, k := range keysForDescriptive { + mapStringForDescriptive += fmt.Sprintf("%v: %v,", k, this.Descriptive[k]) + } + mapStringForDescriptive += "}" + s := strings.Join([]string{`&ResourceVersionData{`, + `Identifying:` + mapStringForIdentifying + `,`, + `Descriptive:` + mapStringForDescriptive + `,`, + `Entities:` + repeatedStringForEntities + `,`, + `MinTimeMs:` + fmt.Sprintf("%v", this.MinTimeMs) + `,`, + `MaxTimeMs:` + fmt.Sprintf("%v", this.MaxTimeMs) + `,`, + `}`, + }, "") + return s +} +func (this *EntityData) String() string { + if this == nil { + return "nil" + } + keysForId := make([]string, 0, len(this.Id)) + for k, _ := range this.Id { + keysForId = append(keysForId, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForId) + mapStringForId := "map[string]string{" + for _, k := range keysForId { + mapStringForId += fmt.Sprintf("%v: %v,", k, this.Id[k]) + } + mapStringForId += "}" + keysForDescription := make([]string, 0, len(this.Description)) + for k, _ := range this.Description { + keysForDescription = append(keysForDescription, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDescription) + mapStringForDescription := "map[string]string{" + for _, k := range keysForDescription { + mapStringForDescription += fmt.Sprintf("%v: %v,", k, this.Description[k]) + } + mapStringForDescription += "}" + s := strings.Join([]string{`&EntityData{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Id:` + mapStringForId + `,`, + `Description:` + mapStringForDescription + `,`, + 
`}`, + }, "") + return s +} +func valueToStringIngester(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LabelNamesAndValuesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesAndValuesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesAndValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountMethod", wireType) + } + m.CountMethod = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.CountMethod |= CountMethod(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelNamesAndValuesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesAndValuesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesAndValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &LabelValues{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 
0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValues) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValues: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValues: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesCardinalityRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesCardinalityRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesCardinalityRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountMethod", wireType) + } + m.CountMethod = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CountMethod |= CountMethod(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesCardinalityResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesCardinalityResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesCardinalityResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &LabelValueSeriesCount{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValueSeriesCount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValueSeriesCount: wiretype end group for non-group") + } + if fieldNum <= 0 { 
+ return fmt.Errorf("proto: LabelValueSeriesCount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelValueSeries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelValueSeries == nil { + m.LabelValueSeries = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthIngester + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthIngester + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.LabelValueSeries[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&LabelMatcher{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func valueToStringIngester(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *LabelNamesAndValuesRequest) Unmarshal(dAtA []byte) error { +func (m 
*QueryRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6074,13 +8223,51 @@ func (m *LabelNamesAndValuesRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelNamesAndValuesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesAndValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) } @@ -6114,11 +8301,11 @@ func (m *LabelNamesAndValuesRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountMethod", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProjectionInclude", wireType) } - m.CountMethod = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowIngester @@ -6128,7 +8315,59 @@ func (m *LabelNamesAndValuesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CountMethod |= CountMethod(b&0x7F) << shift + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProjectionInclude = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProjectionLabels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProjectionLabels = append(m.ProjectionLabels, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 100: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunksBatchSize", wireType) + } + m.StreamingChunksBatchSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StreamingChunksBatchSize |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6154,7 +8393,7 @@ func (m *LabelNamesAndValuesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelNamesAndValuesResponse) Unmarshal(dAtA []byte) error { +func (m *ExemplarQueryRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6177,15 +8416,53 @@ func (m *LabelNamesAndValuesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelNamesAndValuesResponse: wiretype end group for non-group") + return 
fmt.Errorf("proto: ExemplarQueryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesAndValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExemplarQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6212,8 +8489,8 @@ func (m *LabelNamesAndValuesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &LabelValues{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Matchers = append(m.Matchers, &LabelMatchers{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6238,7 +8515,7 @@ func (m *LabelNamesAndValuesResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValues) Unmarshal(dAtA []byte) error { +func (m *ActiveSeriesRequest) Unmarshal(dAtA 
[]byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6261,17 +8538,17 @@ func (m *LabelValues) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValues: wiretype end group for non-group") + return fmt.Errorf("proto: ActiveSeriesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValues: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ActiveSeriesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -6281,29 +8558,31 @@ func (m *LabelValues) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - m.LabelName = string(dAtA[iNdEx:postIndex]) + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var stringLen uint64 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -6312,25 +8591,12 @@ func (m *LabelValues) Unmarshal(dAtA []byte) 
error { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF + iNdEx++ + m.Type |= ActiveSeriesRequest_RequestType(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -6352,7 +8618,7 @@ func (m *LabelValues) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValuesCardinalityRequest) Unmarshal(dAtA []byte) error { +func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6375,17 +8641,17 @@ func (m *LabelValuesCardinalityRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValuesCardinalityRequest: wiretype end group for non-group") + return fmt.Errorf("proto: QueryStreamResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesCardinalityRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StreamingSeries", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -6395,27 +8661,49 @@ func (m *LabelValuesCardinalityRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) + m.StreamingSeries = append(m.StreamingSeries, QueryStreamSeries{}) + if err := m.StreamingSeries[len(m.StreamingSeries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsEndOfSeriesStream", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsEndOfSeriesStream = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StreamingSeriesChunks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6442,30 +8730,11 @@ func (m *LabelValuesCardinalityRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.StreamingSeriesChunks = append(m.StreamingSeriesChunks, QueryStreamSeriesChunks{}) + if err := m.StreamingSeriesChunks[len(m.StreamingSeriesChunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountMethod", wireType) - } - m.CountMethod = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CountMethod |= CountMethod(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -6487,7 +8756,7 @@ func (m *LabelValuesCardinalityRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValuesCardinalityResponse) Unmarshal(dAtA []byte) error { +func (m *QueryStreamSeries) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6510,15 +8779,15 @@ func (m *LabelValuesCardinalityResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValuesCardinalityResponse: wiretype end group for non-group") + return fmt.Errorf("proto: QueryStreamSeries: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesCardinalityResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryStreamSeries: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6545,11 +8814,30 @@ func (m *LabelValuesCardinalityResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, &LabelValueSeriesCount{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Labels = append(m.Labels, github_com_grafana_mimir_pkg_mimirpb.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkCount", wireType) + } + m.ChunkCount = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChunkCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -6571,7 +8859,7 @@ func (m *LabelValuesCardinalityResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValueSeriesCount) Unmarshal(dAtA []byte) error { +func (m *QueryStreamSeriesChunks) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6594,17 +8882,17 @@ func (m *LabelValueSeriesCount) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValueSeriesCount: wiretype end group for non-group") + return fmt.Errorf("proto: QueryStreamSeriesChunks: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValueSeriesCount: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QueryStreamSeriesChunks: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesIndex", wireType) } - var stringLen uint64 + m.SeriesIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -6614,27 +8902,14 @@ func (m *LabelValueSeriesCount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.SeriesIndex |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelName = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelValueSeries", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6661,89 +8936,94 @@ func (m *LabelValueSeriesCount) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LabelValueSeries == nil { - m.LabelValueSeries = make(map[string]uint64) + m.Chunks = append(m.Chunks, Chunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var mapkey string - var mapvalue uint64 - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthIngester - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthIngester - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - } 
else { - iNdEx = entryPreIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExemplarQueryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemplarQueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemplarQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.LabelValueSeries[mapkey] = mapvalue + if msglen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, 
mimirpb.TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6766,7 +9046,7 @@ func (m *LabelValueSeriesCount) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryRequest) Unmarshal(dAtA []byte) error { +func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6789,13 +9069,45 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) } @@ -6814,7 +9126,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { break } } - case 2: + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) } @@ 
-6833,7 +9145,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { break } } - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) } @@ -6862,16 +9174,18 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Matchers == nil { + m.Matchers = &LabelMatchers{} + } + if err := m.Matchers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectionInclude", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - var v int + m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -6881,15 +9195,64 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Limit |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.ProjectionInclude = bool(v != 0) - case 5: + default: + iNdEx = preIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectionLabels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6917,27 +9280,8 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ProjectionLabels = append(m.ProjectionLabels, string(dAtA[iNdEx:postIndex])) + m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 100: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunksBatchSize", wireType) - } - m.StreamingChunksBatchSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StreamingChunksBatchSize |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -6959,7 +9303,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExemplarQueryRequest) Unmarshal(dAtA []byte) error { +func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6982,10 +9326,10 @@ func (m *ExemplarQueryRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExemplarQueryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExemplarQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return 
fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -7055,11 +9399,32 @@ func (m *ExemplarQueryRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Matchers = append(m.Matchers, &LabelMatchers{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Matchers == nil { + m.Matchers = &LabelMatchers{} + } + if err := m.Matchers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -7081,7 +9446,7 @@ func (m *ExemplarQueryRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *ActiveSeriesRequest) Unmarshal(dAtA []byte) error { +func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7104,51 +9469,17 @@ func (m *ActiveSeriesRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ActiveSeriesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ActiveSeriesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) } - m.Type = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -7158,11 +9489,24 @@ func (m *ActiveSeriesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= ActiveSeriesRequest_RequestType(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -7184,7 +9528,7 @@ func (m *ActiveSeriesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { +func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7207,71 +9551,17 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryStreamResponse: wiretype end group for 
non-group") + return fmt.Errorf("proto: UserStatsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UserStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamingSeries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StreamingSeries = append(m.StreamingSeries, QueryStreamSeries{}) - if err := m.StreamingSeries[len(m.StreamingSeries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsEndOfSeriesStream", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsEndOfSeriesStream = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamingSeriesChunks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CountMethod", wireType) } - var msglen int + m.CountMethod = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -7281,26 +9571,11 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << 
shift + m.CountMethod |= CountMethod(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StreamingSeriesChunks = append(m.StreamingSeriesChunks, QueryStreamSeriesChunks{}) - if err := m.StreamingSeriesChunks[len(m.StreamingSeriesChunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -7322,7 +9597,7 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryStreamSeries) Unmarshal(dAtA []byte) error { +func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7345,51 +9620,28 @@ func (m *QueryStreamSeries) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryStreamSeries: wiretype end group for non-group") + return fmt.Errorf("proto: UserStatsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryStreamSeries: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UserStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field IngestionRate", 
wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, github_com_grafana_mimir_pkg_mimirpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.IngestionRate = float64(math.Float64frombits(v)) case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ChunkCount", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumSeries", wireType) } - m.ChunkCount = 0 + m.NumSeries = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -7399,11 +9651,33 @@ func (m *QueryStreamSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ChunkCount |= int64(b&0x7F) << shift + m.NumSeries |= uint64(b&0x7F) << shift if b < 0x80 { break } } + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ApiIngestionRate", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ApiIngestionRate = float64(math.Float64frombits(v)) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleIngestionRate", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.RuleIngestionRate = float64(math.Float64frombits(v)) default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -7425,7 +9699,7 @@ func (m *QueryStreamSeries) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryStreamSeriesChunks) Unmarshal(dAtA []byte) error { +func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7448,17 +9722,17 @@ func (m 
*QueryStreamSeriesChunks) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryStreamSeriesChunks: wiretype end group for non-group") + return fmt.Errorf("proto: UserIDStatsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryStreamSeriesChunks: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UserIDStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SeriesIndex", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) } - m.SeriesIndex = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -7468,14 +9742,27 @@ func (m *QueryStreamSeriesChunks) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SeriesIndex |= uint64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7502,8 +9789,10 @@ func (m *QueryStreamSeriesChunks) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Chunks = append(m.Chunks, Chunk{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Data == nil { + m.Data = &UserStatsResponse{} + } + if err := 
m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7528,7 +9817,7 @@ func (m *QueryStreamSeriesChunks) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExemplarQueryResponse) Unmarshal(dAtA []byte) error { +func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7551,15 +9840,15 @@ func (m *ExemplarQueryResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExemplarQueryResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UsersStatsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExemplarQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UsersStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7586,8 +9875,8 @@ func (m *ExemplarQueryResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Timeseries = append(m.Timeseries, mimirpb.TimeSeries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Stats = append(m.Stats, &UserIDStatsResponse{}) + if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7612,7 +9901,7 @@ func (m *ExemplarQueryResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { +func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7635,45 +9924,13 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 
3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + return fmt.Errorf("proto: MetricsForLabelMatchersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsForLabelMatchersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) } @@ -7692,7 +9949,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { break } } - case 3: + case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) } @@ -7711,9 +9968,9 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { break } } - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchersSet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7740,14 +9997,12 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - if m.Matchers == nil { - m.Matchers = &LabelMatchers{} - } - if err := m.Matchers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.MatchersSet = append(m.MatchersSet, &LabelMatchers{}) + if err := m.MatchersSet[len(m.MatchersSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } @@ -7787,7 +10042,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { +func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7810,17 +10065,17 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MetricsForLabelMatchersResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MetricsForLabelMatchersResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -7830,23 +10085,25 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + 
intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) + m.Metric = append(m.Metric, &mimirpb.Metric{}) + if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -7869,7 +10126,7 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { +func (m *MetricsMetadataRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7892,17 +10149,17 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MetricsMetadataRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MetricsMetadataRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - m.StartTimestampMs = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -7912,16 +10169,18 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Limit = v case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + return fmt.Errorf("proto: wrong wireType = %d 
for field LimitPerMetric", wireType) } - m.EndTimestampMs = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -7931,16 +10190,18 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.LimitPerMetric = v case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -7950,47 +10211,24 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Matchers == nil { - m.Matchers = &LabelMatchers{} - } - if err := m.Matchers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Metric = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -8012,7 +10250,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m 
*LabelNamesResponse) Unmarshal(dAtA []byte) error { +func (m *MetricsMetadataResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8035,17 +10273,17 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MetricsMetadataResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MetricsMetadataResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8055,23 +10293,25 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) + m.Metadata = append(m.Metadata, &mimirpb.MetricMetadata{}) + if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -8094,7 +10334,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { +func (m *ActiveSeriesResponse) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8117,17 +10357,17 @@ func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UserStatsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ActiveSeriesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UserStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ActiveSeriesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountMethod", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) } - m.CountMethod = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8137,113 +10377,102 @@ func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CountMethod |= CountMethod(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthIngester } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b 
< 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field IngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF + m.Metric = append(m.Metric, &mimirpb.Metric{}) + if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.IngestionRate = float64(math.Float64frombits(v)) + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumSeries", wireType) - } - m.NumSeries = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BucketCount = append(m.BucketCount, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF + elementCount = count + if elementCount != 0 && len(m.BucketCount) == 0 { + m.BucketCount = make([]uint64, 0, elementCount) } - b := dAtA[iNdEx] - iNdEx++ - m.NumSeries |= uint64(b&0x7F) << shift - if b < 0x80 { - break + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BucketCount = append(m.BucketCount, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field BucketCount", wireType) } - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiIngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ApiIngestionRate = float64(math.Float64frombits(v)) - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field RuleIngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.RuleIngestionRate = float64(math.Float64frombits(v)) default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -8265,7 +10494,7 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { +func (m *Chunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8288,17 +10517,17 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UserIDStatsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: Chunk: wiretype end group for non-group") } if fieldNum <= 0 { - return 
fmt.Errorf("proto: UserIDStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) } - var stringLen uint64 + m.StartTimestampMs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8308,29 +10537,54 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.StartTimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Encoding", wireType) } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.Encoding = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Encoding |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift 
+= 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8340,24 +10594,21 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Data == nil { - m.Data = &UserStatsResponse{} - } if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -8383,7 +10634,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { +func (m *LabelMatchers) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8406,15 +10657,15 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UsersStatsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LabelMatchers: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UsersStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelMatchers: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8441,8 +10692,8 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Stats = append(m.Stats, &UserIDStatsResponse{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := 
m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8467,7 +10718,7 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { +func (m *LabelMatcher) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8490,17 +10741,17 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MetricsForLabelMatchersRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsForLabelMatchersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.StartTimestampMs = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8510,35 +10761,16 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift + m.Type |= MatchType(b&0x7F) << shift if b < 0x80 { break } } case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchersSet", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8548,100 +10780,29 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthIngester } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchersSet = append(m.MatchersSet, &LabelMatchers{}) - if err := m.MatchersSet[len(m.MatchersSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsForLabelMatchersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsForLabelMatchersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8651,25 +10812,23 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - m.Metric = append(m.Metric, &mimirpb.Metric{}) - if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -8692,7 +10851,7 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *MetricsMetadataRequest) Unmarshal(dAtA []byte) error { +func (m *ResourceAttributesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8715,17 +10874,17 @@ func (m *MetricsMetadataRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
MetricsMetadataRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceAttributesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsMetadataRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) } - var v int32 + m.StartTimestampMs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8735,18 +10894,16 @@ func (m *MetricsMetadataRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + m.StartTimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } } - v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) - m.Limit = v case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LimitPerMetric", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) } - var v int32 + m.EndTimestampMs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8756,18 +10913,16 @@ func (m *MetricsMetadataRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + m.EndTimestampMs |= int64(b&0x7F) << shift if b < 0x80 { break } } - v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) - m.LimitPerMetric = v case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -8777,77 +10932,48 @@ func (m *MetricsMetadataRequest) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - m.Metric = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsMetadataResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsMetadataResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsMetadataResponse: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttrFilters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8874,8 +11000,8 @@ func (m *MetricsMetadataResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Metadata = append(m.Metadata, &mimirpb.MetricMetadata{}) - if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ResourceAttrFilters = append(m.ResourceAttrFilters, &ResourceAttrFilter{}) + if err := m.ResourceAttrFilters[len(m.ResourceAttrFilters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8900,7 +11026,7 @@ func (m *MetricsMetadataResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *ActiveSeriesResponse) Unmarshal(dAtA []byte) error { +func (m *ResourceAttributesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8923,15 +11049,15 @@ func (m *ActiveSeriesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ActiveSeriesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceAttributesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ActiveSeriesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8958,87 +11084,11 @@ func (m *ActiveSeriesResponse) Unmarshal(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Metric = append(m.Metric, &mimirpb.Metric{}) - if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, &SeriesResourceAttributes{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.BucketCount = append(m.BucketCount, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.BucketCount) == 0 { - m.BucketCount = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.BucketCount = append(m.BucketCount, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field BucketCount", wireType) - } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -9060,7 +11110,7 @@ func (m *ActiveSeriesResponse) Unmarshal(dAtA []byte) 
error { } return nil } -func (m *Chunk) Unmarshal(dAtA []byte) error { +func (m *SeriesResourceAttributes) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9083,55 +11133,17 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Chunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeriesResourceAttributes: wiretype end group for non-group") } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Encoding", wireType) + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } - m.Encoding = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -9141,16 +11153,31 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
m.Encoding |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: + if msglen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_grafana_mimir_pkg_mimirpb.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -9160,22 +11187,23 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Versions = append(m.Versions, &ResourceVersionData{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9200,7 +11228,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelMatchers) Unmarshal(dAtA []byte) error { +func (m *ResourceVersionData) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9223,15 +11251,269 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelMatchers: wiretype end group for non-group") + return fmt.Errorf("proto: 
ResourceVersionData: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelMatchers: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceVersionData: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Identifying", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Identifying == nil { + m.Identifying = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthIngester + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthIngester + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + 
mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthIngester + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthIngester + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Identifying[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Descriptive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Descriptive == nil { + m.Descriptive = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthIngester + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthIngester + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthIngester + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthIngester + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Descriptive[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Entities", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9258,11 +11540,49 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Entities = append(m.Entities, &EntityData{}) + if err := m.Entities[len(m.Entities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTimeMs", wireType) + } + m.MinTimeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTimeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTimeMs", wireType) + } + m.MaxTimeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTimeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipIngester(dAtA[iNdEx:]) @@ -9284,7 +11604,7 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelMatcher) Unmarshal(dAtA []byte) error { +func (m *EntityData) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9307,17 +11627,17 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") + return fmt.Errorf("proto: EntityData: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire 
type %d)", fieldNum, wire) + return fmt.Errorf("proto: EntityData: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { + if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Type = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -9327,16 +11647,29 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= MatchType(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIngester + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIngester + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -9346,29 +11679,124 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.Id == nil { + m.Id = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthIngester + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthIngester + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthIngester + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthIngester + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Id[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowIngester @@ -9378,23 +11806,118 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthIngester } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthIngester } if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + if m.Description == nil { + m.Description = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthIngester + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthIngester + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowIngester + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthIngester + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthIngester + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipIngester(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthIngester + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Description[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/pkg/ingester/client/ingester.proto b/pkg/ingester/client/ingester.proto index ba9e364f3e3..b3b1dbed480 100644 --- a/pkg/ingester/client/ingester.proto +++ b/pkg/ingester/client/ingester.proto @@ -38,6 +38,9 @@ service Ingester { rpc ActiveSeries(ActiveSeriesRequest) returns (stream ActiveSeriesResponse) {} + // ResourceAttributes returns OTel resource attributes for series matching the matchers. + rpc ResourceAttributes(ResourceAttributesRequest) returns (stream ResourceAttributesResponse) {} + // When adding more read-path methods here, please update ingester_read_path_routes_regex in operations/mimir-mixin/config.libsonnet as well. } @@ -230,3 +233,52 @@ message LabelMatcher { string name = 2; string value = 3; } + +// ResourceAttrFilter specifies a resource attribute key:value pair for reverse lookup. +message ResourceAttrFilter { + string key = 1; + string value = 2; +} + +// ResourceAttributesRequest queries OTel resource attributes for series matching matchers. 
+message ResourceAttributesRequest { + int64 start_timestamp_ms = 1; + int64 end_timestamp_ms = 2; + repeated LabelMatcher matchers = 3; + int64 limit = 4; + // When resource_attr_filters is non-empty, use inverted index lookup instead of series matchers. + repeated ResourceAttrFilter resource_attr_filters = 5; +} + +// ResourceAttributesResponse contains batches of series with their resource attributes. +message ResourceAttributesResponse { + repeated SeriesResourceAttributes items = 1; +} + +// SeriesResourceAttributes contains resource data for a single series. +message SeriesResourceAttributes { + // The series labels. + repeated cortexpb.LabelPair labels = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "github.com/grafana/mimir/pkg/mimirpb.LabelAdapter" + ]; + + // Resource versions for this series. + repeated ResourceVersionData versions = 2; +} + +// ResourceVersionData represents a snapshot of resource data at a point in time. +message ResourceVersionData { + map identifying = 1; + map descriptive = 2; + repeated EntityData entities = 3; + int64 min_time_ms = 4; + int64 max_time_ms = 5; +} + +// EntityData represents a typed OTel entity. 
+message EntityData { + string type = 1; + map id = 2; + map description = 3; +} diff --git a/pkg/ingester/client/mimir_mock_test.go b/pkg/ingester/client/mimir_mock_test.go index 3f6d97c4f79..c4f14d96386 100644 --- a/pkg/ingester/client/mimir_mock_test.go +++ b/pkg/ingester/client/mimir_mock_test.go @@ -76,3 +76,8 @@ func (m *IngesterServerMock) ActiveSeries(req *ActiveSeriesRequest, srv Ingester args := m.Called(req, srv) return args.Error(0) } + +func (m *IngesterServerMock) ResourceAttributes(req *ResourceAttributesRequest, srv Ingester_ResourceAttributesServer) error { + args := m.Called(req, srv) + return args.Error(0) +} diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 862c0e713d2..1c52f5d43bf 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -1081,8 +1081,12 @@ func (i *Ingester) updateLimitMetrics() { // GetRef() is an extra method added to TSDB to let Mimir check before calling Add() type extendedAppender interface { - storage.Appender + storage.AppenderV2 storage.GetRef + // AppendExemplar appends an exemplar to an existing series. This is needed + // as a fallback for time series entries that contain exemplars but no samples, + // since AppenderV2.Append bundles exemplars with sample writes. + AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) } type pushStats struct { @@ -1437,7 +1441,7 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, req *mimirpb.WriteReques ) // Walk the samples, appending them to the users database - app := db.Appender(ctx).(extendedAppender) + app := db.CombinedAppender(ctx) spanlog.DebugLog("event", "got appender for timeseries", "series", len(req.Timeseries)) var activeSeries *activeseries.ActiveSeries @@ -1655,6 +1659,64 @@ func (i *Ingester) pushSamplesToAppender( ingestCreatedTimestamp := ts.CreatedTimestamp > 0 + // Build resource context once per time series. 
+ var resourceCtx *storage.ResourceContext + if ts.ResourceAttributes != nil && len(ts.ResourceAttributes.Identifying) > 0 { + metricName := nonCopiedLabels.Get(model.MetricNameLabel) + if metricName != "target_info" { + resourceCtx = &storage.ResourceContext{ + Identifying: entriesToMap(ts.ResourceAttributes.Identifying), + Descriptive: entriesToMap(ts.ResourceAttributes.Descriptive), + Entities: convertResourceEntities(ts.ResourceAttributes.Entities), + } + } + } + + // Build scope context once per time series. + var scopeCtx *storage.ScopeContext + if ts.ScopeAttributes != nil { + if ts.ScopeAttributes.Name != "" || ts.ScopeAttributes.Version != "" || ts.ScopeAttributes.SchemaURL != "" || len(ts.ScopeAttributes.Attrs) > 0 { + scopeCtx = &storage.ScopeContext{ + Name: ts.ScopeAttributes.Name, + Version: ts.ScopeAttributes.Version, + SchemaURL: ts.ScopeAttributes.SchemaURL, + Attrs: entriesToMap(ts.ScopeAttributes.Attrs), + } + } + } + + // Pre-validate exemplars for attachment to the first successful sample. + // This ensures exemplars are committed atomically with samples in a single + // WAL operation (via opts.Exemplars) instead of a separate AppendExemplar call. 
+ var validExemplars []exemplar.Exemplar + var validExemplarOriginals []mimirpb.Exemplar // parallel slice for error reporting + exemplarsAttached := false + hasExemplars := len(ts.Exemplars) > 0 && i.limits.MaxGlobalExemplarsPerUser(userID) > 0 + if hasExemplars { + for _, ex := range ts.Exemplars { + if ex.TimestampMs > maxTimestampMs { + stats.failedExemplarsCount++ + updateFirstPartial(nil, func() softError { + return newExemplarTimestampTooFarInFutureError(model.Time(ex.TimestampMs), ts.Labels, ex.Labels) + }) + continue + } else if ex.TimestampMs < minTimestampMs { + stats.failedExemplarsCount++ + updateFirstPartial(nil, func() softError { + return newExemplarTimestampTooFarInPastError(model.Time(ex.TimestampMs), ts.Labels, ex.Labels) + }) + continue + } + validExemplars = append(validExemplars, exemplar.Exemplar{ + Value: ex.Value, + Ts: ex.TimestampMs, + HasTs: true, + Labels: mimirpb.FromLabelAdaptersToLabelsWithCopy(ex.Labels), + }) + validExemplarOriginals = append(validExemplarOriginals, ex) + } + } + for _, s := range ts.Samples { var err error @@ -1667,52 +1729,71 @@ func (i *Ingester) pushSamplesToAppender( continue } + // Append ST zero sample (created timestamp) BEFORE the regular sample to maintain + // in-order writes (st < t). AppenderV2 writes to memSeries immediately, so order matters. + // RejectOutOfOrder prevents duplicate ST zeros from being accepted via the OOO path + // (matching v1 AppendSTZeroSample behavior where OOO ST zeros are always rejected). 
if ingestCreatedTimestamp && ts.CreatedTimestamp < s.TimestampMs && (!nativeHistogramsIngestionEnabled || len(ts.Histograms) == 0 || ts.Histograms[0].Timestamp >= s.TimestampMs) { + stOpts := storage.AppendV2Options{RejectOutOfOrder: true} if ref != 0 { - _, err = app.AppendSTZeroSample(ref, copiedLabels, s.TimestampMs, ts.CreatedTimestamp) + _, err = app.Append(ref, copiedLabels, 0, ts.CreatedTimestamp, 0, nil, nil, stOpts) } else { - // Copy the label set because both TSDB and the active series tracker may retain it. copiedLabels = mimirpb.CopyLabels(nonCopiedLabels) - ref, err = app.AppendSTZeroSample(0, copiedLabels, s.TimestampMs, ts.CreatedTimestamp) + ref, err = app.Append(0, copiedLabels, 0, ts.CreatedTimestamp, 0, nil, nil, stOpts) } if err == nil { stats.succeededSamplesCount++ } else if !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) && !errors.Is(err, storage.ErrOutOfOrderST) && !errors.Is(err, storage.ErrOutOfOrderSample) { - // According to OTEL spec: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time - // if the start time is unknown, then it should equal to the timestamp of the first sample, - // which will mean a created timestamp equal to the timestamp of the first sample for later - // samples. Thus we ignore if zero sample would cause duplicate. - // We also ignore out of order sample as created timestamp is out of order most of the time, - // except when written before the first sample. + // Duplicates and OOO ST zeros are expected (e.g. long-lived counters resend the same CT). errProcessor.ProcessErr(err, ts.CreatedTimestamp, ts.Labels) } ingestCreatedTimestamp = false // Only try to append created timestamp once per series. } + opts := storage.AppendV2Options{ + Resource: resourceCtx, + Scope: scopeCtx, + } + if !exemplarsAttached && len(validExemplars) > 0 { + opts.Exemplars = validExemplars + } + // If the cached reference exists, we try to use it. 
if ref != 0 { - if _, err = app.Append(ref, copiedLabels, s.TimestampMs, s.Value); err == nil { - stats.succeededSamplesCount++ - continue - } + _, err = app.Append(ref, copiedLabels, 0, s.TimestampMs, s.Value, nil, nil, opts) } else { // Copy the label set because both TSDB and the active series tracker may retain it. copiedLabels = mimirpb.CopyLabels(nonCopiedLabels) - // Retain the reference in case there are multiple samples for the series. - if ref, err = app.Append(0, copiedLabels, s.TimestampMs, s.Value); err == nil { + ref, err = app.Append(0, copiedLabels, 0, s.TimestampMs, s.Value, nil, nil, opts) + } + + if err != nil { + // AppendPartialError means the sample succeeded but some exemplars failed. + var partialErr *storage.AppendPartialError + if errors.As(err, &partialErr) { stats.succeededSamplesCount++ + if !exemplarsAttached && len(validExemplars) > 0 { + i.handleExemplarPartialErrors(partialErr, len(ts.Exemplars), validExemplarOriginals, ts, userID, stats, updateFirstPartial) + exemplarsAttached = true + } continue } - } - // If it's a soft error it will be returned back to the distributor later as a 400. - if errProcessor.ProcessErr(err, s.TimestampMs, ts.Labels) { - continue + // If it's a soft error it will be returned back to the distributor later as a 400. + if errProcessor.ProcessErr(err, s.TimestampMs, ts.Labels) { + continue + } + + // Otherwise, return a 500. + return err } - // Otherwise, return a 500. - return err + stats.succeededSamplesCount++ + if !exemplarsAttached && len(validExemplars) > 0 { + stats.succeededExemplarsCount += len(validExemplars) + exemplarsAttached = true + } } numNativeHistogramBuckets := -1 @@ -1738,50 +1819,85 @@ func (i *Ingester) pushSamplesToAppender( ih = mimirpb.FromHistogramProtoToHistogram(&h) } + // Append ST zero sample (created timestamp) BEFORE the regular histogram to maintain + // in-order writes (st < t). AppenderV2 writes to memSeries immediately, so order matters. 
+ // RejectOutOfOrder prevents duplicate ST zeros from being accepted via the OOO path + // (matching v1 AppendHistogramSTZeroSample behavior where OOO ST zeros are always rejected). if ingestCreatedTimestamp && ts.CreatedTimestamp < h.Timestamp { + var zeroIH *histogram.Histogram + var zeroFH *histogram.FloatHistogram + if fh != nil { + zeroFH = &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterReset, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + CustomValues: fh.CustomValues, + } + } else if ih != nil { + zeroIH = &histogram.Histogram{ + CounterResetHint: histogram.CounterReset, + Schema: ih.Schema, + ZeroThreshold: ih.ZeroThreshold, + CustomValues: ih.CustomValues, + } + } + stOpts := storage.AppendV2Options{RejectOutOfOrder: true} if ref != 0 { - _, err = app.AppendHistogramSTZeroSample(ref, copiedLabels, h.Timestamp, ts.CreatedTimestamp, ih, fh) + _, err = app.Append(ref, copiedLabels, 0, ts.CreatedTimestamp, 0, zeroIH, zeroFH, stOpts) } else { - // Copy the label set because both TSDB and the active series tracker may retain it. copiedLabels = mimirpb.CopyLabels(nonCopiedLabels) - ref, err = app.AppendHistogramSTZeroSample(0, copiedLabels, h.Timestamp, ts.CreatedTimestamp, ih, fh) + ref, err = app.Append(0, copiedLabels, 0, ts.CreatedTimestamp, 0, zeroIH, zeroFH, stOpts) } if err == nil { stats.succeededSamplesCount++ } else if !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) && !errors.Is(err, storage.ErrOutOfOrderST) && !errors.Is(err, storage.ErrOutOfOrderSample) { - // According to OTEL spec: https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time - // if the start time is unknown, then it should equal to the timestamp of the first sample, - // which will mean a created timestamp equal to the timestamp of the first sample for later - // samples. Thus we ignore if zero sample would cause duplicate. 
- // We also ignore out of order sample as created timestamp is out of order most of the time, - // except when written before the first sample. + // Duplicates and OOO ST zeros are expected (e.g. long-lived counters resend the same CT). errProcessor.ProcessErr(err, ts.CreatedTimestamp, ts.Labels) } ingestCreatedTimestamp = false // Only try to append created timestamp once per series. } + opts := storage.AppendV2Options{ + Resource: resourceCtx, + Scope: scopeCtx, + } + if !exemplarsAttached && len(validExemplars) > 0 { + opts.Exemplars = validExemplars + } + // If the cached reference exists, we try to use it. if ref != 0 { - if _, err = app.AppendHistogram(ref, copiedLabels, h.Timestamp, ih, fh); err == nil { - stats.succeededSamplesCount++ - continue - } + _, err = app.Append(ref, copiedLabels, 0, h.Timestamp, 0, ih, fh, opts) } else { // Copy the label set because both TSDB and the active series tracker may retain it. copiedLabels = mimirpb.CopyLabels(nonCopiedLabels) - // Retain the reference in case there are multiple samples for the series. 
- if ref, err = app.AppendHistogram(0, copiedLabels, h.Timestamp, ih, fh); err == nil { + ref, err = app.Append(0, copiedLabels, 0, h.Timestamp, 0, ih, fh, opts) + } + + if err != nil { + var partialErr *storage.AppendPartialError + if errors.As(err, &partialErr) { stats.succeededSamplesCount++ + if !exemplarsAttached && len(validExemplars) > 0 { + i.handleExemplarPartialErrors(partialErr, len(ts.Exemplars), validExemplarOriginals, ts, userID, stats, updateFirstPartial) + exemplarsAttached = true + } continue } - } - if errProcessor.ProcessErr(err, h.Timestamp, ts.Labels) { - continue + if errProcessor.ProcessErr(err, h.Timestamp, ts.Labels) { + continue + } + + return err } - return err + stats.succeededSamplesCount++ + if !exemplarsAttached && len(validExemplars) > 0 { + stats.succeededExemplarsCount += len(validExemplars) + exemplarsAttached = true + } } numNativeHistograms := len(ts.Histograms) if numNativeHistograms > 0 { @@ -1797,63 +1913,42 @@ func (i *Ingester) pushSamplesToAppender( activeSeries.UpdateSeries(nonCopiedLabels, ref, startAppend, numNativeHistogramBuckets, isOTLP, idx) } - if len(ts.Exemplars) > 0 && i.limits.MaxGlobalExemplarsPerUser(userID) > 0 { - // app.AppendExemplar currently doesn't create the series, it must - // already exist. If it does not then drop. + // Handle exemplars that weren't attached to any sample. + if hasExemplars && !exemplarsAttached { if ref == 0 { - updateFirstPartial(nil, func() softError { - return newExemplarMissingSeriesError(model.Time(ts.Exemplars[0].TimestampMs), ts.Labels, ts.Exemplars[0].Labels) - }) - stats.failedExemplarsCount += len(ts.Exemplars) - } else { // Note that else is explicit, rather than a continue in the above if, in case of additional logic post exemplar processing. + // No series reference exists, exemplars can't be ingested. 
+ stats.failedExemplarsCount += len(validExemplars) + if len(ts.Exemplars) > 0 { + updateFirstPartial(nil, func() softError { + return newExemplarMissingSeriesError(model.Time(ts.Exemplars[0].TimestampMs), ts.Labels, ts.Exemplars[0].Labels) + }) + } + } else if len(validExemplars) > 0 { + // Series exists from a previous push but no sample succeeded in this push. + // Use AppendExemplar fallback for this rare case. There is no atomicity concern + // because there are no new samples to be atomic with. outOfOrderExemplars := 0 - for _, ex := range ts.Exemplars { - if ex.TimestampMs > maxTimestampMs { - stats.failedExemplarsCount++ - updateFirstPartial(nil, func() softError { - return newExemplarTimestampTooFarInFutureError(model.Time(ex.TimestampMs), ts.Labels, ex.Labels) - }) - continue - } else if ex.TimestampMs < minTimestampMs { - stats.failedExemplarsCount++ - updateFirstPartial(nil, func() softError { - return newExemplarTimestampTooFarInPastError(model.Time(ex.TimestampMs), ts.Labels, ex.Labels) - }) - continue - } - - e := exemplar.Exemplar{ - Value: ex.Value, - Ts: ex.TimestampMs, - HasTs: true, - Labels: mimirpb.FromLabelAdaptersToLabelsWithCopy(ex.Labels), - } - - var err error - if _, err = app.AppendExemplar(ref, labels.EmptyLabels(), e); err == nil { + for idx, e := range validExemplars { + orig := validExemplarOriginals[idx] + if _, err := app.AppendExemplar(ref, labels.EmptyLabels(), e); err == nil { stats.succeededExemplarsCount++ continue - } - - // We track the failed exemplars ingestion, whatever is the reason. This way, the sum of successfully - // and failed ingested exemplars is equal to the total number of processed ones. - stats.failedExemplarsCount++ + } else { + stats.failedExemplarsCount++ - isOOOExemplar := errors.Is(err, storage.ErrOutOfOrderExemplar) - if isOOOExemplar { - outOfOrderExemplars++ - // Only report out of order exemplars if all are out of order, otherwise this was a partial update - // to some existing set of exemplars. 
- if outOfOrderExemplars < len(ts.Exemplars) { - continue + isOOOExemplar := errors.Is(err, storage.ErrOutOfOrderExemplar) + if isOOOExemplar { + outOfOrderExemplars++ + if outOfOrderExemplars < len(ts.Exemplars) { + continue + } } - } - // Error adding exemplar. Do not report to client if the error was out of order and we ignore such error. - if !isOOOExemplar || !i.limits.IgnoreOOOExemplars(userID) { - updateFirstPartial(nil, func() softError { - return newTSDBIngestExemplarErr(err, model.Time(ex.TimestampMs), ts.Labels, ex.Labels) - }) + if !isOOOExemplar || !i.limits.IgnoreOOOExemplars(userID) { + updateFirstPartial(nil, func() softError { + return newTSDBIngestExemplarErr(err, model.Time(orig.TimestampMs), ts.Labels, orig.Labels) + }) + } } } } @@ -1862,6 +1957,74 @@ func (i *Ingester) pushSamplesToAppender( return nil } +// handleExemplarPartialErrors processes exemplar errors from an AppendPartialError returned +// by AppenderV2.Append when some exemplars failed TSDB validation (e.g. out-of-order). +// validOriginals is a parallel slice to the validExemplars passed to opts.Exemplars, used +// for error reporting with the original exemplar timestamps and labels. +func (i *Ingester) handleExemplarPartialErrors( + partialErr *storage.AppendPartialError, + numTotal int, + validOriginals []mimirpb.Exemplar, + ts mimirpb.PreallocTimeseries, + userID string, + stats *pushStats, + updateFirstPartial func(sampler *util_log.Sampler, errFn softErrorFunction), +) { + exemplarErrs := partialErr.ExemplarErrors + stats.failedExemplarsCount += len(exemplarErrs) + stats.succeededExemplarsCount += len(validOriginals) - len(exemplarErrs) + + outOfOrderExemplars := 0 + for _, eerr := range exemplarErrs { + if errors.Is(eerr, storage.ErrOutOfOrderExemplar) { + outOfOrderExemplars++ + // Only report out-of-order exemplars if all are out of order, + // otherwise this was a partial update to some existing set of exemplars. 
+ if outOfOrderExemplars < numTotal { + continue + } + } + isOOO := errors.Is(eerr, storage.ErrOutOfOrderExemplar) + if !isOOO || !i.limits.IgnoreOOOExemplars(userID) { + // Use the last valid exemplar's details for the error message. When all + // exemplars are OOO, this matches the old per-exemplar loop behavior which + // reported on the last exemplar processed. + lastOrig := validOriginals[len(validOriginals)-1] + updateFirstPartial(nil, func() softError { + return newTSDBIngestExemplarErr(eerr, model.Time(lastOrig.TimestampMs), ts.Labels, lastOrig.Labels) + }) + } + } +} + +// entriesToMap converts a slice of AttributeEntry to a map. +func entriesToMap(entries []mimirpb.AttributeEntry) map[string]string { + if len(entries) == 0 { + return nil + } + m := make(map[string]string, len(entries)) + for _, e := range entries { + m[e.Key] = e.Value + } + return m +} + +// convertResourceEntities converts ResourceEntity slice to storage.EntityData slice. +func convertResourceEntities(entities []mimirpb.ResourceEntity) []storage.EntityData { + if len(entities) == 0 { + return nil + } + result := make([]storage.EntityData, len(entities)) + for i, e := range entities { + result[i] = storage.EntityData{ + Type: e.Type, + ID: entriesToMap(e.ID), + Description: entriesToMap(e.Description), + } + } + return result +} + // StartReadRequest implements ingesterReceiver and is called by a gRPC tap Handle when a request is first received to // determine if a request should be permitted. When permitted, StartReadRequest returns a context with a function that // should be called to finish the started read request once the request is completed. 
If it wasn't successful, the @@ -2781,6 +2944,10 @@ func (i *Ingester) createTSDB(userID string, walReplayConcurrency int) (*userTSD HeadPostingsForMatchersCacheFactory: i.headPostingsForMatchersCacheFactory, BlockPostingsForMatchersCacheFactory: i.blockPostingsForMatchersCacheFactory, PostingsClonerFactory: lookupplan.ActualSelectedPostingsClonerFactory{}, + EnableSTAsZeroSample: false, // Handled manually for proper counting. + EnableNativeMetadata: i.limits.OTelPersistResourceAttributes(userID), + EnableResourceAttrIndex: i.limits.OTelResourceAttrIndexEnabled(userID), + IndexedResourceAttrs: stringSliceToSet(i.limits.OTelIndexedResourceAttributes(userID)), SecondaryHashFunction: secondaryTSDBHashFunctionForUser(userID), IndexLookupPlannerFunc: userDB.getIndexLookupPlannerFunc(), BlockChunkQuerierFunc: func(b tsdb.BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) { @@ -2859,6 +3026,18 @@ func (i *Ingester) createTSDB(userID string, walReplayConcurrency int) (*userTSD return userDB, nil } +// stringSliceToSet converts a string slice to a set (map[string]struct{}). +func stringSliceToSet(s []string) map[string]struct{} { + if len(s) == 0 { + return nil + } + m := make(map[string]struct{}, len(s)) + for _, v := range s { + m[v] = struct{}{} + } + return m +} + // createBlockChunkQuerier creates a BlockChunkQuerier that wraps the default querier with stats tracking // and optionally with mirroring for comparison. 
func (i *Ingester) createBlockChunkQuerier(userID string, b tsdb.BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) { diff --git a/pkg/ingester/ingester_activity.go b/pkg/ingester/ingester_activity.go index 421107ca2d7..eb2995748dd 100644 --- a/pkg/ingester/ingester_activity.go +++ b/pkg/ingester/ingester_activity.go @@ -144,6 +144,15 @@ func (i *ActivityTrackerWrapper) ActiveSeries(request *client.ActiveSeriesReques return i.ing.ActiveSeries(request, server) } +func (i *ActivityTrackerWrapper) ResourceAttributes(request *client.ResourceAttributesRequest, server client.Ingester_ResourceAttributesServer) error { + ix := i.tracker.Insert(func() string { + return requestActivity(server.Context(), "Ingester/ResourceAttributes", request) + }) + defer i.tracker.Delete(ix) + + return i.ing.ResourceAttributes(request, server) +} + func (i *ActivityTrackerWrapper) FlushHandler(w http.ResponseWriter, r *http.Request) { ix := i.tracker.Insert(func() string { return requestActivity(r.Context(), "Ingester/FlushHandler", nil) diff --git a/pkg/ingester/ingester_profiling.go b/pkg/ingester/ingester_profiling.go index 67b4313c487..3391e6ab3a3 100644 --- a/pkg/ingester/ingester_profiling.go +++ b/pkg/ingester/ingester_profiling.go @@ -61,6 +61,15 @@ func (s queryStreamStream) Context() context.Context { return s.ctx } +type resourceAttributesStream struct { + ctx context.Context + client.Ingester_ResourceAttributesServer +} + +func (s resourceAttributesStream) Context() context.Context { + return s.ctx +} + // isTraceSampled checks if the current trace is sampled func isTraceSampled(ctx context.Context) bool { return trace.SpanFromContext(ctx).SpanContext().IsSampled() @@ -234,6 +243,20 @@ func (i *ProfilingWrapper) ActiveSeries(request *client.ActiveSeriesRequest, ser return i.ing.ActiveSeries(request, server) } +func (i *ProfilingWrapper) ResourceAttributes(request *client.ResourceAttributesRequest, server client.Ingester_ResourceAttributesServer) error { + ctx := 
server.Context() + if isTraceSampled(ctx) { + userID, _ := tenant.TenantID(ctx) + labels := pprof.Labels("userID", userID) + defer pprof.SetGoroutineLabels(ctx) + ctx = pprof.WithLabels(ctx, labels) + pprof.SetGoroutineLabels(ctx) + server = resourceAttributesStream{ctx, server} + } + + return i.ing.ResourceAttributes(request, server) +} + func (i *ProfilingWrapper) FlushHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() if isTraceSampled(ctx) { diff --git a/pkg/ingester/resource_attributes.go b/pkg/ingester/resource_attributes.go new file mode 100644 index 00000000000..e7aa278d898 --- /dev/null +++ b/pkg/ingester/resource_attributes.go @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package ingester + +import ( + "context" + "fmt" + + "github.com/grafana/dskit/tenant" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" + + "github.com/grafana/mimir/pkg/ingester/client" + "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/util/spanlogger" +) + +const resourceAttributesMaxSizeBytes = 1 * 1024 * 1024 + +// ResourceAttributes returns OTel resource attributes for series matching the matchers. +// This is a streaming RPC that returns batches of series with their resource attributes. +// When ResourceAttrFilters is present, it uses the inverted index for reverse lookup +// instead of PostingsForMatchers. 
+func (i *Ingester) ResourceAttributes(request *client.ResourceAttributesRequest, stream client.Ingester_ResourceAttributesServer) (err error) { + defer func() { err = i.mapReadErrorToErrorWithStatus(err) }() + + spanlog, ctx := spanlogger.New(stream.Context(), i.logger, tracer, "Ingester.ResourceAttributes") + defer spanlog.Finish() + + userID, err := tenant.TenantID(ctx) + if err != nil { + return err + } + + // Enforce read consistency before getting TSDB (covers the case the tenant's data has not been ingested + // in this ingester yet, but there's some to ingest in the backlog). + if err := i.enforceReadConsistency(ctx, userID); err != nil { + return err + } + + db := i.getTSDB(userID) + if db == nil { + return nil + } + + // Get series metadata reader for resource attributes + metaReader, err := db.Head().SeriesMetadata() + if err != nil { + return fmt.Errorf("error getting series metadata: %w", err) + } + defer metaReader.Close() + + if len(request.GetResourceAttrFilters()) > 0 { + return i.resourceAttributesByFilter(ctx, request, stream, metaReader) + } + return i.resourceAttributesByMatchers(ctx, request, stream, db, metaReader) +} + +// resourceAttributesByFilter performs a reverse lookup using the inverted index. +// It finds series that have specific resource attribute key:value pairs. +func (i *Ingester) resourceAttributesByFilter( + _ context.Context, + request *client.ResourceAttributesRequest, + stream client.Ingester_ResourceAttributesServer, + metaReader seriesmetadata.Reader, +) error { + filters := request.GetResourceAttrFilters() + + // Intersect results from all filters (AND semantics). 
+ var matchingHashes []uint64 + for idx, filter := range filters { + hashes := metaReader.LookupResourceAttr(filter.GetKey(), filter.GetValue()) + if idx == 0 { + matchingHashes = hashes + } else { + matchingHashes = intersectSortedUint64(matchingHashes, hashes) + } + if len(matchingHashes) == 0 { + return nil + } + } + + startMs := request.GetStartTimestampMs() + endMs := request.GetEndTimestampMs() + limit := request.GetLimit() + + resp := &client.ResourceAttributesResponse{} + currentSize := 0 + count := int64(0) + + for _, labelsHash := range matchingHashes { + if limit > 0 && count >= limit { + break + } + + // Get labels for this hash + lbls, found := metaReader.LabelsForHash(labelsHash) + if !found { + continue + } + + // Get versioned resource attributes + versionedResource, found := metaReader.GetVersionedResource(labelsHash) + if !found || versionedResource == nil || len(versionedResource.Versions) == 0 { + continue + } + + item := buildResourceAttributesItem(lbls, versionedResource, startMs, endMs) + if item == nil { + continue + } + + itemSize := item.Size() + if currentSize+itemSize > resourceAttributesMaxSizeBytes && len(resp.Items) > 0 { + if err := sendResourceAttributesResponse(stream, resp); err != nil { + return fmt.Errorf("error sending response: %w", err) + } + resp = &client.ResourceAttributesResponse{} + currentSize = 0 + } + + resp.Items = append(resp.Items, item) + currentSize += itemSize + count++ + } + + // Send final batch + if len(resp.Items) > 0 { + if err := sendResourceAttributesResponse(stream, resp); err != nil { + return fmt.Errorf("error sending final response: %w", err) + } + } + + return nil +} + +// resourceAttributesByMatchers performs the forward lookup using PostingsForMatchers. 
+func (i *Ingester) resourceAttributesByMatchers( + ctx context.Context, + request *client.ResourceAttributesRequest, + stream client.Ingester_ResourceAttributesServer, + db *userTSDB, + metaReader seriesmetadata.Reader, +) error { + matchers, err := client.FromLabelMatchers(request.GetMatchers()) + if err != nil { + return fmt.Errorf("error parsing label matchers: %w", err) + } + + idx, err := db.Head().Index() + if err != nil { + return fmt.Errorf("error getting index: %w", err) + } + defer idx.Close() + + // Get postings for matching series + postings, err := tsdb.PostingsForMatchers(ctx, idx, matchers...) + if err != nil { + return fmt.Errorf("error getting postings: %w", err) + } + + startMs := request.GetStartTimestampMs() + endMs := request.GetEndTimestampMs() + limit := request.GetLimit() + + buf := labels.NewScratchBuilder(10) + resp := &client.ResourceAttributesResponse{} + currentSize := 0 + count := int64(0) + + for postings.Next() { + if limit > 0 && count >= limit { + break + } + + seriesRef := postings.At() + err = idx.Series(seriesRef, &buf, nil) + if err != nil { + // Postings may be stale. Skip if no underlying series exists. 
+ if errors.Is(err, storage.ErrNotFound) { + continue + } + return fmt.Errorf("error getting series: %w", err) + } + + lbls := buf.Labels() + labelsHash := labels.StableHash(lbls) + + // Get versioned resource attributes for this series + versionedResource, found := metaReader.GetVersionedResource(labelsHash) + if !found || versionedResource == nil || len(versionedResource.Versions) == 0 { + continue + } + + item := buildResourceAttributesItem(lbls, versionedResource, startMs, endMs) + if item == nil { + continue + } + + itemSize := item.Size() + if currentSize+itemSize > resourceAttributesMaxSizeBytes && len(resp.Items) > 0 { + if err := sendResourceAttributesResponse(stream, resp); err != nil { + return fmt.Errorf("error sending response: %w", err) + } + resp = &client.ResourceAttributesResponse{} + currentSize = 0 + } + + resp.Items = append(resp.Items, item) + currentSize += itemSize + count++ + } + + if err := postings.Err(); err != nil { + return fmt.Errorf("error iterating postings: %w", err) + } + + // Send final batch + if len(resp.Items) > 0 { + if err := sendResourceAttributesResponse(stream, resp); err != nil { + return fmt.Errorf("error sending final response: %w", err) + } + } + + return nil +} + +// buildResourceAttributesItem converts versioned resource data to the response format, +// filtering versions by time range. Returns nil if no versions match. 
+func buildResourceAttributesItem(lbls labels.Labels, versionedResource *seriesmetadata.VersionedResource, startMs, endMs int64) *client.SeriesResourceAttributes { + item := &client.SeriesResourceAttributes{ + Labels: mimirpb.FromLabelsToLabelAdapters(lbls), + } + + for _, ver := range versionedResource.Versions { + if endMs > 0 && ver.MinTime > endMs { + continue + } + if startMs > 0 && ver.MaxTime < startMs { + continue + } + + version := &client.ResourceVersionData{ + Identifying: make(map[string]string), + Descriptive: make(map[string]string), + MinTimeMs: ver.MinTime, + MaxTimeMs: ver.MaxTime, + } + + for k, v := range ver.Identifying { + version.Identifying[k] = v + } + for k, v := range ver.Descriptive { + version.Descriptive[k] = v + } + + for _, ent := range ver.Entities { + entity := &client.EntityData{ + Type: ent.Type, + Id: make(map[string]string), + Description: make(map[string]string), + } + for k, v := range ent.ID { + entity.Id[k] = v + } + for k, v := range ent.Description { + entity.Description[k] = v + } + version.Entities = append(version.Entities, entity) + } + + item.Versions = append(item.Versions, version) + } + + if len(item.Versions) == 0 { + return nil + } + return item +} + +// intersectSortedUint64 returns the intersection of two sorted uint64 slices. 
+func intersectSortedUint64(a, b []uint64) []uint64 { + if len(a) == 0 || len(b) == 0 { + return nil + } + + result := make([]uint64, 0, min(len(a), len(b))) + i, j := 0, 0 + for i < len(a) && j < len(b) { + if a[i] == b[j] { + result = append(result, a[i]) + i++ + j++ + } else if a[i] < b[j] { + i++ + } else { + j++ + } + } + return result +} + +func sendResourceAttributesResponse(stream client.Ingester_ResourceAttributesServer, resp *client.ResourceAttributesResponse) error { + return stream.Send(resp) +} diff --git a/pkg/ingester/user_tsdb.go b/pkg/ingester/user_tsdb.go index 49714062afe..9afe55e7e90 100644 --- a/pkg/ingester/user_tsdb.go +++ b/pkg/ingester/user_tsdb.go @@ -18,6 +18,7 @@ import ( "github.com/oklog/ulid/v2" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -194,6 +195,47 @@ func (u *userTSDB) Appender(ctx context.Context) storage.Appender { return u.db.Appender(ctx) } +func (u *userTSDB) AppenderV2(ctx context.Context) storage.AppenderV2 { + return u.db.AppenderV2(ctx) +} + +// CombinedAppender returns an extendedAppender that uses AppenderV2 for the +// main path and falls back to v1 AppendExemplar for exemplar-only time series. +func (u *userTSDB) CombinedAppender(ctx context.Context) extendedAppender { + return &combinedAppender{ + AppenderV2: u.db.AppenderV2(ctx), + v1: u.db.Appender(ctx), + } +} + +// combinedAppender wraps both AppenderV2 and v1 Appender from the same TSDB head. +// AppenderV2 is used for all sample/metadata/resource operations. The v1 Appender +// is used only for standalone AppendExemplar calls (exemplars without samples). 
+type combinedAppender struct { + storage.AppenderV2 + v1 storage.Appender +} + +func (c *combinedAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { + return c.AppenderV2.(storage.GetRef).GetRef(lset, hash) +} + +func (c *combinedAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + return c.v1.AppendExemplar(ref, l, e) +} + +func (c *combinedAppender) Commit() error { + if err := c.AppenderV2.Commit(); err != nil { + return err + } + return c.v1.Commit() +} + +func (c *combinedAppender) Rollback() error { + _ = c.v1.Rollback() + return c.AppenderV2.Rollback() +} + // Querier returns a new querier over the data partition for the given time range. func (u *userTSDB) Querier(mint, maxt int64) (storage.Querier, error) { return u.db.Querier(mint, maxt) diff --git a/pkg/mimir/mimir.go b/pkg/mimir/mimir.go index 168b21b4252..a6f1139568a 100644 --- a/pkg/mimir/mimir.go +++ b/pkg/mimir/mimir.go @@ -864,6 +864,7 @@ type Mimir struct { QuerierQueryable prom_storage.SampleAndChunkQueryable ExemplarQueryable prom_storage.ExemplarQueryable AdditionalStorageQueryables []querier.TimeRangeQueryable + BlocksStoreQueryable querier.ResourceAttributesBlocksQueryable MetadataSupplier querier.MetadataSupplier QuerierEngine promql.QueryEngine QuerierLifecycler *ring.BasicLifecycler diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 38db0b1ad81..24fa12e0c6e 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -693,6 +693,7 @@ func (t *Mimir) initQuerier() (serv services.Service, err error) { t.MetadataSupplier, t.QuerierEngine, t.Distributor, + t.BlocksStoreQueryable, metrics, t.Registerer, util_log.Logger, @@ -745,6 +746,7 @@ func (t *Mimir) initStoreQueryable() (services.Service, error) { return nil, fmt.Errorf("failed to initialize block store queryable: %v", err) } t.AdditionalStorageQueryables = append(t.AdditionalStorageQueryables, 
querier.NewStoreGatewayTimeRangeQueryable(q, t.Cfg.Querier)) + t.BlocksStoreQueryable = q return q, nil } diff --git a/pkg/mimirpb/mimir.pb.go b/pkg/mimirpb/mimir.pb.go index ccae57f2052..279363737ec 100644 --- a/pkg/mimirpb/mimir.pb.go +++ b/pkg/mimirpb/mimir.pb.go @@ -496,6 +496,11 @@ type TimeSeries struct { // Zero value means value not set. If you need to use exactly zero value for // the timestamp, use 1 millisecond before or after. CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + // Mimir-specific field for OTel resource attributes. + // Uses high field number to avoid conflicts with upstream Prometheus fields. + ResourceAttributes *ResourceAttributes `protobuf:"bytes,1000,opt,name=resource_attributes,json=resourceAttributes,proto3" json:"resource_attributes,omitempty"` + // Mimir-specific field for OTel InstrumentationScope attributes. + ScopeAttributes *ScopeAttributes `protobuf:"bytes,1001,opt,name=scope_attributes,json=scopeAttributes,proto3" json:"scope_attributes,omitempty"` // Skip unmarshaling of exemplars. SkipUnmarshalingExemplars bool @@ -561,6 +566,186 @@ func (m *TimeSeries) GetCreatedTimestamp() int64 { return 0 } +func (m *TimeSeries) GetResourceAttributes() *ResourceAttributes { + if m != nil { + return m.ResourceAttributes + } + return nil +} + +func (m *TimeSeries) GetScopeAttributes() *ScopeAttributes { + if m != nil { + return m.ScopeAttributes + } + return nil +} + +// AttributeEntry represents a key-value pair for resource attributes. 
// AttributeEntry represents a key-value pair for resource attributes.
type AttributeEntry struct {
	Key   string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *AttributeEntry) Reset()         { *m = AttributeEntry{} }
func (*AttributeEntry) ProtoMessage()    {}
func (m *AttributeEntry) String() string {
	return fmt.Sprintf("AttributeEntry{Key: %q, Value: %q}", m.Key, m.Value)
}

func (m *AttributeEntry) GetKey() string {
	if m == nil {
		return ""
	}
	return m.Key
}

func (m *AttributeEntry) GetValue() string {
	if m == nil {
		return ""
	}
	return m.Value
}

// ResourceEntity represents an OTel entity with its type and attributes.
type ResourceEntity struct {
	Type        string           `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
	ID          []AttributeEntry `protobuf:"bytes,2,rep,name=id,proto3" json:"id"`
	Description []AttributeEntry `protobuf:"bytes,3,rep,name=description,proto3" json:"description"`
}

func (m *ResourceEntity) Reset()         { *m = ResourceEntity{} }
func (*ResourceEntity) ProtoMessage()    {}
func (m *ResourceEntity) String() string {
	return fmt.Sprintf("ResourceEntity{Type: %q}", m.Type)
}

func (m *ResourceEntity) GetType() string {
	if m == nil {
		return ""
	}
	return m.Type
}

func (m *ResourceEntity) GetID() []AttributeEntry {
	if m == nil {
		return nil
	}
	return m.ID
}

func (m *ResourceEntity) GetDescription() []AttributeEntry {
	if m == nil {
		return nil
	}
	return m.Description
}

// ResourceAttributes contains OTel resource-level attributes for a time series.
// These are extracted from OTLP requests and stored to enable resource-aware queries.
type ResourceAttributes struct {
	// Identifying attributes that uniquely identify the resource
	// (e.g., service.name, service.namespace, service.instance.id).
	Identifying []AttributeEntry `protobuf:"bytes,1,rep,name=identifying,proto3" json:"identifying"`
	// Descriptive attributes that provide additional context about the resource.
	Descriptive []AttributeEntry `protobuf:"bytes,2,rep,name=descriptive,proto3" json:"descriptive"`
	// Entities extracted from entity_refs in the OTLP resource.
	Entities []ResourceEntity `protobuf:"bytes,3,rep,name=entities,proto3" json:"entities"`
	// Timestamp when this resource version was observed (in milliseconds).
	Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}

func (m *ResourceAttributes) Reset()         { *m = ResourceAttributes{} }
func (*ResourceAttributes) ProtoMessage()    {}
func (m *ResourceAttributes) String() string {
	return fmt.Sprintf("ResourceAttributes{Identifying: %d, Descriptive: %d, Entities: %d, Timestamp: %d}",
		len(m.Identifying), len(m.Descriptive), len(m.Entities), m.Timestamp)
}

func (m *ResourceAttributes) GetIdentifying() []AttributeEntry {
	if m == nil {
		return nil
	}
	return m.Identifying
}

func (m *ResourceAttributes) GetDescriptive() []AttributeEntry {
	if m == nil {
		return nil
	}
	return m.Descriptive
}

func (m *ResourceAttributes) GetEntities() []ResourceEntity {
	if m == nil {
		return nil
	}
	return m.Entities
}

func (m *ResourceAttributes) GetTimestamp() int64 {
	if m == nil {
		return 0
	}
	return m.Timestamp
}

// ScopeAttributes contains OTel InstrumentationScope data for a time series.
// These are extracted from OTLP requests and stored to enable scope-level queries.
type ScopeAttributes struct {
	// Name of the instrumentation scope (e.g., package path).
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Version of the instrumentation scope.
	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
	// SchemaURL of the instrumentation scope.
	SchemaURL string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
	// Attrs are custom key-value attributes on the scope.
	Attrs []AttributeEntry `protobuf:"bytes,4,rep,name=attrs,proto3" json:"attrs"`
	// Timestamp when this scope version was observed (in milliseconds).
	Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}

func (m *ScopeAttributes) Reset()         { *m = ScopeAttributes{} }
func (*ScopeAttributes) ProtoMessage()    {}
func (m *ScopeAttributes) String() string {
	return fmt.Sprintf("ScopeAttributes{Name: %q, Version: %q, SchemaURL: %q, Attrs: %d, Timestamp: %d}",
		m.Name, m.Version, m.SchemaURL, len(m.Attrs), m.Timestamp)
}

func (m *ScopeAttributes) GetName() string {
	if m == nil {
		return ""
	}
	return m.Name
}

func (m *ScopeAttributes) GetVersion() string {
	if m == nil {
		return ""
	}
	return m.Version
}

func (m *ScopeAttributes) GetSchemaURL() string {
	if m == nil {
		return ""
	}
	return m.SchemaURL
}

func (m *ScopeAttributes) GetAttrs() []AttributeEntry {
	if m == nil {
		return nil
	}
	return m.Attrs
}

func (m *ScopeAttributes) GetTimestamp() int64 {
	if m == nil {
		return 0
	}
	return m.Timestamp
}

// attributeEntriesEqual reports element-wise equality of two AttributeEntry slices.
func attributeEntriesEqual(a, b []AttributeEntry) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !a[i].Equal(&b[i]) {
			return false
		}
	}
	return true
}

func (this *AttributeEntry) Equal(that *AttributeEntry) bool {
	if this == nil || that == nil {
		return this == nil && that == nil
	}
	return this.Key == that.Key && this.Value == that.Value
}

func (this *ResourceEntity) Equal(that *ResourceEntity) bool {
	if this == nil || that == nil {
		return this == nil && that == nil
	}
	return this.Type == that.Type &&
		attributeEntriesEqual(this.ID, that.ID) &&
		attributeEntriesEqual(this.Description, that.Description)
}

func (this *ResourceAttributes) Equal(that *ResourceAttributes) bool {
	if this == nil || that == nil {
		return this == nil && that == nil
	}
	if this.Timestamp != that.Timestamp ||
		!attributeEntriesEqual(this.Identifying, that.Identifying) ||
		!attributeEntriesEqual(this.Descriptive, that.Descriptive) {
		return false
	}
	if len(this.Entities) != len(that.Entities) {
		return false
	}
	for i := range this.Entities {
		if !this.Entities[i].Equal(&that.Entities[i]) {
			return false
		}
	}
	return true
}

func (this *ScopeAttributes) Equal(that *ScopeAttributes) bool {
	if this == nil || that == nil {
		return this == nil && that == nil
	}
	return this.Name == that.Name &&
		this.Version == that.Version &&
		this.SchemaURL == that.SchemaURL &&
		this.Timestamp == that.Timestamp &&
		attributeEntriesEqual(this.Attrs, that.Attrs)
}
interface{}) bool { if that == nil { return this == nil @@ -4576,6 +4874,34 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ScopeAttributes != nil { + { + size, err := m.ScopeAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMimir(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3e + i-- + dAtA[i] = 0xca + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMimir(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3e + i-- + dAtA[i] = 0xc2 + } if m.CreatedTimestamp != 0 { i = encodeVarintMimir(dAtA, i, uint64(m.CreatedTimestamp)) i-- @@ -4640,7 +4966,7 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *LabelPair) Marshal() (dAtA []byte, err error) { +func (m *AttributeEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4650,12 +4976,12 @@ func (m *LabelPair) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { +func (m *AttributeEntry) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AttributeEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4667,17 +4993,17 @@ func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintMimir(dAtA, i, uint64(len(m.Name))) + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintMimir(dAtA, i, uint64(len(m.Key))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Sample) Marshal() (dAtA 
[]byte, err error) { +func (m *ResourceEntity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4687,31 +5013,55 @@ func (m *Sample) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Sample) MarshalTo(dAtA []byte) (int, error) { +func (m *ResourceEntity) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourceEntity) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.TimestampMs != 0 { - i = encodeVarintMimir(dAtA, i, uint64(m.TimestampMs)) - i-- - dAtA[i] = 0x10 + if len(m.Description) > 0 { + for iNdEx := len(m.Description) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Description[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMimir(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + if len(m.ID) > 0 { + for iNdEx := len(m.ID) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ID[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMimir(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintMimir(dAtA, i, uint64(len(m.Type))) i-- - dAtA[i] = 0x9 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { +func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4721,35 +5071,239 @@ func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MetricMetadata) MarshalTo(dAtA []byte) (int, 
error) { +func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Unit) > 0 { - i -= len(m.Unit) - copy(dAtA[i:], m.Unit) - i = encodeVarintMimir(dAtA, i, uint64(len(m.Unit))) - i-- - dAtA[i] = 0x2a - } - if len(m.Help) > 0 { - i -= len(m.Help) - copy(dAtA[i:], m.Help) - i = encodeVarintMimir(dAtA, i, uint64(len(m.Help))) + if m.Timestamp != 0 { + i = encodeVarintMimir(dAtA, i, uint64(m.Timestamp)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x20 } - if len(m.MetricFamilyName) > 0 { - i -= len(m.MetricFamilyName) - copy(dAtA[i:], m.MetricFamilyName) - i = encodeVarintMimir(dAtA, i, uint64(len(m.MetricFamilyName))) - i-- + if len(m.Entities) > 0 { + for iNdEx := len(m.Entities) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entities[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMimir(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Descriptive) > 0 { + for iNdEx := len(m.Descriptive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Descriptive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMimir(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Identifying) > 0 { + for iNdEx := len(m.Identifying) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Identifying[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMimir(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopeAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *ScopeAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintMimir(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x28 + } + if len(m.Attrs) > 0 { + for iNdEx := len(m.Attrs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attrs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMimir(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.SchemaURL) > 0 { + i -= len(m.SchemaURL) + copy(dAtA[i:], m.SchemaURL) + i = encodeVarintMimir(dAtA, i, uint64(len(m.SchemaURL))) + i-- + dAtA[i] = 0x1a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintMimir(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMimir(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintMimir(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMimir(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimestampMs != 0 { + i = encodeVarintMimir(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintMimir(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintMimir(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x22 + } + if len(m.MetricFamilyName) > 0 { + i -= len(m.MetricFamilyName) + copy(dAtA[i:], m.MetricFamilyName) + i = encodeVarintMimir(dAtA, i, uint64(len(m.MetricFamilyName))) + i-- dAtA[i] = 0x12 } if m.Type != 0 { @@ -6200,16 +6754,24 @@ func (m *TimeSeries) Size() (n int) { if m.CreatedTimestamp != 0 { n += 1 + sovMimir(uint64(m.CreatedTimestamp)) } + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 2 + l + sovMimir(uint64(l)) + } + if m.ScopeAttributes != nil { + l = 
m.ScopeAttributes.Size() + n += 2 + l + sovMimir(uint64(l)) + } return n } -func (m *LabelPair) Size() (n int) { +func (m *AttributeEntry) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + l = len(m.Key) if l > 0 { n += 1 + l + sovMimir(uint64(l)) } @@ -6220,69 +6782,171 @@ func (m *LabelPair) Size() (n int) { return n } -func (m *Sample) Size() (n int) { +func (m *ResourceEntity) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Value != 0 { - n += 9 + l = len(m.Type) + if l > 0 { + n += 1 + l + sovMimir(uint64(l)) } - if m.TimestampMs != 0 { - n += 1 + sovMimir(uint64(m.TimestampMs)) + if len(m.ID) > 0 { + for _, e := range m.ID { + l = e.Size() + n += 1 + l + sovMimir(uint64(l)) + } + } + if len(m.Description) > 0 { + for _, e := range m.Description { + l = e.Size() + n += 1 + l + sovMimir(uint64(l)) + } } return n } -func (m *MetricMetadata) Size() (n int) { +func (m *ResourceAttributes) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Type != 0 { - n += 1 + sovMimir(uint64(m.Type)) + if len(m.Identifying) > 0 { + for _, e := range m.Identifying { + l = e.Size() + n += 1 + l + sovMimir(uint64(l)) + } } - l = len(m.MetricFamilyName) - if l > 0 { - n += 1 + l + sovMimir(uint64(l)) + if len(m.Descriptive) > 0 { + for _, e := range m.Descriptive { + l = e.Size() + n += 1 + l + sovMimir(uint64(l)) + } } - l = len(m.Help) - if l > 0 { - n += 1 + l + sovMimir(uint64(l)) + if len(m.Entities) > 0 { + for _, e := range m.Entities { + l = e.Size() + n += 1 + l + sovMimir(uint64(l)) + } } - l = len(m.Unit) - if l > 0 { - n += 1 + l + sovMimir(uint64(l)) + if m.Timestamp != 0 { + n += 1 + sovMimir(uint64(m.Timestamp)) } return n } -func (m *Metric) Size() (n int) { +func (m *ScopeAttributes) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMimir(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + 
l + sovMimir(uint64(l)) + } + l = len(m.SchemaURL) + if l > 0 { + n += 1 + l + sovMimir(uint64(l)) + } + if len(m.Attrs) > 0 { + for _, e := range m.Attrs { l = e.Size() n += 1 + l + sovMimir(uint64(l)) } } + if m.Timestamp != 0 { + n += 1 + sovMimir(uint64(m.Timestamp)) + } return n } -func (m *Exemplar) Size() (n int) { +func (m *LabelPair) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMimir(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovMimir(uint64(l)) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.TimestampMs != 0 { + n += 1 + sovMimir(uint64(m.TimestampMs)) + } + return n +} + +func (m *MetricMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovMimir(uint64(m.Type)) + } + l = len(m.MetricFamilyName) + if l > 0 { + n += 1 + l + sovMimir(uint64(l)) + } + l = len(m.Help) + if l > 0 { + n += 1 + l + sovMimir(uint64(l)) + } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovMimir(uint64(l)) + } + return n +} + +func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovMimir(uint64(l)) + } + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() n += 1 + l + sovMimir(uint64(l)) } } @@ -7989,6 +8653,193 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { break } } + case 1000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1001: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScopeAttributes == nil { + m.ScopeAttributes = &ScopeAttributes{} + } + if err := m.ScopeAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMimir(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMimir + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func (m *AttributeEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributeEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributeEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMimir(dAtA[iNdEx:]) @@ -8010,6 +8861,530 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { } return nil } + +func (m *ResourceEntity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceEntity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceEntity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = append(m.ID, AttributeEntry{}) + if err := m.ID[len(m.ID)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = append(m.Description, AttributeEntry{}) + if err := m.Description[len(m.Description)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMimir(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMimir + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifying", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifying = append(m.Identifying, AttributeEntry{}) + if err := m.Identifying[len(m.Identifying)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Descriptive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Descriptive = append(m.Descriptive, AttributeEntry{}) + if err := m.Descriptive[len(m.Descriptive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entities = append(m.Entities, ResourceEntity{}) + if err := m.Entities[len(m.Entities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMimir(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMimir + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func (m *ScopeAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaURL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaURL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMimir + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMimir + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attrs = append(m.Attrs, AttributeEntry{}) + if err := 
m.Attrs[len(m.Attrs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMimir(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMimir + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + func (m *LabelPair) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/mimirpb/mimir.pb.go.expdiff b/pkg/mimirpb/mimir.pb.go.expdiff index 97a03aea1bd..86736718f9a 100644 --- a/pkg/mimirpb/mimir.pb.go.expdiff +++ b/pkg/mimirpb/mimir.pb.go.expdiff @@ -1,5 +1,5 @@ diff --git a/pkg/mimirpb/mimir.pb.go b/pkg/mimirpb/mimir.pb.go -index 986c4e1867..dda8609298 100644 +index 0435422038..dda8609298 100644 --- a/pkg/mimirpb/mimir.pb.go +++ b/pkg/mimirpb/mimir.pb.go @@ -14,7 +14,6 @@ import ( @@ -54,17 +54,336 @@ index 986c4e1867..dda8609298 100644 type TimeSeries struct { Labels []UnsafeMutableLabel `protobuf:"bytes,1,rep,name=labels,proto3,customtype=UnsafeMutableLabel" json:"labels"` // Sorted by time, oldest sample first. -@@ -496,9 +472,6 @@ type TimeSeries struct { +@@ -496,12 +472,6 @@ type TimeSeries struct { // Zero value means value not set. If you need to use exactly zero value for // the timestamp, use 1 millisecond before or after. CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` +- // Mimir-specific field for OTel resource attributes. 
+- // Uses high field number to avoid conflicts with upstream Prometheus fields. +- ResourceAttributes *ResourceAttributes `protobuf:"bytes,1000,opt,name=resource_attributes,json=resourceAttributes,proto3" json:"resource_attributes,omitempty"` - - // Skip unmarshaling of exemplars. - SkipUnmarshalingExemplars bool } func (m *TimeSeries) Reset() { *m = TimeSeries{} } -@@ -5968,25 +5941,19 @@ func (m *TimeSeriesRW2) MarshalToSizedBuffer(dAtA []byte) (int, error) { +@@ -564,122 +534,6 @@ func (m *TimeSeries) GetCreatedTimestamp() int64 { + return 0 + } + +-func (m *TimeSeries) GetResourceAttributes() *ResourceAttributes { +- if m != nil { +- return m.ResourceAttributes +- } +- return nil +-} +- +-// ResourceAttributeEntry represents a key-value pair for resource attributes. +-type ResourceAttributeEntry struct { +- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +- Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +-} +- +-func (m *ResourceAttributeEntry) Reset() { *m = ResourceAttributeEntry{} } +-func (*ResourceAttributeEntry) ProtoMessage() {} +-func (m *ResourceAttributeEntry) String() string { +- return fmt.Sprintf("ResourceAttributeEntry{Key: %q, Value: %q}", m.Key, m.Value) +-} +- +-func (m *ResourceAttributeEntry) GetKey() string { +- if m != nil { +- return m.Key +- } +- return "" +-} +- +-func (m *ResourceAttributeEntry) GetValue() string { +- if m != nil { +- return m.Value +- } +- return "" +-} +- +-// ResourceEntity represents an OTel entity with its type and attributes. 
+-type ResourceEntity struct { +- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +- ID []ResourceAttributeEntry `protobuf:"bytes,2,rep,name=id,proto3" json:"id"` +- Description []ResourceAttributeEntry `protobuf:"bytes,3,rep,name=description,proto3" json:"description"` +-} +- +-func (m *ResourceEntity) Reset() { *m = ResourceEntity{} } +-func (*ResourceEntity) ProtoMessage() {} +-func (m *ResourceEntity) String() string { +- return fmt.Sprintf("ResourceEntity{Type: %q}", m.Type) +-} +- +-func (m *ResourceEntity) GetType() string { +- if m != nil { +- return m.Type +- } +- return "" +-} +- +-func (m *ResourceEntity) GetID() []ResourceAttributeEntry { +- if m != nil { +- return m.ID +- } +- return nil +-} +- +-func (m *ResourceEntity) GetDescription() []ResourceAttributeEntry { +- if m != nil { +- return m.Description +- } +- return nil +-} +- +-// ResourceAttributes contains OTel resource-level attributes for a time series. +-// These are extracted from OTLP requests and stored to enable resource-aware queries. +-type ResourceAttributes struct { +- // Identifying attributes that uniquely identify the resource +- // (e.g., service.name, service.namespace, service.instance.id). +- Identifying []ResourceAttributeEntry `protobuf:"bytes,1,rep,name=identifying,proto3" json:"identifying"` +- // Descriptive attributes that provide additional context about the resource. +- Descriptive []ResourceAttributeEntry `protobuf:"bytes,2,rep,name=descriptive,proto3" json:"descriptive"` +- // Entities extracted from entity_refs in the OTLP resource. +- Entities []ResourceEntity `protobuf:"bytes,3,rep,name=entities,proto3" json:"entities"` +- // Timestamp when this resource version was observed (in milliseconds). 
+- Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +-} +- +-func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +-func (*ResourceAttributes) ProtoMessage() {} +-func (m *ResourceAttributes) String() string { +- return fmt.Sprintf("ResourceAttributes{Identifying: %d, Descriptive: %d, Entities: %d, Timestamp: %d}", +- len(m.Identifying), len(m.Descriptive), len(m.Entities), m.Timestamp) +-} +- +-func (m *ResourceAttributes) GetIdentifying() []ResourceAttributeEntry { +- if m != nil { +- return m.Identifying +- } +- return nil +-} +- +-func (m *ResourceAttributes) GetDescriptive() []ResourceAttributeEntry { +- if m != nil { +- return m.Descriptive +- } +- return nil +-} +- +-func (m *ResourceAttributes) GetEntities() []ResourceEntity { +- if m != nil { +- return m.Entities +- } +- return nil +-} +- +-func (m *ResourceAttributes) GetTimestamp() int64 { +- if m != nil { +- return m.Timestamp +- } +- return 0 +-} +- + type LabelPair struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +@@ -4695,20 +4549,6 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + _ = i + var l int + _ = l +- if m.ResourceAttributes != nil { +- { +- size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintMimir(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0x3e +- i-- +- dAtA[i] = 0xc2 +- } + if m.CreatedTimestamp != 0 { + i = encodeVarintMimir(dAtA, i, uint64(m.CreatedTimestamp)) + i-- +@@ -4773,171 +4613,6 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + return len(dAtA) - i, nil + } + +-func (m *ResourceAttributeEntry) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err +- } 
+- return dAtA[:n], nil +-} +- +-func (m *ResourceAttributeEntry) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) +-} +- +-func (m *ResourceAttributeEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if len(m.Value) > 0 { +- i -= len(m.Value) +- copy(dAtA[i:], m.Value) +- i = encodeVarintMimir(dAtA, i, uint64(len(m.Value))) +- i-- +- dAtA[i] = 0x12 +- } +- if len(m.Key) > 0 { +- i -= len(m.Key) +- copy(dAtA[i:], m.Key) +- i = encodeVarintMimir(dAtA, i, uint64(len(m.Key))) +- i-- +- dAtA[i] = 0xa +- } +- return len(dAtA) - i, nil +-} +- +-func (m *ResourceEntity) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err +- } +- return dAtA[:n], nil +-} +- +-func (m *ResourceEntity) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) +-} +- +-func (m *ResourceEntity) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if len(m.Description) > 0 { +- for iNdEx := len(m.Description) - 1; iNdEx >= 0; iNdEx-- { +- { +- size, err := m.Description[iNdEx].MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintMimir(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0x1a +- } +- } +- if len(m.ID) > 0 { +- for iNdEx := len(m.ID) - 1; iNdEx >= 0; iNdEx-- { +- { +- size, err := m.ID[iNdEx].MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintMimir(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0x12 +- } +- } +- if len(m.Type) > 0 { +- i -= len(m.Type) +- copy(dAtA[i:], m.Type) +- i = encodeVarintMimir(dAtA, i, uint64(len(m.Type))) +- i-- +- dAtA[i] = 0xa +- } +- return len(dAtA) - i, nil +-} +- +-func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) 
{ +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err +- } +- return dAtA[:n], nil +-} +- +-func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) +-} +- +-func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if m.Timestamp != 0 { +- i = encodeVarintMimir(dAtA, i, uint64(m.Timestamp)) +- i-- +- dAtA[i] = 0x20 +- } +- if len(m.Entities) > 0 { +- for iNdEx := len(m.Entities) - 1; iNdEx >= 0; iNdEx-- { +- { +- size, err := m.Entities[iNdEx].MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintMimir(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0x1a +- } +- } +- if len(m.Descriptive) > 0 { +- for iNdEx := len(m.Descriptive) - 1; iNdEx >= 0; iNdEx-- { +- { +- size, err := m.Descriptive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintMimir(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0x12 +- } +- } +- if len(m.Identifying) > 0 { +- for iNdEx := len(m.Identifying) - 1; iNdEx >= 0; iNdEx-- { +- { +- size, err := m.Identifying[iNdEx].MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintMimir(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0xa +- } +- } +- return len(dAtA) - i, nil +-} +- + func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) +@@ -6266,25 +5941,19 @@ func (m *TimeSeriesRW2) MarshalToSizedBuffer(dAtA []byte) (int, error) { } } if len(m.LabelsRefs) > 0 { @@ -95,7 +414,7 @@ index 986c4e1867..dda8609298 100644 i = encodeVarintMimir(dAtA, i, uint64(j21)) i-- dAtA[i] = 0xa -@@ -6026,25 +5993,19 @@ func (m *ExemplarRW2) MarshalToSizedBuffer(dAtA []byte) (int, error) { +@@ -6324,25 +5993,19 @@ func (m *ExemplarRW2) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x11 } if len(m.LabelsRefs) > 0 { @@ -126,7 +445,161 @@ index 986c4e1867..dda8609298 100644 i = encodeVarintMimir(dAtA, i, uint64(j23)) i-- dAtA[i] = 0xa -@@ -7392,9 +7353,6 @@ func valueToStringMimir(v interface{}) string { +@@ -6498,20 +6161,16 @@ func (m *TimeSeries) Size() (n int) { + if m.CreatedTimestamp != 0 { + n += 1 + sovMimir(uint64(m.CreatedTimestamp)) + } +- if m.ResourceAttributes != nil { +- l = m.ResourceAttributes.Size() +- n += 2 + l + sovMimir(uint64(l)) +- } + return n + } + +-func (m *ResourceAttributeEntry) Size() (n int) { ++func (m *LabelPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l +- l = len(m.Key) ++ l = len(m.Name) + if l > 0 { + n += 1 + l + sovMimir(uint64(l)) + } +@@ -6522,113 +6181,41 @@ func (m *ResourceAttributeEntry) Size() (n int) { + return n + } + +-func (m *ResourceEntity) Size() (n int) { ++func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l +- l = len(m.Type) +- if l > 0 { +- n += 1 + l + sovMimir(uint64(l)) +- } +- if len(m.ID) > 0 { +- for _, e := range m.ID { +- l = e.Size() +- n += 1 + l + sovMimir(uint64(l)) +- } ++ if m.Value != 0 { ++ n += 9 + } +- if len(m.Description) > 0 { +- for _, e := range m.Description { +- l = e.Size() +- n += 1 + l + sovMimir(uint64(l)) +- } ++ if m.TimestampMs != 0 { ++ n += 1 + sovMimir(uint64(m.TimestampMs)) + } + return n + } + +-func (m *ResourceAttributes) Size() (n int) { ++func (m *MetricMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l +- if len(m.Identifying) > 0 { +- for _, e := range m.Identifying { +- l = e.Size() +- n += 1 + l + sovMimir(uint64(l)) +- } ++ if m.Type != 0 { ++ n += 1 + sovMimir(uint64(m.Type)) + } +- if len(m.Descriptive) > 0 { +- for _, e := range m.Descriptive { +- l = e.Size() +- n += 1 + l + sovMimir(uint64(l)) +- } ++ l = len(m.MetricFamilyName) ++ if l > 0 { ++ n += 1 + l + sovMimir(uint64(l)) + } +- if 
len(m.Entities) > 0 { +- for _, e := range m.Entities { +- l = e.Size() +- n += 1 + l + sovMimir(uint64(l)) +- } ++ l = len(m.Help) ++ if l > 0 { ++ n += 1 + l + sovMimir(uint64(l)) + } +- if m.Timestamp != 0 { +- n += 1 + sovMimir(uint64(m.Timestamp)) +- } +- return n +-} +- +-func (m *LabelPair) Size() (n int) { +- if m == nil { +- return 0 +- } +- var l int +- _ = l +- l = len(m.Name) +- if l > 0 { +- n += 1 + l + sovMimir(uint64(l)) +- } +- l = len(m.Value) +- if l > 0 { +- n += 1 + l + sovMimir(uint64(l)) +- } +- return n +-} +- +-func (m *Sample) Size() (n int) { +- if m == nil { +- return 0 +- } +- var l int +- _ = l +- if m.Value != 0 { +- n += 9 +- } +- if m.TimestampMs != 0 { +- n += 1 + sovMimir(uint64(m.TimestampMs)) +- } +- return n +-} +- +-func (m *MetricMetadata) Size() (n int) { +- if m == nil { +- return 0 +- } +- var l int +- _ = l +- if m.Type != 0 { +- n += 1 + sovMimir(uint64(m.Type)) +- } +- l = len(m.MetricFamilyName) +- if l > 0 { +- n += 1 + l + sovMimir(uint64(l)) +- } +- l = len(m.Help) +- if l > 0 { +- n += 1 + l + sovMimir(uint64(l)) +- } +- l = len(m.Unit) +- if l > 0 { +- n += 1 + l + sovMimir(uint64(l)) ++ l = len(m.Unit) ++ if l > 0 { ++ n += 1 + l + sovMimir(uint64(l)) + } + return n + } +@@ -7766,9 +7353,6 @@ func valueToStringMimir(v interface{}) string { return fmt.Sprintf("*%v", pv) } func (m *WriteRequest) Unmarshal(dAtA []byte) error { @@ -136,7 +609,7 @@ index 986c4e1867..dda8609298 100644 l := len(dAtA) iNdEx := 0 for iNdEx < l { -@@ -7424,9 +7382,6 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { +@@ -7798,9 +7382,6 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: @@ -146,7 +619,7 @@ index 986c4e1867..dda8609298 100644 if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) } -@@ -7456,8 +7411,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { +@@ -7830,8 +7411,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { return 
io.ErrUnexpectedEOF } m.Timeseries = append(m.Timeseries, PreallocTimeseries{}) @@ -156,7 +629,7 @@ index 986c4e1867..dda8609298 100644 return err } iNdEx = postIndex -@@ -7481,9 +7435,6 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { +@@ -7855,9 +7435,6 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } } case 3: @@ -166,7 +639,7 @@ index 986c4e1867..dda8609298 100644 if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } -@@ -7518,9 +7469,6 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { +@@ -7892,9 +7469,6 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 4: @@ -176,7 +649,7 @@ index 986c4e1867..dda8609298 100644 if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SymbolsRW2", wireType) } -@@ -7550,16 +7498,9 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { +@@ -7924,16 +7498,9 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } @@ -194,7 +667,7 @@ index 986c4e1867..dda8609298 100644 if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TimeseriesRW2", wireType) } -@@ -7588,12 +7529,8 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { +@@ -7962,12 +7529,8 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } @@ -209,35 +682,771 @@ index 986c4e1867..dda8609298 100644 return err } iNdEx = postIndex -@@ -7656,12 +7593,6 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { - if iNdEx > l { - return io.ErrUnexpectedEOF - } +@@ -8004,401 +7567,13 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] +- iNdEx++ +- v |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- m.SkipLabelCountValidation = bool(v != 0) +- default: +- iNdEx = preIndex +- skippy, err := skipMimir(dAtA[iNdEx:]) +- if err != nil { +- return err +- } +- if (skippy < 0) || 
(iNdEx+skippy) < 0 { +- return ErrInvalidLengthMimir +- } +- if (iNdEx + skippy) > l { +- return io.ErrUnexpectedEOF +- } +- iNdEx += skippy +- } +- } +- +- if iNdEx > l { +- return io.ErrUnexpectedEOF +- } - - if m.unmarshalFromRW2 { - m.Metadata = metadata.slice() - m.rw2symbols.releasePages() - } - - return nil - } - func (m *WriteResponse) Unmarshal(dAtA []byte) error { -@@ -7929,11 +7860,9 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { - if postIndex > l { - return io.ErrUnexpectedEOF - } +- return nil +-} +-func (m *WriteResponse) Unmarshal(dAtA []byte) error { +- l := len(dAtA) +- iNdEx := 0 +- for iNdEx < l { +- preIndex := iNdEx +- var wire uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: WriteResponse: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- default: +- iNdEx = preIndex +- skippy, err := skipMimir(dAtA[iNdEx:]) +- if err != nil { +- return err +- } +- if (skippy < 0) || (iNdEx+skippy) < 0 { +- return ErrInvalidLengthMimir +- } +- if (iNdEx + skippy) > l { +- return io.ErrUnexpectedEOF +- } +- iNdEx += skippy +- } +- } +- +- if iNdEx > l { +- return io.ErrUnexpectedEOF +- } +- return nil +-} +-func (m *ErrorDetails) Unmarshal(dAtA []byte) error { +- l := len(dAtA) +- iNdEx := 0 +- for iNdEx < l { +- preIndex := iNdEx +- var wire uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- 
fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: ErrorDetails: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: ErrorDetails: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- case 1: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Cause", wireType) +- } +- m.Cause = 0 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- m.Cause |= ErrorCause(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- case 2: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Soft", wireType) +- } +- var v int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- v |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- m.Soft = bool(v != 0) +- default: +- iNdEx = preIndex +- skippy, err := skipMimir(dAtA[iNdEx:]) +- if err != nil { +- return err +- } +- if (skippy < 0) || (iNdEx+skippy) < 0 { +- return ErrInvalidLengthMimir +- } +- if (iNdEx + skippy) > l { +- return io.ErrUnexpectedEOF +- } +- iNdEx += skippy +- } +- } +- +- if iNdEx > l { +- return io.ErrUnexpectedEOF +- } +- return nil +-} +-func (m *TimeSeries) Unmarshal(dAtA []byte) error { +- l := len(dAtA) +- iNdEx := 0 +- for iNdEx < l { +- preIndex := iNdEx +- var wire uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: TimeSeries: wiretype end group for 
non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- case 1: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) +- } +- var msglen int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- msglen |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- if msglen < 0 { +- return ErrInvalidLengthMimir +- } +- postIndex := iNdEx + msglen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.Labels = append(m.Labels, UnsafeMutableLabel{}) +- if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err +- } +- iNdEx = postIndex +- case 2: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) +- } +- var msglen int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- msglen |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- if msglen < 0 { +- return ErrInvalidLengthMimir +- } +- postIndex := iNdEx + msglen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.Samples = append(m.Samples, Sample{}) +- if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err +- } +- iNdEx = postIndex +- case 3: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) +- } +- var msglen int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- 
msglen |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- if msglen < 0 { +- return ErrInvalidLengthMimir +- } +- postIndex := iNdEx + msglen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } - if !m.SkipUnmarshalingExemplars { - m.Exemplars = append(m.Exemplars, Exemplar{}) - if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } +- } +- iNdEx = postIndex +- case 4: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) +- } +- var msglen int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- msglen |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- if msglen < 0 { +- return ErrInvalidLengthMimir +- } +- postIndex := iNdEx + msglen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.Histograms = append(m.Histograms, Histogram{}) +- if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err +- } +- iNdEx = postIndex +- case 6: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) +- } +- m.CreatedTimestamp = 0 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- m.CreatedTimestamp |= int64(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- case 1000: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) +- } +- var msglen int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] 
+- iNdEx++ +- msglen |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- if msglen < 0 { +- return ErrInvalidLengthMimir +- } +- postIndex := iNdEx + msglen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- if m.ResourceAttributes == nil { +- m.ResourceAttributes = &ResourceAttributes{} +- } +- if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } + } +- iNdEx = postIndex ++ m.SkipLabelCountValidation = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMimir(dAtA[iNdEx:]) +@@ -8420,8 +7595,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { + } + return nil + } +- +-func (m *ResourceAttributeEntry) Unmarshal(dAtA []byte) error { ++func (m *WriteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -8444,76 +7618,12 @@ func (m *ResourceAttributeEntry) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: ResourceAttributeEntry: wiretype end group for non-group") ++ return fmt.Errorf("proto: WriteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: ResourceAttributeEntry: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { +- case 1: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) +- } +- var stringLen uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- stringLen |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- intStringLen := int(stringLen) +- if intStringLen < 0 { +- return 
ErrInvalidLengthMimir +- } +- postIndex := iNdEx + intStringLen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.Key = string(dAtA[iNdEx:postIndex]) +- iNdEx = postIndex +- case 2: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) +- } +- var stringLen uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- stringLen |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- intStringLen := int(stringLen) +- if intStringLen < 0 { +- return ErrInvalidLengthMimir +- } +- postIndex := iNdEx + intStringLen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.Value = string(dAtA[iNdEx:postIndex]) +- iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMimir(dAtA[iNdEx:]) +@@ -8535,8 +7645,7 @@ func (m *ResourceAttributeEntry) Unmarshal(dAtA []byte) error { + } + return nil + } +- +-func (m *ResourceEntity) Unmarshal(dAtA []byte) error { ++func (m *ErrorDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -8559,17 +7668,17 @@ func (m *ResourceEntity) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: ResourceEntity: wiretype end group for non-group") ++ return fmt.Errorf("proto: ErrorDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: ResourceEntity: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: ErrorDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) ++ if wireType != 0 { ++ return 
fmt.Errorf("proto: wrong wireType = %d for field Cause", wireType) + } +- var stringLen uint64 ++ m.Cause = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir +@@ -8579,63 +7688,16 @@ func (m *ResourceEntity) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- stringLen |= uint64(b&0x7F) << shift ++ m.Cause |= ErrorCause(b&0x7F) << shift + if b < 0x80 { + break + } + } +- intStringLen := int(stringLen) +- if intStringLen < 0 { +- return ErrInvalidLengthMimir +- } +- postIndex := iNdEx + intStringLen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.Type = string(dAtA[iNdEx:postIndex]) +- iNdEx = postIndex + case 2: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) +- } +- var msglen int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowMimir +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- msglen |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- if msglen < 0 { +- return ErrInvalidLengthMimir +- } +- postIndex := iNdEx + msglen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.ID = append(m.ID, ResourceAttributeEntry{}) +- if err := m.ID[len(m.ID)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err +- } +- iNdEx = postIndex +- case 3: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Soft", wireType) + } +- var msglen int ++ var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir +@@ -8645,26 +7707,12 @@ func (m *ResourceEntity) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ v |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } +- if msglen < 0 { +- return ErrInvalidLengthMimir +- } +- postIndex := iNdEx + msglen +- if postIndex < 0 { +- return ErrInvalidLengthMimir +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.Description = append(m.Description, ResourceAttributeEntry{}) +- if err := m.Description[len(m.Description)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err +- } +- iNdEx = postIndex ++ m.Soft = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMimir(dAtA[iNdEx:]) +@@ -8686,8 +7734,7 @@ func (m *ResourceEntity) Unmarshal(dAtA []byte) error { + } + return nil + } +- +-func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { ++func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -8710,15 +7757,15 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") ++ return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Identifying", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -8745,14 +7792,14 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Identifying = append(m.Identifying, ResourceAttributeEntry{}) +- if err := m.Identifying[len(m.Identifying)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ m.Labels = append(m.Labels, UnsafeMutableLabel{}) ++ if err := 
m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Descriptive", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -8779,14 +7826,14 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Descriptive = append(m.Descriptive, ResourceAttributeEntry{}) +- if err := m.Descriptive[len(m.Descriptive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ m.Samples = append(m.Samples, Sample{}) ++ if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Entities", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -8813,16 +7860,50 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Entities = append(m.Entities, ResourceEntity{}) +- if err := m.Entities[len(m.Entities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { -+ return err + return err } iNdEx = postIndex case 4: -@@ -11242,10 +11171,6 @@ func (m *WriteRequestRW2) Unmarshal(dAtA []byte) error { ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowMimir ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 
{ ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthMimir ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthMimir ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Histograms = append(m.Histograms, Histogram{}) ++ if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 6: + if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } +- m.Timestamp = 0 ++ m.CreatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMimir +@@ -8832,7 +7913,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.Timestamp |= int64(b&0x7F) << shift ++ m.CreatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } +@@ -8858,7 +7939,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + } + return nil + } +- + func (m *LabelPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 +@@ -12091,10 +11171,6 @@ func (m *WriteRequestRW2) Unmarshal(dAtA []byte) error { return nil } func (m *TimeSeriesRW2) Unmarshal(dAtA []byte) error { @@ -248,7 +1457,7 @@ index 986c4e1867..dda8609298 100644 l := len(dAtA) iNdEx := 0 for iNdEx < l { -@@ -11276,7 +11201,22 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat +@@ -12125,7 +11201,22 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat switch fieldNum { case 1: if wireType == 0 { @@ -272,24 +1481,24 @@ index 986c4e1867..dda8609298 100644 } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { -@@ -11311,14 +11251,9 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat +@@ -12160,14 +11251,9 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, 
symbols *rw2PagedSymbols, metadat } } elementCount = count - if elementCount%2 != 0 { - return errorOddNumberOfLabelRefs +- } +- if elementCount != 0 && len(m.Labels) == 0 { +- m.Labels = make([]LabelAdapter, 0, elementCount/2) + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) } -- if elementCount != 0 && len(m.Labels) == 0 { -- m.Labels = make([]LabelAdapter, 0, elementCount/2) -- } - idx := 0 - metricNameLabel := false for iNdEx < postIndex { var v uint32 for shift := uint(0); ; shift += 7 { -@@ -11335,27 +11270,7 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat +@@ -12184,27 +11270,7 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat break } } @@ -318,7 +1527,7 @@ index 986c4e1867..dda8609298 100644 } } else { return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) -@@ -11457,11 +11372,9 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat +@@ -12306,11 +11372,9 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat if postIndex > l { return io.ErrUnexpectedEOF } @@ -333,7 +1542,7 @@ index 986c4e1867..dda8609298 100644 } iNdEx = postIndex case 5: -@@ -11493,7 +11406,7 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat +@@ -12342,7 +11406,7 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat if postIndex > l { return io.ErrUnexpectedEOF } @@ -342,7 +1551,7 @@ index 986c4e1867..dda8609298 100644 return err } iNdEx = postIndex -@@ -11538,10 +11451,6 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat +@@ -12387,10 +11451,6 @@ func (m *TimeSeries) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadat return nil } func (m *ExemplarRW2) Unmarshal(dAtA []byte) error { @@ -353,7 +1562,7 @@ index 986c4e1867..dda8609298 100644 l := len(dAtA) iNdEx := 0 for iNdEx < l { -@@ 
-11572,7 +11481,22 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { +@@ -12421,7 +11481,22 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { switch fieldNum { case 1: if wireType == 0 { @@ -377,7 +1586,7 @@ index 986c4e1867..dda8609298 100644 } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { -@@ -11607,13 +11531,9 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { +@@ -12456,13 +11531,9 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { } } elementCount = count @@ -393,7 +1602,7 @@ index 986c4e1867..dda8609298 100644 for iNdEx < postIndex { var v uint32 for shift := uint(0); ; shift += 7 { -@@ -11630,20 +11550,7 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { +@@ -12479,20 +11550,7 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { break } } @@ -415,7 +1624,7 @@ index 986c4e1867..dda8609298 100644 } } else { return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) -@@ -11663,7 +11570,7 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { +@@ -12512,7 +11570,7 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } @@ -424,7 +1633,7 @@ index 986c4e1867..dda8609298 100644 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMimir -@@ -11673,7 +11580,7 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { +@@ -12522,7 +11580,7 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { } b := dAtA[iNdEx] iNdEx++ @@ -433,7 +1642,7 @@ index 986c4e1867..dda8609298 100644 if b < 0x80 { break } -@@ -11700,16 +11607,6 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { +@@ -12549,16 
+11607,6 @@ func (m *Exemplar) UnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols) error { return nil } func (m *MetadataRW2) Unmarshal(dAtA []byte) error { @@ -450,7 +1659,7 @@ index 986c4e1867..dda8609298 100644 l := len(dAtA) iNdEx := 0 for iNdEx < l { -@@ -11742,7 +11639,7 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata +@@ -12591,7 +11639,7 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } @@ -459,7 +1668,7 @@ index 986c4e1867..dda8609298 100644 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMimir -@@ -11752,7 +11649,7 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata +@@ -12601,7 +11649,7 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata } b := dAtA[iNdEx] iNdEx++ @@ -468,7 +1677,7 @@ index 986c4e1867..dda8609298 100644 if b < 0x80 { break } -@@ -11761,7 +11658,7 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata +@@ -12610,7 +11658,7 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType) } @@ -477,7 +1686,7 @@ index 986c4e1867..dda8609298 100644 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMimir -@@ -11771,20 +11668,16 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata +@@ -12620,20 +11668,16 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata } b := dAtA[iNdEx] iNdEx++ @@ -500,7 +1709,7 @@ index 986c4e1867..dda8609298 100644 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMimir -@@ -11794,15 +11687,11 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata +@@ -12643,15 +11687,11 @@ func 
MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata } b := dAtA[iNdEx] iNdEx++ @@ -517,7 +1726,7 @@ index 986c4e1867..dda8609298 100644 default: iNdEx = preIndex skippy, err := skipMimir(dAtA[iNdEx:]) -@@ -11822,23 +11711,6 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata +@@ -12671,23 +11711,6 @@ func MetricMetadataUnmarshalRW2(dAtA []byte, symbols *rw2PagedSymbols, metadata if iNdEx > l { return io.ErrUnexpectedEOF } diff --git a/pkg/mimirpb/mimir.proto b/pkg/mimirpb/mimir.proto index 2a5ef623526..67aab3739cd 100644 --- a/pkg/mimirpb/mimir.proto +++ b/pkg/mimirpb/mimir.proto @@ -83,6 +83,12 @@ message TimeSeries { // Zero value means value not set. If you need to use exactly zero value for // the timestamp, use 1 millisecond before or after. int64 created_timestamp = 6; + + // Mimir-specific field for OTel resource attributes. + // Uses high field number to avoid conflicts with upstream Prometheus fields. + // NOTE: This field is manually added to mimir.pb.go and not auto-generated. + // See ResourceAttributes, ResourceEntity, ResourceAttributeEntry types in mimir.pb.go. + // ResourceAttributes resource_attributes = 1000; } message LabelPair { diff --git a/pkg/mimirpb/timeseries.go b/pkg/mimirpb/timeseries.go index f812dd94ff5..5101ed15e38 100644 --- a/pkg/mimirpb/timeseries.go +++ b/pkg/mimirpb/timeseries.go @@ -649,6 +649,109 @@ func DeepCopyTimeseries(dst, src PreallocTimeseries, keepHistograms, keepExempla } } + // Copy the resource attributes. + if srcTs.ResourceAttributes != nil { + dstTs.ResourceAttributes = DeepCopyResourceAttributes(srcTs.ResourceAttributes) + } else { + dstTs.ResourceAttributes = nil + } + + // Copy the scope attributes. + if srcTs.ScopeAttributes != nil { + dstTs.ScopeAttributes = DeepCopyScopeAttributes(srcTs.ScopeAttributes) + } else { + dstTs.ScopeAttributes = nil + } + + return dst +} + +// DeepCopyResourceAttributes creates a deep copy of ResourceAttributes. 
+func DeepCopyResourceAttributes(src *ResourceAttributes) *ResourceAttributes { + if src == nil { + return nil + } + + dst := &ResourceAttributes{ + Timestamp: src.Timestamp, + } + + // Copy identifying attributes. + if len(src.Identifying) > 0 { + dst.Identifying = make([]AttributeEntry, len(src.Identifying)) + for i, e := range src.Identifying { + dst.Identifying[i] = AttributeEntry{ + Key: strings.Clone(e.Key), + Value: strings.Clone(e.Value), + } + } + } + + // Copy descriptive attributes. + if len(src.Descriptive) > 0 { + dst.Descriptive = make([]AttributeEntry, len(src.Descriptive)) + for i, e := range src.Descriptive { + dst.Descriptive[i] = AttributeEntry{ + Key: strings.Clone(e.Key), + Value: strings.Clone(e.Value), + } + } + } + + // Copy entities. + if len(src.Entities) > 0 { + dst.Entities = make([]ResourceEntity, len(src.Entities)) + for i, e := range src.Entities { + dst.Entities[i] = ResourceEntity{ + Type: strings.Clone(e.Type), + } + if len(e.ID) > 0 { + dst.Entities[i].ID = make([]AttributeEntry, len(e.ID)) + for j, attr := range e.ID { + dst.Entities[i].ID[j] = AttributeEntry{ + Key: strings.Clone(attr.Key), + Value: strings.Clone(attr.Value), + } + } + } + if len(e.Description) > 0 { + dst.Entities[i].Description = make([]AttributeEntry, len(e.Description)) + for j, attr := range e.Description { + dst.Entities[i].Description[j] = AttributeEntry{ + Key: strings.Clone(attr.Key), + Value: strings.Clone(attr.Value), + } + } + } + } + } + + return dst +} + +// DeepCopyScopeAttributes creates a deep copy of ScopeAttributes. 
+func DeepCopyScopeAttributes(src *ScopeAttributes) *ScopeAttributes { + if src == nil { + return nil + } + + dst := &ScopeAttributes{ + Name: strings.Clone(src.Name), + Version: strings.Clone(src.Version), + SchemaURL: strings.Clone(src.SchemaURL), + Timestamp: src.Timestamp, + } + + if len(src.Attrs) > 0 { + dst.Attrs = make([]AttributeEntry, len(src.Attrs)) + for i, e := range src.Attrs { + dst.Attrs[i] = AttributeEntry{ + Key: strings.Clone(e.Key), + Value: strings.Clone(e.Value), + } + } + } + return dst } @@ -801,4 +904,34 @@ func (ts *TimeSeries) MakeReferencesSafeToRetain() { ts.Exemplars[i].Labels[j].Value = strings.Clone(l.Value) } } + if ts.ResourceAttributes != nil { + for i, e := range ts.ResourceAttributes.Identifying { + ts.ResourceAttributes.Identifying[i].Key = strings.Clone(e.Key) + ts.ResourceAttributes.Identifying[i].Value = strings.Clone(e.Value) + } + for i, e := range ts.ResourceAttributes.Descriptive { + ts.ResourceAttributes.Descriptive[i].Key = strings.Clone(e.Key) + ts.ResourceAttributes.Descriptive[i].Value = strings.Clone(e.Value) + } + for i, entity := range ts.ResourceAttributes.Entities { + ts.ResourceAttributes.Entities[i].Type = strings.Clone(entity.Type) + for j, attr := range entity.ID { + ts.ResourceAttributes.Entities[i].ID[j].Key = strings.Clone(attr.Key) + ts.ResourceAttributes.Entities[i].ID[j].Value = strings.Clone(attr.Value) + } + for j, attr := range entity.Description { + ts.ResourceAttributes.Entities[i].Description[j].Key = strings.Clone(attr.Key) + ts.ResourceAttributes.Entities[i].Description[j].Value = strings.Clone(attr.Value) + } + } + } + if ts.ScopeAttributes != nil { + ts.ScopeAttributes.Name = strings.Clone(ts.ScopeAttributes.Name) + ts.ScopeAttributes.Version = strings.Clone(ts.ScopeAttributes.Version) + ts.ScopeAttributes.SchemaURL = strings.Clone(ts.ScopeAttributes.SchemaURL) + for i, e := range ts.ScopeAttributes.Attrs { + ts.ScopeAttributes.Attrs[i].Key = strings.Clone(e.Key) + 
ts.ScopeAttributes.Attrs[i].Value = strings.Clone(e.Value) + } + } } diff --git a/pkg/mimirpb/timeseries_pools.go b/pkg/mimirpb/timeseries_pools.go index f4d15ebaff8..8fc3d655aea 100644 --- a/pkg/mimirpb/timeseries_pools.go +++ b/pkg/mimirpb/timeseries_pools.go @@ -49,7 +49,7 @@ func TimeseriesFromPool() *TimeSeries { // Panic if the pool returns a TimeSeries that wasn't properly cleaned, // which is indicative of a hard bug that we want to catch as soon as possible. - if len(ts.Labels) > 0 || len(ts.Samples) > 0 || len(ts.Histograms) > 0 || len(ts.Exemplars) > 0 || ts.CreatedTimestamp != 0 || ts.SkipUnmarshalingExemplars { + if len(ts.Labels) > 0 || len(ts.Samples) > 0 || len(ts.Histograms) > 0 || len(ts.Exemplars) > 0 || ts.CreatedTimestamp != 0 || ts.SkipUnmarshalingExemplars || ts.ResourceAttributes != nil || ts.ScopeAttributes != nil { panic("pool returned dirty TimeSeries: this indicates a bug where ReuseTimeseries was called on a TimeSeries still in use") } @@ -88,6 +88,8 @@ func ReuseTimeseries(ts *TimeSeries) { ts.CreatedTimestamp = 0 ts.SkipUnmarshalingExemplars = false + ts.ResourceAttributes = nil + ts.ScopeAttributes = nil ClearExemplars(ts) timeSeriesPool.Put(ts) diff --git a/pkg/mimirpb/timeseries_pools_test.go b/pkg/mimirpb/timeseries_pools_test.go index 99014299b5a..ace716ad19e 100644 --- a/pkg/mimirpb/timeseries_pools_test.go +++ b/pkg/mimirpb/timeseries_pools_test.go @@ -40,6 +40,7 @@ func TestTimeseriesFromPool(t *testing.T) { {"exemplars", &TimeSeries{Exemplars: []Exemplar{{Value: 1, TimestampMs: 2}}}}, {"CreatedTimestamp", &TimeSeries{CreatedTimestamp: 1234567890}}, {"SkipUnmarshalingExemplars", &TimeSeries{SkipUnmarshalingExemplars: true}}, + {"ResourceAttributes", &TimeSeries{ResourceAttributes: &ResourceAttributes{Timestamp: 1234567890}}}, } for _, tc := range dirtyPoolTests { t.Run("panics if pool returns dirty TimeSeries with "+tc.name, func(t *testing.T) { diff --git a/pkg/mimirpb/timeseries_test.go b/pkg/mimirpb/timeseries_test.go index 
eed1e9d1d87..ba622160dce 100644 --- a/pkg/mimirpb/timeseries_test.go +++ b/pkg/mimirpb/timeseries_test.go @@ -257,6 +257,83 @@ func TestDeepCopyTimeseries(t *testing.T) { assert.Len(t, dst.Histograms, 0) } +func TestDeepCopyTimeseriesResourceAttributes(t *testing.T) { + src := PreallocTimeseries{ + TimeSeries: &TimeSeries{ + Labels: []LabelAdapter{ + {Name: "__name__", Value: "test_metric"}, + }, + Samples: []Sample{ + {Value: 1, TimestampMs: 1000}, + }, + ResourceAttributes: &ResourceAttributes{ + Identifying: []AttributeEntry{ + {Key: "service.name", Value: "test-service"}, + {Key: "service.namespace", Value: "test-ns"}, + }, + Descriptive: []AttributeEntry{ + {Key: "host.name", Value: "test-host"}, + }, + Entities: []ResourceEntity{ + { + Type: "service", + ID: []AttributeEntry{ + {Key: "service.name", Value: "test-service"}, + }, + Description: []AttributeEntry{ + {Key: "service.version", Value: "1.0.0"}, + }, + }, + }, + Timestamp: 1234567890, + }, + }, + } + + dst := PreallocTimeseries{} + dst = DeepCopyTimeseries(dst, src, false, false) + + // Check that resource attributes are deeply copied. + require.NotNil(t, dst.ResourceAttributes) + assert.Equal(t, src.ResourceAttributes.Timestamp, dst.ResourceAttributes.Timestamp) + assert.Equal(t, src.ResourceAttributes.Identifying, dst.ResourceAttributes.Identifying) + assert.Equal(t, src.ResourceAttributes.Descriptive, dst.ResourceAttributes.Descriptive) + assert.Equal(t, src.ResourceAttributes.Entities, dst.ResourceAttributes.Entities) + + // Check that the pointers are different (deep copy). + assert.NotSame(t, src.ResourceAttributes, dst.ResourceAttributes) + if len(src.ResourceAttributes.Identifying) > 0 { + assert.NotSame(t, &src.ResourceAttributes.Identifying[0], &dst.ResourceAttributes.Identifying[0]) + } + if len(src.ResourceAttributes.Entities) > 0 { + assert.NotSame(t, &src.ResourceAttributes.Entities[0], &dst.ResourceAttributes.Entities[0]) + } + + // Verify modifying src doesn't affect dst. 
+ src.ResourceAttributes.Timestamp = 9999999999 + assert.NotEqual(t, src.ResourceAttributes.Timestamp, dst.ResourceAttributes.Timestamp) +} + +func TestDeepCopyTimeseriesNilResourceAttributes(t *testing.T) { + src := PreallocTimeseries{ + TimeSeries: &TimeSeries{ + Labels: []LabelAdapter{ + {Name: "__name__", Value: "test_metric"}, + }, + Samples: []Sample{ + {Value: 1, TimestampMs: 1000}, + }, + ResourceAttributes: nil, + }, + } + + dst := PreallocTimeseries{} + dst = DeepCopyTimeseries(dst, src, false, false) + + // Check that nil resource attributes remain nil. + assert.Nil(t, dst.ResourceAttributes) +} + func TestDeepCopyTimeseriesExemplars(t *testing.T) { src := PreallocTimeseries{ TimeSeries: &TimeSeries{ @@ -328,6 +405,19 @@ func TestDeepCopyTimeseriesCopiesAllFields(t *testing.T) { }, CreatedTimestamp: 1234567890, SkipUnmarshalingExemplars: true, + ResourceAttributes: &ResourceAttributes{ + Identifying: []AttributeEntry{{Key: "service.name", Value: "myservice"}}, + Descriptive: []AttributeEntry{{Key: "host.name", Value: "myhost"}}, + Entities: []ResourceEntity{{Type: "service", ID: []AttributeEntry{{Key: "service.name", Value: "myservice"}}, Description: []AttributeEntry{{Key: "desc", Value: "val"}}}}, + Timestamp: 1234567890, + }, + ScopeAttributes: &ScopeAttributes{ + Name: "github.com/example/payment", + Version: "1.2.0", + SchemaURL: "https://opentelemetry.io/schemas/1.24.0", + Attrs: []AttributeEntry{{Key: "library.language", Value: "go"}}, + Timestamp: 1234567890, + }, }, } @@ -735,9 +825,17 @@ func TestTimeSeries_MakeReferencesSafeToRetain(t *testing.T) { const ( origLabelName = "name" origLabelValue = "value" + origAttrKey = "service.name" + origAttrValue = "myservice" + origScopeName = "myscope" + origSchemaURL = "https://example.com/schema" ) labelNameBytes := []byte(origLabelName) labelValueBytes := []byte(origLabelValue) + attrKeyBytes := []byte(origAttrKey) + attrValueBytes := []byte(origAttrValue) + scopeNameBytes := []byte(origScopeName) + 
schemaURLBytes := []byte(origSchemaURL) ts := TimeSeries{ Labels: []LabelAdapter{ { @@ -755,6 +853,29 @@ func TestTimeSeries_MakeReferencesSafeToRetain(t *testing.T) { }, }, }, + ResourceAttributes: &ResourceAttributes{ + Identifying: []AttributeEntry{ + {Key: yoloString(attrKeyBytes), Value: yoloString(attrValueBytes)}, + }, + Descriptive: []AttributeEntry{ + {Key: yoloString(attrKeyBytes), Value: yoloString(attrValueBytes)}, + }, + Entities: []ResourceEntity{ + { + Type: yoloString(attrKeyBytes), + ID: []AttributeEntry{{Key: yoloString(attrKeyBytes), Value: yoloString(attrValueBytes)}}, + Description: []AttributeEntry{{Key: yoloString(attrKeyBytes), Value: yoloString(attrValueBytes)}}, + }, + }, + }, + ScopeAttributes: &ScopeAttributes{ + Name: yoloString(scopeNameBytes), + Version: yoloString(attrValueBytes), + SchemaURL: yoloString(schemaURLBytes), + Attrs: []AttributeEntry{ + {Key: yoloString(attrKeyBytes), Value: yoloString(attrValueBytes)}, + }, + }, } ts.MakeReferencesSafeToRetain() @@ -762,6 +883,10 @@ func TestTimeSeries_MakeReferencesSafeToRetain(t *testing.T) { // Modify the referenced byte slices, to test whether ts retains them (it shouldn't). labelNameBytes[len(labelNameBytes)-1] = 'x' labelValueBytes[len(labelValueBytes)-1] = 'x' + attrKeyBytes[len(attrKeyBytes)-1] = 'x' + attrValueBytes[len(attrValueBytes)-1] = 'x' + scopeNameBytes[len(scopeNameBytes)-1] = 'x' + schemaURLBytes[len(schemaURLBytes)-1] = 'x' for _, l := range ts.Labels { require.Equal(t, origLabelName, l.Name) @@ -773,4 +898,32 @@ func TestTimeSeries_MakeReferencesSafeToRetain(t *testing.T) { require.Equal(t, origLabelValue, l.Value) } } + // Resource attributes must be deep-copied. 
+ for _, e := range ts.ResourceAttributes.Identifying { + require.Equal(t, origAttrKey, e.Key) + require.Equal(t, origAttrValue, e.Value) + } + for _, e := range ts.ResourceAttributes.Descriptive { + require.Equal(t, origAttrKey, e.Key) + require.Equal(t, origAttrValue, e.Value) + } + for _, entity := range ts.ResourceAttributes.Entities { + require.Equal(t, origAttrKey, entity.Type) + for _, attr := range entity.ID { + require.Equal(t, origAttrKey, attr.Key) + require.Equal(t, origAttrValue, attr.Value) + } + for _, attr := range entity.Description { + require.Equal(t, origAttrKey, attr.Key) + require.Equal(t, origAttrValue, attr.Value) + } + } + // Scope attributes must be deep-copied. + require.Equal(t, origScopeName, ts.ScopeAttributes.Name) + require.Equal(t, origAttrValue, ts.ScopeAttributes.Version) + require.Equal(t, origSchemaURL, ts.ScopeAttributes.SchemaURL) + for _, e := range ts.ScopeAttributes.Attrs { + require.Equal(t, origAttrKey, e.Key) + require.Equal(t, origAttrValue, e.Value) + } } diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 9dda2aea33d..51c6d649c9c 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -304,7 +304,7 @@ func (q *BlocksStoreQueryable) Querier(mint, maxt int64) (storage.Querier, error return nil, errors.Errorf("BlocksStoreQueryable is not running: %v", s) } - return &blocksStoreQuerier{ + baseQuerier := &blocksStoreQuerier{ minT: mint, maxT: maxt, finder: q.finder, @@ -316,7 +316,16 @@ func (q *BlocksStoreQueryable) Querier(mint, maxt int64) (storage.Querier, error consistency: q.consistency, logger: q.logger, queryStoreAfter: q.queryStoreAfter, - }, nil + } + + // Wrap with resource querier cache to support info() PromQL function + return NewResourceQuerierCache( + baseQuerier, + &blocksResourceFetcher{blocksQueryable: q}, + mint, + maxt, + q.logger, + ), nil } type blocksStoreQuerier struct { @@ -1471,3 +1480,194 @@ func 
convertBlockHintsToULIDsOpaque(hints []hintspb.Block) ([]ulid.ULID, error) return res, nil } + +// ResourceAttributes queries resource attributes from store-gateways for the given time range and matchers. +func (q *BlocksStoreQueryable) ResourceAttributes(ctx context.Context, minT, maxT int64, matchers []*labels.Matcher, limit int64, resourceAttrFilters []*storepb.ResourceAttrFilter) ([]*storepb.ResourceAttributesSeriesData, error) { + spanLog, ctx := spanlogger.New(ctx, q.logger, tracer, "BlocksStoreQueryable.ResourceAttributes") + defer spanLog.Finish() + + tenantID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + + spanLog.DebugLog("start", util.TimeFromMillis(minT).UTC().String(), "end", + util.TimeFromMillis(maxT).UTC().String(), "matchers", util.MatchersStringer(matchers)) + + // Check if we should query block store at all + now := time.Now() + if !ShouldQueryBlockStore(q.queryStoreAfter, now, minT) { + spanLog.DebugLog("msg", "not querying block store; query time range begins after the query-store-after limit") + return nil, nil + } + + maxT = clampMaxTime(spanLog, maxT, now.UnixMilli(), -q.queryStoreAfter, "query store after") + + // Find the list of blocks we need to query given the time range. 
+ knownBlocks, indexMeta, err := q.finder.GetBlocks(ctx, tenantID, minT, maxT) + if err != nil { + return nil, err + } + + if len(knownBlocks) == 0 { + spanLog.DebugLog("msg", "no blocks found") + return nil, nil + } + + spanLog.DebugLog("msg", "found blocks to query", "num_blocks", len(knownBlocks)) + + var ( + results []*storepb.ResourceAttributesSeriesData + mtx sync.Mutex + attemptedBlocks = map[ulid.ULID][]string{} + touchedStores = map[string]struct{}{} + ) + + convertedMatchers := convertMatchersToLabelMatcher(matchers) + + for attempt := 1; attempt <= q.dynamicReplication.MaxReplicationFactor(); attempt++ { + // Find store-gateway instances having the blocks + clients, err := q.stores.GetClientsFor(tenantID, knownBlocks, attemptedBlocks) + if err != nil { + if attempt > 1 { + level.Warn(spanLog).Log("msg", "unable to get store-gateway clients while retrying", "err", err) + break + } + return nil, err + } + + spanLog.DebugLog("msg", "found store-gateway instances to query", "num_instances", len(clients), "attempt", attempt) + + reqCtx := grpcContextWithBucketStoreRequestMeta(ctx, tenantID, indexMeta) + g, gCtx := errgroup.WithContext(reqCtx) + + for c, blockIDs := range clients { + g.Go(func() error { + clientResults, queriedBlocks, err := q.fetchResourceAttributesFromStore(gCtx, c, blockIDs, minT, maxT, convertedMatchers, limit, resourceAttrFilters, spanLog) + if err != nil { + if shouldRetry(err) { + level.Warn(spanLog).Log("msg", "failed to fetch resource attributes; error is retriable", "remote", c.RemoteAddress(), "err", err) + return nil + } + return fmt.Errorf("non-retriable error while fetching resource attributes from store: %w", err) + } + + mtx.Lock() + results = append(results, clientResults...) 
+ touchedStores[c.RemoteAddress()] = struct{}{} + for _, blockID := range queriedBlocks { + attemptedBlocks[blockID] = append(attemptedBlocks[blockID], c.RemoteAddress()) + } + mtx.Unlock() + + return nil + }) + } + + if err := g.Wait(); err != nil { + return nil, err + } + + // Update attemptedBlocks for blocks we tried but didn't get queried + for client, blockIDs := range clients { + touchedStores[client.RemoteAddress()] = struct{}{} + for _, blockID := range blockIDs { + if _, ok := attemptedBlocks[blockID]; !ok { + attemptedBlocks[blockID] = append(attemptedBlocks[blockID], client.RemoteAddress()) + } + } + } + } + + spanLog.DebugLog("msg", "resource attributes query complete", "num_results", len(results), "num_stores", len(touchedStores)) + + return results, nil +} + +// fetchResourceAttributesFromStore fetches resource attributes from a single store-gateway. +func (q *BlocksStoreQueryable) fetchResourceAttributesFromStore( + ctx context.Context, + client BlocksStoreClient, + blockIDs []ulid.ULID, + minT, maxT int64, + matchers []storepb.LabelMatcher, + limit int64, + resourceAttrFilters []*storepb.ResourceAttrFilter, + spanLog *spanlogger.SpanLogger, +) ([]*storepb.ResourceAttributesSeriesData, []ulid.ULID, error) { + req, err := createResourceAttributesRequest(minT, maxT, blockIDs, matchers, limit, resourceAttrFilters) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to create resource attributes request") + } + + stream, err := client.ResourceAttributes(ctx, req) + if err != nil { + return nil, nil, err + } + + var ( + results []*storepb.ResourceAttributesSeriesData + queriedBlocks []ulid.ULID + ) + + for { + resp, err := stream.Recv() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, nil, err + } + + results = append(results, resp.Items...) 
+ + // Extract queried blocks from hints if available + if resp.Hints != nil { + hints := hintspb.ResourceAttributesResponseHints{} + if err := types.UnmarshalAny(resp.Hints, &hints); err == nil { + ids, err := convertBlockHintsToULIDsOpaque(hints.QueriedBlocks) + if err == nil { + queriedBlocks = append(queriedBlocks, ids...) + } + } + } + } + + spanLog.DebugLog("msg", "received resource attributes from store-gateway", + "instance", client.RemoteAddress(), + "num_results", len(results), + "requested_blocks", strings.Join(convertULIDsToString(blockIDs), " "), + "queried_blocks", strings.Join(convertULIDsToString(queriedBlocks), " ")) + + return results, queriedBlocks, nil +} + +func createResourceAttributesRequest(minT, maxT int64, blockIDs []ulid.ULID, matchers []storepb.LabelMatcher, limit int64, resourceAttrFilters []*storepb.ResourceAttrFilter) (*storepb.ResourceAttributesRequest, error) { + req := &storepb.ResourceAttributesRequest{ + Start: minT, + End: maxT, + Matchers: matchers, + Limit: limit, + ResourceAttrFilters: resourceAttrFilters, + } + + // Selectively query only specific blocks + requestHints := &hintspb.ResourceAttributesRequestHints{ + BlockMatchers: []storepb.LabelMatcher{ + { + Type: storepb.LabelMatcher_RE, + Name: block.BlockIDLabel, + Value: strings.Join(convertULIDsToString(blockIDs), "|"), + }, + }, + } + + anyRequestHints, err := types.MarshalAny(requestHints) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal resource attributes request hints") + } + + req.Hints = anyRequestHints + + return req, nil +} diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index cc42f3abc36..87a40616856 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -3335,6 +3335,11 @@ func (m *storeGatewayClientMock) RemoteZone() string { return m.remoteZone } +func (m *storeGatewayClientMock) ResourceAttributes(context.Context, 
*storepb.ResourceAttributesRequest, ...grpc.CallOption) (storegatewaypb.StoreGateway_ResourceAttributesClient, error) { + // Return empty stream for tests - resource attributes not mocked + return nil, nil +} + type storeGatewaySeriesClientMock struct { grpc.ClientStream @@ -3416,6 +3421,11 @@ func (m *cancelerStoreGatewayClientMock) RemoteZone() string { return m.remoteZone } +func (m *cancelerStoreGatewayClientMock) ResourceAttributes(ctx context.Context, _ *storepb.ResourceAttributesRequest, _ ...grpc.CallOption) (storegatewaypb.StoreGateway_ResourceAttributesClient, error) { + m.cancel() + return nil, ctx.Err() +} + type blocksStoreLimitsMock struct { maxLabelsQueryLength time.Duration maxChunksPerQuery int diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go index b712ac2fd42..0858e72db1c 100644 --- a/pkg/querier/distributor_queryable.go +++ b/pkg/querier/distributor_queryable.go @@ -44,6 +44,7 @@ type Distributor interface { LabelValuesCardinality(ctx context.Context, labelNames []model.LabelName, matchers []*labels.Matcher, countMethod cardinality.CountMethod) (uint64, *client.LabelValuesCardinalityResponse, error) ActiveSeries(ctx context.Context, matchers []*labels.Matcher) ([]labels.Labels, error) ActiveNativeHistogramMetrics(ctx context.Context, matchers []*labels.Matcher) (*cardinality.ActiveNativeHistogramMetricsResponse, error) + ResourceAttributes(ctx context.Context, startMs, endMs int64, matchers []*labels.Matcher, limit int64, resourceAttrFilters []*client.ResourceAttrFilter) ([]*client.SeriesResourceAttributes, error) } func NewDistributorQueryable(distributor Distributor, cfgProvider distributorQueryableConfigProvider, queryMetrics *stats.QueryMetrics, logger log.Logger) storage.Queryable { @@ -67,14 +68,23 @@ type distributorQueryable struct { } func (d distributorQueryable) Querier(mint, maxt int64) (storage.Querier, error) { - return &distributorQuerier{ + baseQuerier := &distributorQuerier{ logger: 
d.logger, distributor: d.distributor, mint: mint, maxt: maxt, queryMetrics: d.queryMetrics, cfgProvider: d.cfgProvider, - }, nil + } + + // Wrap with resource querier cache to support info() PromQL function + return NewResourceQuerierCache( + baseQuerier, + &distributorResourceFetcher{distributor: d.distributor}, + mint, + maxt, + d.logger, + ), nil } type distributorQuerier struct { diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index aa37308efc2..2550dd41f15 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -878,6 +878,11 @@ func (m *mockDistributor) ActiveNativeHistogramMetrics(ctx context.Context, matc return args.Get(0).(*cardinality.ActiveNativeHistogramMetricsResponse), args.Error(1) } +func (m *mockDistributor) ResourceAttributes(ctx context.Context, startMs, endMs int64, matchers []*labels.Matcher, limit int64, resourceAttrFilters []*client.ResourceAttrFilter) ([]*client.SeriesResourceAttributes, error) { + args := m.Called(ctx, startMs, endMs, matchers, limit, resourceAttrFilters) + return args.Get(0).([]*client.SeriesResourceAttributes), args.Error(1) +} + type mockConfigProvider struct { queryIngestersWithin time.Duration seenUserIDs []string diff --git a/pkg/querier/engine/config.go b/pkg/querier/engine/config.go index 8476b273387..031f82c5266 100644 --- a/pkg/querier/engine/config.go +++ b/pkg/querier/engine/config.go @@ -34,6 +34,9 @@ type Config struct { EnableDelayedNameRemovalPrometheusEngine bool `yaml:"enable_delayed_name_removal_prometheus_engine" category:"experimental"` + EnableNativeMetadata bool `yaml:"enable_native_metadata" category:"experimental"` + InfoResourceStrategy string `yaml:"info_resource_strategy" category:"experimental"` + MimirQueryEngine streamingpromql.EngineOpts `yaml:"mimir_query_engine" category:"experimental"` } @@ -52,6 +55,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { 
f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, sharedWithQueryFrontend("The default evaluation interval or step size for subqueries.")) f.DurationVar(&cfg.LookbackDelta, "querier.lookback-delta", 5*time.Minute, sharedWithQueryFrontend("Time since the last sample after which a time series is considered stale and ignored by expression evaluations.")) f.BoolVar(&cfg.EnableDelayedNameRemovalPrometheusEngine, "querier.enable-delayed-name-removal-prometheus-engine", false, "Enable the experimental PromQL feature for delayed name removal in the Prometheus engine. Note that this only applies when the Prometheus engine is selected or used as fallback from the Mimir Query Engine.") + f.BoolVar(&cfg.EnableNativeMetadata, "querier.enable-native-metadata", false, "Enable native OTel resource attribute metadata for the info() function.") + f.StringVar(&cfg.InfoResourceStrategy, "querier.info-resource-strategy", "target-info", "Strategy for info() to resolve resource attributes. Valid values: target-info, resource-attributes, hybrid.") cfg.MimirQueryEngine.RegisterFlags(f) } @@ -78,6 +83,8 @@ func NewPromQLEngineOptions(cfg Config, activityTracker *activitytracker.Activit }, // This only applies to the fallback Prometheus engine. MQE's is defined per-tenant via limits. 
EnableDelayedNameRemoval: cfg.EnableDelayedNameRemovalPrometheusEngine, + EnableNativeMetadata: cfg.EnableNativeMetadata, + InfoResourceStrategy: promql.InfoResourceStrategy(cfg.InfoResourceStrategy), Parser: promqlext.NewPromQLParser(), } diff --git a/pkg/querier/error_translate_queryable.go b/pkg/querier/error_translate_queryable.go index 410a48260d6..e6435582600 100644 --- a/pkg/querier/error_translate_queryable.go +++ b/pkg/querier/error_translate_queryable.go @@ -17,6 +17,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" "github.com/grafana/mimir/pkg/mimirpb" @@ -158,6 +159,22 @@ func (e errorTranslateQuerier) Select(ctx context.Context, sortSeries bool, hint return errorTranslateSeriesSet{s: s, fn: e.fn} } +// GetResourceAt implements storage.ResourceQuerier by delegating to the underlying querier if it supports it. +func (e errorTranslateQuerier) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + if rq, ok := e.q.(storage.ResourceQuerier); ok { + return rq.GetResourceAt(labelsHash, timestamp) + } + return nil, false +} + +// IterUniqueAttributeNames implements storage.ResourceQuerier by delegating to the underlying querier if it supports it. 
+func (e errorTranslateQuerier) IterUniqueAttributeNames(fn func(name string)) error { + if rq, ok := e.q.(storage.ResourceQuerier); ok { + return rq.IterUniqueAttributeNames(fn) + } + return nil +} + type errorTranslateChunkQuerier struct { q storage.ChunkQuerier fn ErrTranslateFn diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index 36f23c72f02..5b160ac8834 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -145,8 +145,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router { api := v1.NewAPI( engine, q, - nil, - nil, + nil, nil, // No remote write support (Appendable, AppendableV2). nil, func(context.Context) v1.ScrapePoolsRetriever { return &DummyTargetRetriever{} }, func(context.Context) v1.TargetRetriever { return &DummyTargetRetriever{} }, @@ -179,8 +178,9 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router { 5*time.Minute, false, false, - nil, - nil, + nil, // overrideErrorCode + false, // enableNativeMetadata + nil, // featureRegistry v1.OpenAPIOptions{}, nil, ) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 469cd64ea78..73dfd9e5671 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -28,6 +28,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" v1 "github.com/prometheus/prometheus/web/api/v1" "go.opentelemetry.io/otel" @@ -702,6 +703,47 @@ func (mq *multiQuerier) Close() error { return me.Err() } +// GetResourceAt implements storage.ResourceQuerier by delegating to the first underlying querier that supports it. 
+func (mq *multiQuerier) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + mq.queriersMtx.Lock() + defer mq.queriersMtx.Unlock() + + for _, q := range mq.queriers { + if rq, ok := q.(storage.ResourceQuerier); ok { + if rv, found := rq.GetResourceAt(labelsHash, timestamp); found { + return rv, true + } + } + } + return nil, false +} + +// IterUniqueAttributeNames implements storage.ResourceQuerier by iterating over all underlying queriers that support it. +func (mq *multiQuerier) IterUniqueAttributeNames(fn func(name string)) error { + mq.queriersMtx.Lock() + defer mq.queriersMtx.Unlock() + + level.Debug(mq.logger).Log("msg", "IterUniqueAttributeNames called", "queriers", len(mq.queriers)) + + seen := make(map[string]struct{}) + for i, q := range mq.queriers { + if rq, ok := q.(storage.ResourceQuerier); ok { + if err := rq.IterUniqueAttributeNames(func(name string) { + if _, exists := seen[name]; !exists { + seen[name] = struct{}{} + fn(name) + } + }); err != nil { + return err + } + } else { + level.Debug(mq.logger).Log("msg", "querier does not implement ResourceQuerier for IterUniqueAttributeNames", "querier", i, "type", fmt.Sprintf("%T", q)) + } + } + level.Debug(mq.logger).Log("msg", "IterUniqueAttributeNames completed", "uniqueNames", len(seen)) + return nil +} + func (mq *multiQuerier) mergeSeriesSets(sets []storage.SeriesSet) storage.SeriesSet { // Here we deal with sets that are based on chunks and build single set from them. 
// Remaining sets are merged with chunks-based one using storage.NewMergeSeriesSet diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 32b10a6b8db..2055249e0a2 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1500,6 +1500,10 @@ func (m *errDistributor) ActiveNativeHistogramMetrics(context.Context, []*labels return nil, errDistributorError } +func (m *errDistributor) ResourceAttributes(context.Context, int64, int64, []*labels.Matcher, int64, []*client.ResourceAttrFilter) ([]*client.SeriesResourceAttributes, error) { + return nil, errDistributorError +} + func TestQuerier_QueryStoreAfterConfig(t *testing.T) { testCases := []struct { name string diff --git a/pkg/querier/resource_attributes_handler.go b/pkg/querier/resource_attributes_handler.go new file mode 100644 index 00000000000..a44227536b5 --- /dev/null +++ b/pkg/querier/resource_attributes_handler.go @@ -0,0 +1,560 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querier + +import ( + "bytes" + "context" + "fmt" + "net/http" + "sort" + "strconv" + "time" + + "github.com/grafana/dskit/tenant" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/labels" + "golang.org/x/sync/errgroup" + + ingester_client "github.com/grafana/mimir/pkg/ingester/client" + "github.com/grafana/mimir/pkg/storegateway/storepb" + "github.com/grafana/mimir/pkg/util" + "github.com/grafana/mimir/pkg/util/promqlext" +) + +// ResourceAttributesResponseData contains the response data format. +type ResourceAttributesResponseData struct { + Series []*SeriesResourceAttributesData `json:"series"` +} + +// SeriesResourceAttributesData contains resource attributes for a single series. +type SeriesResourceAttributesData struct { + Labels map[string]string `json:"labels"` + Versions []*ResourceVersionData `json:"versions"` +} + +// ResourceVersionData contains versioned resource attribute data. 
+type ResourceVersionData struct { + Identifying map[string]string `json:"identifying,omitempty"` + Descriptive map[string]string `json:"descriptive,omitempty"` + Entities []*EntityData `json:"entities,omitempty"` + MinTimeMs int64 `json:"minTimeMs"` + MaxTimeMs int64 `json:"maxTimeMs"` +} + +// EntityData contains entity information. +type EntityData struct { + Type string `json:"type"` + ID map[string]string `json:"id,omitempty"` + Description map[string]string `json:"description,omitempty"` +} + +// ResourceAttributesResponse matches the Prometheus API response format. +type ResourceAttributesResponse struct { + Status string `json:"status"` + Data *ResourceAttributesResponseData `json:"data,omitempty"` + Error string `json:"error,omitempty"` +} + +// ResourceAttributesBlocksQueryable is an interface for querying resource attributes from block storage. +type ResourceAttributesBlocksQueryable interface { + ResourceAttributes(ctx context.Context, minT, maxT int64, matchers []*labels.Matcher, limit int64, resourceAttrFilters []*storepb.ResourceAttrFilter) ([]*storepb.ResourceAttributesSeriesData, error) +} + +// ResourceAttributesHandlerConfig holds configuration for resource attributes handler. +type ResourceAttributesHandlerConfig struct { + QueryStoreAfter time.Duration + QueryIngestersWithin func(userID string) time.Duration +} + +// NewResourceAttributesHandler creates a http.Handler for the /api/v1/resources endpoint. +// This endpoint is for querying OTel resource attributes persisted per time series. +// It queries both ingesters (via distributor) and store-gateways (via blocksQueryable) and merges results. +// If blocksQueryable is nil, only ingesters are queried. 
+func NewResourceAttributesHandler(d Distributor, blocksQueryable ResourceAttributesBlocksQueryable, cfg ResourceAttributesHandlerConfig) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Validate tenant + tenantID, err := tenant.TenantID(ctx) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Parse request parameters + if err := r.ParseForm(); err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: "error parsing request form: " + err.Error(), + }) + return + } + + // Parse time range + startMs, endMs, err := parseResourceTimeRange(r) + if err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: err.Error(), + }) + return + } + + // Parse matchers + matcherSets := r.Form["match[]"] + if len(matcherSets) == 0 { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: "at least one matcher is required (use {__name__=~\".+\"} for all series)", + }) + return + } + + pqlParser := promqlext.NewPromQLParser() + var allMatchers []*labels.Matcher + for _, matcherSet := range matcherSets { + matchers, err := pqlParser.ParseMetricSelector(matcherSet) + if err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: "error parsing matcher: " + err.Error(), + }) + return + } + allMatchers = append(allMatchers, matchers...) 
+ } + + // Parse limit (optional) + var limit int64 + if limitStr := r.FormValue("limit"); limitStr != "" { + limit, err = strconv.ParseInt(limitStr, 10, 64) + if err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: "invalid limit parameter: " + err.Error(), + }) + return + } + } + + now := time.Now() + nowMs := now.UnixMilli() + var allSeries []*SeriesResourceAttributesData + + // Default endMs to now if not specified + if endMs == 0 { + endMs = nowMs + } + + // Query both ingesters and store-gateways in parallel + g, gCtx := errgroup.WithContext(ctx) + + // Query ingesters via distributor + var ingesterResults []*ingester_client.SeriesResourceAttributes + var queryIngestersWithin time.Duration + if cfg.QueryIngestersWithin != nil { + queryIngestersWithin = cfg.QueryIngestersWithin(tenantID) + } + shouldQueryIngesters := ShouldQueryIngesters(queryIngestersWithin, now, endMs) + if shouldQueryIngesters { + g.Go(func() error { + var err error + ingesterResults, err = d.ResourceAttributes(gCtx, startMs, endMs, allMatchers, limit, nil) + return err + }) + } + + // Query store-gateways via blocks queryable + var storeResults []*storepb.ResourceAttributesSeriesData + if blocksQueryable != nil && ShouldQueryBlockStore(cfg.QueryStoreAfter, now, startMs) { + g.Go(func() error { + var err error + storeResults, err = blocksQueryable.ResourceAttributes(gCtx, startMs, endMs, allMatchers, limit, nil) + return err + }) + } + + // Wait for both queries to complete + if err := g.Wait(); err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: "error querying resource attributes: " + err.Error(), + }) + return + } + + // Convert and merge results from both sources + ingesterConverted := convertIngesterResults(ingesterResults) + storeConverted := convertStoreResults(storeResults) + + // Merge results, deduplicating by series labels + allSeries = mergeResourceAttributesSeries(ingesterConverted, 
storeConverted) + + // Apply limit if specified + if limit > 0 && int64(len(allSeries)) > limit { + allSeries = allSeries[:limit] + } + + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusSuccess, + Data: &ResourceAttributesResponseData{ + Series: allSeries, + }, + }) + }) +} + +// parseResourceTimeRange parses the start and end time parameters from the request. +func parseResourceTimeRange(r *http.Request) (int64, int64, error) { + var startMs, endMs int64 + + if startStr := r.FormValue("start"); startStr != "" { + t, err := util.ParseTime(startStr) + if err != nil { + return 0, 0, errors.Wrap(err, "error parsing start time") + } + startMs = t + } + + if endStr := r.FormValue("end"); endStr != "" { + t, err := util.ParseTime(endStr) + if err != nil { + return 0, 0, errors.Wrap(err, "error parsing end time") + } + endMs = t + } + + return startMs, endMs, nil +} + +// convertIngesterResults converts ingester client results to the HTTP response format. +func convertIngesterResults(results []*ingester_client.SeriesResourceAttributes) []*SeriesResourceAttributesData { + if results == nil { + return nil + } + + series := make([]*SeriesResourceAttributesData, 0, len(results)) + + for _, item := range results { + lbls := make(map[string]string) + for _, l := range item.Labels { + lbls[l.Name] = l.Value + } + + versions := make([]*ResourceVersionData, 0, len(item.Versions)) + for _, v := range item.Versions { + version := &ResourceVersionData{ + MinTimeMs: v.MinTimeMs, + MaxTimeMs: v.MaxTimeMs, + Identifying: v.Identifying, + Descriptive: v.Descriptive, + } + + if len(v.Entities) > 0 { + entities := make([]*EntityData, 0, len(v.Entities)) + for _, e := range v.Entities { + entities = append(entities, &EntityData{ + Type: e.Type, + ID: e.Id, + Description: e.Description, + }) + } + version.Entities = entities + } + + versions = append(versions, version) + } + + series = append(series, &SeriesResourceAttributesData{ + Labels: lbls, + Versions: versions, + 
}) + } + + return series +} + +// convertStoreResults converts store-gateway results to the HTTP response format. +func convertStoreResults(results []*storepb.ResourceAttributesSeriesData) []*SeriesResourceAttributesData { + if results == nil { + return nil + } + + series := make([]*SeriesResourceAttributesData, 0, len(results)) + + for _, item := range results { + versions := make([]*ResourceVersionData, 0, len(item.Versions)) + for _, v := range item.Versions { + version := &ResourceVersionData{ + MinTimeMs: v.MinTimeMs, + MaxTimeMs: v.MaxTimeMs, + Identifying: v.Identifying, + Descriptive: v.Descriptive, + } + + if len(v.Entities) > 0 { + entities := make([]*EntityData, 0, len(v.Entities)) + for _, e := range v.Entities { + entities = append(entities, &EntityData{ + Type: e.Type, + ID: e.Id, + Description: e.Description, + }) + } + version.Entities = entities + } + + versions = append(versions, version) + } + + series = append(series, &SeriesResourceAttributesData{ + Labels: item.Labels, + Versions: versions, + }) + } + + return series +} + +// mergeResourceAttributesSeries merges results from ingesters and store-gateways. +// Series with the same labels are merged by combining their versions and deduplicating. 
+func mergeResourceAttributesSeries(ingesterSeries, storeSeries []*SeriesResourceAttributesData) []*SeriesResourceAttributesData { + // Create a map for efficient lookup by label fingerprint + seriesMap := make(map[string]*SeriesResourceAttributesData) + + // Add all ingester series to the map + for _, s := range ingesterSeries { + key := labelsMapToKey(s.Labels) + seriesMap[key] = s + } + + // Merge store series with existing ingester series + for _, s := range storeSeries { + key := labelsMapToKey(s.Labels) + if existing, ok := seriesMap[key]; ok { + // Merge versions from both sources + existing.Versions = mergeResourceVersions(existing.Versions, s.Versions) + } else { + seriesMap[key] = s + } + } + + // Convert map back to slice + result := make([]*SeriesResourceAttributesData, 0, len(seriesMap)) + for _, s := range seriesMap { + result = append(result, s) + } + + // Sort by labels for consistent ordering + sort.Slice(result, func(i, j int) bool { + return labelsMapToKey(result[i].Labels) < labelsMapToKey(result[j].Labels) + }) + + return result +} + +// labelsMapToKey creates a string key from a labels map for deduplication. +// Uses null-byte separators to prevent collisions from values containing delimiters. +func labelsMapToKey(lbls map[string]string) string { + keys := make([]string, 0, len(lbls)) + for k := range lbls { + keys = append(keys, k) + } + sort.Strings(keys) + + var buf bytes.Buffer + for _, k := range keys { + buf.WriteString(k) + buf.WriteByte(0) + buf.WriteString(lbls[k]) + buf.WriteByte(0) + } + return buf.String() +} + +// mergeResourceVersions merges and deduplicates resource versions from two sources. +func mergeResourceVersions(a, b []*ResourceVersionData) []*ResourceVersionData { + // Simple merge: append and sort by time + // In the future, we could deduplicate overlapping time ranges + all := append(a, b...) 
+ + // Sort by MinTimeMs + sort.Slice(all, func(i, j int) bool { + return all[i].MinTimeMs < all[j].MinTimeMs + }) + + // Deduplicate versions with same time range + if len(all) == 0 { + return all + } + + result := []*ResourceVersionData{all[0]} + for i := 1; i < len(all); i++ { + last := result[len(result)-1] + // Skip if same time range (keep the first one, which is from ingesters) + if all[i].MinTimeMs == last.MinTimeMs && all[i].MaxTimeMs == last.MaxTimeMs { + continue + } + result = append(result, all[i]) + } + + return result +} + +// NewResourceAttributesSeriesHandler creates a http.Handler for the /api/v1/resources/series endpoint. +// This is the reverse lookup endpoint: given resource attribute key:value filters, find matching series. +// The response format is the same as the forward lookup endpoint. +func NewResourceAttributesSeriesHandler(d Distributor, blocksQueryable ResourceAttributesBlocksQueryable, cfg ResourceAttributesHandlerConfig) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + tenantID, err := tenant.TenantID(ctx) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + if err := r.ParseForm(); err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: "error parsing request form: " + err.Error(), + }) + return + } + + // Parse time range + startMs, endMs, err := parseResourceTimeRange(r) + if err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: err.Error(), + }) + return + } + + // Parse resource attribute filters: resource.attr=key:value + filterParams := r.Form["resource.attr"] + if len(filterParams) == 0 { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: "at least one resource.attr parameter is required (format: resource.attr=key:value)", + }) + return + } + + ingesterFilters, storeFilters, err := 
parseResourceAttrFilters(filterParams) + if err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: err.Error(), + }) + return + } + + // Parse limit (optional) + var limit int64 + if limitStr := r.FormValue("limit"); limitStr != "" { + limit, err = strconv.ParseInt(limitStr, 10, 64) + if err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: "invalid limit parameter: " + err.Error(), + }) + return + } + } + + now := time.Now() + nowMs := now.UnixMilli() + var allSeries []*SeriesResourceAttributesData + + if endMs == 0 { + endMs = nowMs + } + + g, gCtx := errgroup.WithContext(ctx) + + // Query ingesters via distributor with filters (no matchers needed) + var ingesterResults []*ingester_client.SeriesResourceAttributes + var queryIngestersWithin time.Duration + if cfg.QueryIngestersWithin != nil { + queryIngestersWithin = cfg.QueryIngestersWithin(tenantID) + } + shouldQueryIngesters := ShouldQueryIngesters(queryIngestersWithin, now, endMs) + if shouldQueryIngesters { + g.Go(func() error { + var err error + ingesterResults, err = d.ResourceAttributes(gCtx, startMs, endMs, nil, limit, ingesterFilters) + return err + }) + } + + // Query store-gateways with filters + var storeResults []*storepb.ResourceAttributesSeriesData + if blocksQueryable != nil && ShouldQueryBlockStore(cfg.QueryStoreAfter, now, startMs) { + g.Go(func() error { + var err error + storeResults, err = blocksQueryable.ResourceAttributes(gCtx, startMs, endMs, nil, limit, storeFilters) + return err + }) + } + + if err := g.Wait(); err != nil { + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusError, + Error: "error querying resource attributes: " + err.Error(), + }) + return + } + + ingesterConverted := convertIngesterResults(ingesterResults) + storeConverted := convertStoreResults(storeResults) + allSeries = mergeResourceAttributesSeries(ingesterConverted, storeConverted) + + if limit > 0 && 
int64(len(allSeries)) > limit { + allSeries = allSeries[:limit] + } + + util.WriteJSONResponse(w, ResourceAttributesResponse{ + Status: statusSuccess, + Data: &ResourceAttributesResponseData{ + Series: allSeries, + }, + }) + }) +} + +// parseResourceAttrFilters parses "key:value" filter strings into both ingester and store-gateway filter types. +func parseResourceAttrFilters(params []string) ([]*ingester_client.ResourceAttrFilter, []*storepb.ResourceAttrFilter, error) { + ingesterFilters := make([]*ingester_client.ResourceAttrFilter, 0, len(params)) + storeFilters := make([]*storepb.ResourceAttrFilter, 0, len(params)) + + for _, param := range params { + // Find the first colon separator + idx := -1 + for i, c := range param { + if c == ':' { + idx = i + break + } + } + if idx <= 0 { + return nil, nil, fmt.Errorf("invalid resource.attr format %q: expected key:value", param) + } + + key := param[:idx] + value := param[idx+1:] + + ingesterFilters = append(ingesterFilters, &ingester_client.ResourceAttrFilter{ + Key: key, + Value: value, + }) + storeFilters = append(storeFilters, &storepb.ResourceAttrFilter{ + Key: key, + Value: value, + }) + } + + return ingesterFilters, storeFilters, nil +} diff --git a/pkg/querier/resource_attributes_handler_test.go b/pkg/querier/resource_attributes_handler_test.go new file mode 100644 index 00000000000..606ced831c2 --- /dev/null +++ b/pkg/querier/resource_attributes_handler_test.go @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querier + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/grafana/dskit/user" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/grafana/mimir/pkg/ingester/client" + "github.com/grafana/mimir/pkg/mimirpb" +) + +func TestResourceAttributesHandler_RequiresMatcher(t *testing.T) { + distributor := &mockDistributor{} + handler := 
NewResourceAttributesHandler(distributor, nil, ResourceAttributesHandlerConfig{}) + + ctx := user.InjectOrgID(context.Background(), "test-tenant") + request, err := http.NewRequestWithContext(ctx, "GET", "/api/v1/resources", nil) + require.NoError(t, err) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, request) + + // Should return error for missing matcher + assert.Equal(t, http.StatusOK, recorder.Result().StatusCode) + + var resp ResourceAttributesResponse + err = json.Unmarshal(recorder.Body.Bytes(), &resp) + require.NoError(t, err) + assert.Equal(t, statusError, resp.Status) + assert.Contains(t, resp.Error, "at least one matcher is required") +} + +func TestResourceAttributesHandler_Success(t *testing.T) { + distributor := &mockDistributor{} + + // Mock the distributor to return test data + testItems := []*client.SeriesResourceAttributes{ + { + Labels: []mimirpb.LabelAdapter{ + {Name: "__name__", Value: "test_metric"}, + {Name: "job", Value: "test"}, + }, + Versions: []*client.ResourceVersionData{ + { + Identifying: map[string]string{"service.name": "my-service"}, + Descriptive: map[string]string{"service.version": "1.0.0"}, + MinTimeMs: 1000, + MaxTimeMs: 2000, + }, + }, + }, + } + + distributor.On("ResourceAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(testItems, nil) + + handler := NewResourceAttributesHandler(distributor, nil, ResourceAttributesHandlerConfig{}) + + ctx := user.InjectOrgID(context.Background(), "test-tenant") + request, err := http.NewRequestWithContext(ctx, "GET", "/api/v1/resources?match[]={__name__=~\".+\"}", nil) + require.NoError(t, err) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, request) + + // Should return success + assert.Equal(t, http.StatusOK, recorder.Result().StatusCode) + + responseBody, err := io.ReadAll(recorder.Result().Body) + require.NoError(t, err) + + var resp ResourceAttributesResponse + err = json.Unmarshal(responseBody, &resp) + require.NoError(t, err) + + assert.Equal(t, statusSuccess, resp.Status) + assert.Len(t, resp.Data.Series, 1) + assert.Equal(t, "test_metric", resp.Data.Series[0].Labels["__name__"]) + assert.Len(t, resp.Data.Series[0].Versions, 1) + assert.Equal(t, "my-service", resp.Data.Series[0].Versions[0].Identifying["service.name"]) +} + +func TestResourceAttributesHandler_RequiresTenant(t *testing.T) { + distributor := &mockDistributor{} + handler := NewResourceAttributesHandler(distributor, nil, ResourceAttributesHandlerConfig{}) + + // No tenant ID in context + request, err := http.NewRequest("GET", "/api/v1/resources?match[]={__name__=~\".+\"}", nil) + require.NoError(t, err) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, request) + + // Should return 400 for missing tenant + assert.Equal(t, http.StatusBadRequest, recorder.Result().StatusCode) +} + +func TestMergeResourceAttributesSeries(t *testing.T) { + // Test case: Merging series from ingesters and store-gateways + ingesterSeries := []*SeriesResourceAttributesData{ + { + Labels: map[string]string{"__name__": "metric1", "job": "test"}, + Versions: []*ResourceVersionData{ + {MinTimeMs: 5000, MaxTimeMs: 6000, Identifying: map[string]string{"service.name": "svc1"}}, + }, + }, + { + Labels: map[string]string{"__name__": "metric2", "job": "test"}, + 
Versions: []*ResourceVersionData{ + {MinTimeMs: 7000, MaxTimeMs: 8000, Identifying: map[string]string{"service.name": "svc2"}}, + }, + }, + } + + storeSeries := []*SeriesResourceAttributesData{ + { + Labels: map[string]string{"__name__": "metric1", "job": "test"}, // Same as ingester + Versions: []*ResourceVersionData{ + {MinTimeMs: 1000, MaxTimeMs: 2000, Identifying: map[string]string{"service.name": "svc1-old"}}, + {MinTimeMs: 3000, MaxTimeMs: 4000, Identifying: map[string]string{"service.name": "svc1"}}, + }, + }, + { + Labels: map[string]string{"__name__": "metric3", "job": "test"}, // Only in store + Versions: []*ResourceVersionData{ + {MinTimeMs: 1000, MaxTimeMs: 2000, Identifying: map[string]string{"service.name": "svc3"}}, + }, + }, + } + + result := mergeResourceAttributesSeries(ingesterSeries, storeSeries) + + // Should have 3 series (metric1, metric2, metric3) + assert.Len(t, result, 3) + + // Find metric1 - should have merged versions from both sources + var metric1 *SeriesResourceAttributesData + for _, s := range result { + if s.Labels["__name__"] == "metric1" { + metric1 = s + break + } + } + require.NotNil(t, metric1) + // Should have 3 versions (2 from store + 1 from ingester, sorted by time) + assert.Len(t, metric1.Versions, 3) + assert.Equal(t, int64(1000), metric1.Versions[0].MinTimeMs) + assert.Equal(t, int64(3000), metric1.Versions[1].MinTimeMs) + assert.Equal(t, int64(5000), metric1.Versions[2].MinTimeMs) +} + +func TestMergeResourceVersions(t *testing.T) { + // Test deduplication of overlapping time ranges + a := []*ResourceVersionData{ + {MinTimeMs: 1000, MaxTimeMs: 2000, Identifying: map[string]string{"a": "1"}}, + {MinTimeMs: 3000, MaxTimeMs: 4000, Identifying: map[string]string{"a": "2"}}, + } + + b := []*ResourceVersionData{ + {MinTimeMs: 1000, MaxTimeMs: 2000, Identifying: map[string]string{"b": "1"}}, // Same time range as a[0] + {MinTimeMs: 5000, MaxTimeMs: 6000, Identifying: map[string]string{"b": "2"}}, + } + + result := 
mergeResourceVersions(a, b) + + // Should have 3 versions (deduplicating the ones with same time range) + assert.Len(t, result, 3) + assert.Equal(t, int64(1000), result[0].MinTimeMs) + assert.Equal(t, int64(3000), result[1].MinTimeMs) + assert.Equal(t, int64(5000), result[2].MinTimeMs) + + // The first one should be from 'a' (ingesters take precedence) + assert.Equal(t, "1", result[0].Identifying["a"]) +} + +func TestLabelsMapToKey(t *testing.T) { + labels1 := map[string]string{"__name__": "metric", "job": "test", "instance": "localhost"} + labels2 := map[string]string{"instance": "localhost", "__name__": "metric", "job": "test"} + + // Same labels in different order should produce the same key + assert.Equal(t, labelsMapToKey(labels1), labelsMapToKey(labels2)) + + // Different labels should produce different keys + labels3 := map[string]string{"__name__": "metric", "job": "other"} + assert.NotEqual(t, labelsMapToKey(labels1), labelsMapToKey(labels3)) +} diff --git a/pkg/querier/resource_querier_cache.go b/pkg/querier/resource_querier_cache.go new file mode 100644 index 00000000000..fbbf291c9ea --- /dev/null +++ b/pkg/querier/resource_querier_cache.go @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querier + +import ( + "context" + "sync" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" + "github.com/prometheus/prometheus/util/annotations" + + ingester_client "github.com/grafana/mimir/pkg/ingester/client" + "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/storegateway/storepb" +) + +// ResourceAttributesFetcher abstracts the RPC call to fetch resource attributes. +type ResourceAttributesFetcher interface { + // FetchResourceAttributes fetches resource attributes for the given time range. 
+ // Returns nil, nil if no resource attributes are available. + FetchResourceAttributes(ctx context.Context, minT, maxT int64) ([]*ResourceAttributesData, error) +} + +// ResourceAttributesData represents resource attributes for a single series. +type ResourceAttributesData struct { + LabelsHash uint64 + Versions []*seriesmetadata.ResourceVersion +} + +// resourceQuerierCache wraps a querier to provide cached ResourceQuerier functionality. +// It pre-fetches resource attributes on first access and serves GetResourceAt from cache. +type resourceQuerierCache struct { + storage.Querier + + // Source for fetching resource attributes + fetcher ResourceAttributesFetcher + + // Time range for this querier + minT, maxT int64 + + // Logger for debugging + logger log.Logger + + // Stored context from Select() for use in GetResourceAt() + // This is needed because the ResourceQuerier interface doesn't pass context + storedCtx context.Context + storedCtxMu sync.Mutex + + // Cache state + cache map[uint64]*seriesmetadata.VersionedResource + uniqueAttrNames map[string]struct{} + cacheInitMu sync.Mutex + cacheInitialized bool +} + +// NewResourceQuerierCache creates a new resourceQuerierCache wrapping the given querier. +func NewResourceQuerierCache( + querier storage.Querier, + fetcher ResourceAttributesFetcher, + minT, maxT int64, + logger log.Logger, +) storage.Querier { + if logger == nil { + logger = log.NewNopLogger() + } + return &resourceQuerierCache{ + Querier: querier, + fetcher: fetcher, + minT: minT, + maxT: maxT, + logger: logger, + } +} + +// GetResourceAt implements storage.ResourceQuerier. +// It returns the resource version active at the given timestamp for the series. 
+func (q *resourceQuerierCache) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + if q.fetcher == nil { + return nil, false + } + + // Use stored context from Select() call, fall back to Background if not available + q.storedCtxMu.Lock() + ctx := q.storedCtx + q.storedCtxMu.Unlock() + if ctx == nil { + level.Debug(q.logger).Log("msg", "GetResourceAt: no stored context available") + ctx = context.Background() + } + + if err := q.ensureCacheInitialized(ctx); err != nil { + // Log warning but don't fail the query - graceful degradation + level.Warn(q.logger).Log("msg", "failed to initialize resource cache", "err", err) + return nil, false + } + + vr, ok := q.cache[labelsHash] + if !ok { + return nil, false + } + + rv, ok := vr.VersionAt(timestamp) + return rv, ok +} + +// IterUniqueAttributeNames implements storage.ResourceQuerier. +// It iterates over all unique resource attribute names. +func (q *resourceQuerierCache) IterUniqueAttributeNames(fn func(name string)) error { + if q.fetcher == nil { + return nil + } + + // Use stored context from Select() call, fall back to Background if not available + q.storedCtxMu.Lock() + ctx := q.storedCtx + q.storedCtxMu.Unlock() + if ctx == nil { + ctx = context.Background() + } + + if err := q.ensureCacheInitialized(ctx); err != nil { + return err + } + + for name := range q.uniqueAttrNames { + fn(name) + } + return nil +} + +// ensureCacheInitialized pre-fetches resource attributes on first call (lazy loading). +func (q *resourceQuerierCache) ensureCacheInitialized(ctx context.Context) error { + q.cacheInitMu.Lock() + defer q.cacheInitMu.Unlock() + + if q.cacheInitialized { + return nil + } + + if err := q.initializeCache(ctx); err != nil { + return err + } + q.cacheInitialized = true + return nil +} + +// initializeCache performs the actual pre-fetch of resource attributes. 
+func (q *resourceQuerierCache) initializeCache(ctx context.Context) error { + // Fetch all resource attributes for this time range + data, err := q.fetcher.FetchResourceAttributes(ctx, q.minT, q.maxT) + if err != nil { + return err + } + + // Populate cache + q.cache = make(map[uint64]*seriesmetadata.VersionedResource, len(data)) + q.uniqueAttrNames = make(map[string]struct{}) + + for _, item := range data { + // Merge versions if the same series appears from multiple sources + // (e.g., different store-gateways serving different blocks). + if existing, ok := q.cache[item.LabelsHash]; ok { + existing.Versions = mergeCacheVersions(existing.Versions, item.Versions) + } else { + q.cache[item.LabelsHash] = &seriesmetadata.VersionedResource{ + Versions: item.Versions, + } + } + + // Collect unique attribute names + for _, version := range item.Versions { + for name := range version.Identifying { + q.uniqueAttrNames[name] = struct{}{} + } + for name := range version.Descriptive { + q.uniqueAttrNames[name] = struct{}{} + } + } + } + + level.Debug(q.logger).Log( + "msg", "initialized resource cache", + "series_count", len(q.cache), + "unique_attrs", len(q.uniqueAttrNames), + ) + + return nil +} + +// Select implements storage.Querier and captures the context for later use in GetResourceAt. +func (q *resourceQuerierCache) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + // Store the context for later use in GetResourceAt + q.storedCtxMu.Lock() + if q.storedCtx == nil { + q.storedCtx = ctx + } + q.storedCtxMu.Unlock() + + return q.Querier.Select(ctx, sortSeries, hints, matchers...) +} + +// LabelValues implements storage.Querier and captures the context for later use. 
+func (q *resourceQuerierCache) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + // Store the context for later use in GetResourceAt + q.storedCtxMu.Lock() + if q.storedCtx == nil { + q.storedCtx = ctx + } + q.storedCtxMu.Unlock() + + return q.Querier.LabelValues(ctx, name, hints, matchers...) +} + +// LabelNames implements storage.Querier and captures the context for later use. +func (q *resourceQuerierCache) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + // Store the context for later use in GetResourceAt + q.storedCtxMu.Lock() + if q.storedCtx == nil { + q.storedCtx = ctx + } + q.storedCtxMu.Unlock() + + return q.Querier.LabelNames(ctx, hints, matchers...) +} + +// Close releases resources. +func (q *resourceQuerierCache) Close() error { + return q.Querier.Close() +} + +// distributorResourceFetcher implements ResourceAttributesFetcher for distributorQuerier. +type distributorResourceFetcher struct { + distributor Distributor +} + +// FetchResourceAttributes fetches resource attributes from ingesters via the distributor. 
+func (f *distributorResourceFetcher) FetchResourceAttributes(ctx context.Context, minT, maxT int64) ([]*ResourceAttributesData, error) { + // Use a matcher that matches all series (required for getPostings to return results) + allSeriesMatcher, _ := labels.NewMatcher(labels.MatchNotEqual, model.MetricNameLabel, "") + results, err := f.distributor.ResourceAttributes(ctx, minT, maxT, []*labels.Matcher{allSeriesMatcher}, 0, nil) + if err != nil { + return nil, err + } + + data := make([]*ResourceAttributesData, 0, len(results)) + for _, item := range results { + labelsHash := convertIngesterLabelsToHash(item.Labels) + versions := convertIngesterVersions(item.Versions) + data = append(data, &ResourceAttributesData{ + LabelsHash: labelsHash, + Versions: versions, + }) + } + return data, nil +} + +// blocksResourceFetcher implements ResourceAttributesFetcher for blocksStoreQuerier. +type blocksResourceFetcher struct { + blocksQueryable ResourceAttributesBlocksQueryable +} + +// FetchResourceAttributes fetches resource attributes from store-gateways. +func (f *blocksResourceFetcher) FetchResourceAttributes(ctx context.Context, minT, maxT int64) ([]*ResourceAttributesData, error) { + // Use a matcher that matches all series (required for getPostings to return results) + allSeriesMatcher, _ := labels.NewMatcher(labels.MatchNotEqual, model.MetricNameLabel, "") + results, err := f.blocksQueryable.ResourceAttributes(ctx, minT, maxT, []*labels.Matcher{allSeriesMatcher}, 0, nil) + if err != nil { + return nil, err + } + + data := make([]*ResourceAttributesData, 0, len(results)) + for _, item := range results { + labelsHash := convertStoreLabelsToHash(item.Labels) + versions := convertStoreVersions(item.Versions) + data = append(data, &ResourceAttributesData{ + LabelsHash: labelsHash, + Versions: versions, + }) + } + return data, nil +} + +// convertIngesterLabelsToHash converts ingester labels to a hash. 
+// Uses StableHash to match the hash used when storing resource attributes in the ingester. +func convertIngesterLabelsToHash(lbls []mimirpb.LabelAdapter) uint64 { + return labels.StableHash(mimirpb.FromLabelAdaptersToLabels(lbls)) +} + +// convertIngesterVersions converts ingester version data to seriesmetadata.ResourceVersion. +func convertIngesterVersions(versions []*ingester_client.ResourceVersionData) []*seriesmetadata.ResourceVersion { + result := make([]*seriesmetadata.ResourceVersion, 0, len(versions)) + for _, v := range versions { + rv := &seriesmetadata.ResourceVersion{ + Identifying: v.Identifying, + Descriptive: v.Descriptive, + MinTime: v.MinTimeMs, + MaxTime: v.MaxTimeMs, + } + + // Convert entities + if len(v.Entities) > 0 { + rv.Entities = make([]*seriesmetadata.Entity, 0, len(v.Entities)) + for _, e := range v.Entities { + rv.Entities = append(rv.Entities, &seriesmetadata.Entity{ + Type: e.Type, + ID: e.Id, + Description: e.Description, + }) + } + } + + result = append(result, rv) + } + return result +} + +// mergeCacheVersions merges two slices of resource versions, deduplicating by time range. +func mergeCacheVersions(a, b []*seriesmetadata.ResourceVersion) []*seriesmetadata.ResourceVersion { + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + + type versionKey struct { + minTime int64 + maxTime int64 + } + + seen := make(map[versionKey]bool, len(a)) + result := make([]*seriesmetadata.ResourceVersion, 0, len(a)+len(b)) + + for _, v := range a { + key := versionKey{v.MinTime, v.MaxTime} + if !seen[key] { + seen[key] = true + result = append(result, v) + } + } + + for _, v := range b { + key := versionKey{v.MinTime, v.MaxTime} + if !seen[key] { + seen[key] = true + result = append(result, v) + } + } + + return result +} + +// convertStoreLabelsToHash converts store-gateway labels (map) to a hash. +// Uses StableHash to match the hash used when storing resource attributes. 
+func convertStoreLabelsToHash(lbls map[string]string) uint64 { + return labels.StableHash(labels.FromMap(lbls)) +} + +// convertStoreVersions converts store-gateway version data to seriesmetadata.ResourceVersion. +func convertStoreVersions(versions []*storepb.ResourceVersionData) []*seriesmetadata.ResourceVersion { + result := make([]*seriesmetadata.ResourceVersion, 0, len(versions)) + for _, v := range versions { + rv := &seriesmetadata.ResourceVersion{ + Identifying: v.Identifying, + Descriptive: v.Descriptive, + MinTime: v.MinTimeMs, + MaxTime: v.MaxTimeMs, + } + + // Convert entities + if len(v.Entities) > 0 { + rv.Entities = make([]*seriesmetadata.Entity, 0, len(v.Entities)) + for _, e := range v.Entities { + rv.Entities = append(rv.Entities, &seriesmetadata.Entity{ + Type: e.Type, + ID: e.Id, + Description: e.Description, + }) + } + } + + result = append(result, rv) + } + return result +} diff --git a/pkg/querier/resource_querier_cache_test.go b/pkg/querier/resource_querier_cache_test.go new file mode 100644 index 00000000000..9232804dfd7 --- /dev/null +++ b/pkg/querier/resource_querier_cache_test.go @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querier + +import ( + "context" + "errors" + "testing" + + "github.com/go-kit/log" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" + "github.com/prometheus/prometheus/util/annotations" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// noopQuerier is a minimal implementation of storage.Querier for testing. 
+type noopQuerier struct{} + +func (m *noopQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + return storage.EmptySeriesSet() +} + +func (m *noopQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return nil, nil, nil +} + +func (m *noopQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return nil, nil, nil +} + +func (m *noopQuerier) Close() error { + return nil +} + +// mockResourceFetcher implements ResourceAttributesFetcher for testing. +type mockResourceFetcher struct { + data []*ResourceAttributesData + err error +} + +func (m *mockResourceFetcher) FetchResourceAttributes(ctx context.Context, minT, maxT int64) ([]*ResourceAttributesData, error) { + return m.data, m.err +} + +func TestResourceQuerierCache_GetResourceAt(t *testing.T) { + testCases := []struct { + name string + fetcherData []*ResourceAttributesData + fetcherErr error + labelsHash uint64 + timestamp int64 + expectFound bool + expectVersion *seriesmetadata.ResourceVersion + }{ + { + name: "no fetcher returns false", + labelsHash: 12345, + timestamp: 1000, + expectFound: false, + }, + { + name: "fetcher error returns false", + fetcherErr: errors.New("fetch error"), + labelsHash: 12345, + timestamp: 1000, + expectFound: false, + }, + { + name: "cache miss returns false", + fetcherData: []*ResourceAttributesData{ + { + LabelsHash: 11111, + Versions: []*seriesmetadata.ResourceVersion{ + {MinTime: 1000, MaxTime: 2000, Identifying: map[string]string{"service.name": "test"}}, + }, + }, + }, + labelsHash: 99999, // Different hash + timestamp: 1500, + expectFound: false, + }, + { + name: "cache hit returns version", + fetcherData: []*ResourceAttributesData{ + { + LabelsHash: 12345, + Versions: []*seriesmetadata.ResourceVersion{ + 
{MinTime: 1000, MaxTime: 2000, Identifying: map[string]string{"service.name": "test"}}, + }, + }, + }, + labelsHash: 12345, + timestamp: 1500, + expectFound: true, + expectVersion: &seriesmetadata.ResourceVersion{ + MinTime: 1000, + MaxTime: 2000, + Identifying: map[string]string{"service.name": "test"}, + }, + }, + { + name: "timestamp before MinTime returns not found", + fetcherData: []*ResourceAttributesData{ + { + LabelsHash: 12345, + Versions: []*seriesmetadata.ResourceVersion{ + {MinTime: 1000, MaxTime: 2000, Identifying: map[string]string{"service.name": "test"}}, + }, + }, + }, + labelsHash: 12345, + timestamp: 500, // Before MinTime - VersionAt returns nil + expectFound: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var fetcher ResourceAttributesFetcher + if tc.fetcherData != nil || tc.fetcherErr != nil { + fetcher = &mockResourceFetcher{data: tc.fetcherData, err: tc.fetcherErr} + } + + cache := NewResourceQuerierCache( + &noopQuerier{}, + fetcher, + 0, // minT + 10000, // maxT + log.NewNopLogger(), + ) + + rv, found := cache.(*resourceQuerierCache).GetResourceAt(tc.labelsHash, tc.timestamp) + + assert.Equal(t, tc.expectFound, found) + if tc.expectFound { + require.NotNil(t, rv) + assert.Equal(t, tc.expectVersion.MinTime, rv.MinTime) + assert.Equal(t, tc.expectVersion.MaxTime, rv.MaxTime) + assert.Equal(t, tc.expectVersion.Identifying, rv.Identifying) + } + }) + } +} + +func TestResourceQuerierCache_IterUniqueAttributeNames(t *testing.T) { + fetcher := &mockResourceFetcher{ + data: []*ResourceAttributesData{ + { + LabelsHash: 12345, + Versions: []*seriesmetadata.ResourceVersion{ + { + Identifying: map[string]string{"service.name": "test", "service.namespace": "prod"}, + Descriptive: map[string]string{"service.version": "1.0.0"}, + }, + }, + }, + { + LabelsHash: 67890, + Versions: []*seriesmetadata.ResourceVersion{ + { + Identifying: map[string]string{"service.name": "other"}, + Descriptive: 
map[string]string{"host.name": "localhost"}, + }, + }, + }, + }, + } + + cache := NewResourceQuerierCache( + &noopQuerier{}, + fetcher, + 0, + 10000, + log.NewNopLogger(), + ) + + var names []string + err := cache.(*resourceQuerierCache).IterUniqueAttributeNames(func(name string) { + names = append(names, name) + }) + + require.NoError(t, err) + assert.ElementsMatch(t, []string{ + "service.name", + "service.namespace", + "service.version", + "host.name", + }, names) +} + +func TestResourceQuerierCache_Close(t *testing.T) { + cache := NewResourceQuerierCache( + &noopQuerier{}, + nil, + 0, + 10000, + log.NewNopLogger(), + ) + + err := cache.Close() + assert.NoError(t, err) +} diff --git a/pkg/querier/stats_renderer_test.go b/pkg/querier/stats_renderer_test.go index b8c74956c8a..c425b87e4ed 100644 --- a/pkg/querier/stats_renderer_test.go +++ b/pkg/querier/stats_renderer_test.go @@ -75,8 +75,7 @@ func TestStatsRenderer(t *testing.T) { api := v1.NewAPI( engine, storage, - nil, - nil, + nil, nil, // No remote write support (Appendable, AppendableV2). 
nil, func(context.Context) v1.ScrapePoolsRetriever { return &DummyTargetRetriever{} }, func(context.Context) v1.TargetRetriever { return &DummyTargetRetriever{} }, @@ -109,8 +108,9 @@ func TestStatsRenderer(t *testing.T) { 5*time.Minute, false, false, - nil, - nil, + nil, // overrideErrorCode + false, // enableNativeMetadata + nil, // featureRegistry v1.OpenAPIOptions{}, nil, ) diff --git a/pkg/querier/store_gateway_client_test.go b/pkg/querier/store_gateway_client_test.go index 60ef392f166..5d1f88f1127 100644 --- a/pkg/querier/store_gateway_client_test.go +++ b/pkg/querier/store_gateway_client_test.go @@ -109,3 +109,8 @@ func (m *mockStoreGatewayServer) LabelValues(ctx context.Context, req *storepb.L return nil, nil } + +func (m *mockStoreGatewayServer) ResourceAttributes(req *storepb.ResourceAttributesRequest, srv storegatewaypb.StoreGateway_ResourceAttributesServer) error { + // Not implemented for tests + return nil +} diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index d8e5611c53d..53e86dc4886 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -103,6 +103,10 @@ func (a *PusherAppender) AppendHistogramSTZeroSample(storage.SeriesRef, labels.L return 0, errors.New("ST zero samples are unsupported") } +func (a *PusherAppender) UpdateResource(_ storage.SeriesRef, _ labels.Labels, _, _ map[string]string, _ []storage.EntityData, _ int64) (storage.SeriesRef, error) { + return 0, errors.New("resource updates are unsupported") +} + func (a *PusherAppender) Commit() error { a.totalWrites.WithLabelValues(a.userID).Inc() @@ -192,6 +196,10 @@ func (a *NoopAppender) AppendHistogramSTZeroSample(storage.SeriesRef, labels.Lab return 0, errors.New("ST zero samples are unsupported") } +func (a *NoopAppender) UpdateResource(_ storage.SeriesRef, _ labels.Labels, _, _ map[string]string, _ []storage.EntityData, _ int64) (storage.SeriesRef, error) { + return 0, errors.New("resource updates are unsupported") +} + func (a *NoopAppender) Commit() error { return nil } 
diff --git a/pkg/storage/lazyquery/lazyquery.go b/pkg/storage/lazyquery/lazyquery.go index f3d5d1cdfe2..2c37ce7f755 100644 --- a/pkg/storage/lazyquery/lazyquery.go +++ b/pkg/storage/lazyquery/lazyquery.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" ) @@ -85,6 +86,22 @@ func (l LazyQuerier) Close() error { return l.next.Close() } +// GetResourceAt implements storage.ResourceQuerier by delegating to the underlying querier if it supports it. +func (l LazyQuerier) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + if rq, ok := l.next.(storage.ResourceQuerier); ok { + return rq.GetResourceAt(labelsHash, timestamp) + } + return nil, false +} + +// IterUniqueAttributeNames implements storage.ResourceQuerier by delegating to the underlying querier if it supports it. +func (l LazyQuerier) IterUniqueAttributeNames(fn func(name string)) error { + if rq, ok := l.next.(storage.ResourceQuerier); ok { + return rq.IterUniqueAttributeNames(fn) + } + return nil +} + type lazySeriesSet struct { next storage.SeriesSet future chan storage.SeriesSet diff --git a/pkg/storage/tsdb/block/block.go b/pkg/storage/tsdb/block/block.go index 33330b0af4a..eb381e2ec91 100644 --- a/pkg/storage/tsdb/block/block.go +++ b/pkg/storage/tsdb/block/block.go @@ -37,6 +37,8 @@ const ( IndexHeaderFilename = "index-header" // SparseIndexHeaderFilename is the canonical name for sparse index header file that stores abbreviated slices of index-header. SparseIndexHeaderFilename = "sparse-index-header" + // SeriesMetadataFilename is the known parquet filename for series metadata (metric metadata and resource attributes). + SeriesMetadataFilename = "series_metadata.parquet" // ChunksDirname is the known dir name for chunks with compressed samples. 
ChunksDirname = "chunks" @@ -50,6 +52,7 @@ const ( FileTypeMeta FileType = "meta" FileTypeIndex FileType = "index" FileTypeSparseIndexHeader FileType = "sparse_index_header" + FileTypeSeriesMetadata FileType = "series_metadata" FileTypeChunks FileType = "chunks" FileTypeUnknown FileType = "unknown" ) @@ -179,6 +182,25 @@ func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, blockDi level.Debug(logger).Log("msg", "sparse index header entry not found, skipping upload", "block", id.String()) } + hasSeriesMetadata := false + for _, f := range meta.Thanos.Files { + if f.RelPath == SeriesMetadataFilename { + hasSeriesMetadata = true + break + } + } + + if hasSeriesMetadata { + eg.Go(func() (err error) { + if err := objstore.UploadFile(uctx, logger, bkt, filepath.Join(blockDir, SeriesMetadataFilename), path.Join(id.String(), SeriesMetadataFilename)); err != nil { + return &UploadError{err, FileTypeSeriesMetadata} + } + return nil + }) + } else { + level.Debug(logger).Log("msg", "series metadata entry not found, skipping upload", "block", id.String()) + } + if err := eg.Wait(); err != nil { return nil, cleanUp(logger, bkt, id, err) } @@ -385,6 +407,17 @@ func GatherFileStats(blockDir string) (res []File, _ error) { res = append(res, File{RelPath: sparseHeaderInfo.Name(), SizeBytes: sparseHeaderInfo.Size()}) } + // series metadata files are optional, they contain metric metadata and resource attributes + // not adding entry if file does not exist, Upload of series metadata is skipped in this case + seriesMetadataInfo, err := os.Stat(filepath.Join(blockDir, SeriesMetadataFilename)) + if err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "stat %v", filepath.Join(blockDir, SeriesMetadataFilename)) + } + } else { + res = append(res, File{RelPath: seriesMetadataInfo.Name(), SizeBytes: seriesMetadataInfo.Size()}) + } + metaFile, err := os.Stat(filepath.Join(blockDir, MetaFilename)) if err != nil { return nil, errors.Wrapf(err, "stat %v", 
filepath.Join(blockDir, MetaFilename)) diff --git a/pkg/storage/tsdb/block/block_test.go b/pkg/storage/tsdb/block/block_test.go index a825ee242d5..449e9cd11a2 100644 --- a/pkg/storage/tsdb/block/block_test.go +++ b/pkg/storage/tsdb/block/block_test.go @@ -101,7 +101,7 @@ func TestDelete(t *testing.T) { require.NoError(t, err) _, err = Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b1.String()), nil) require.NoError(t, err) - require.Equal(t, 3, len(bkt.Objects())) + require.Equal(t, 4, len(bkt.Objects())) // chunks/000001, index, meta.json, series_metadata.parquet markedForDeletion := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"}) require.NoError(t, MarkForDeletion(ctx, log.NewNopLogger(), bkt, b1, "", markedForDeletion)) @@ -116,7 +116,7 @@ func TestDelete(t *testing.T) { require.NoError(t, err) _, err = Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.String()), nil) require.NoError(t, err) - require.Equal(t, 3, len(bkt.Objects())) + require.Equal(t, 4, len(bkt.Objects())) // chunks/000001, index, meta.json, series_metadata.parquet // Remove meta.json and check if delete can delete it. 
require.NoError(t, bkt.Delete(ctx, path.Join(b2.String(), MetaFilename))) @@ -258,11 +258,11 @@ func TestUpload(t *testing.T) { require.NoError(t, err) chunkFileSize := getFileSize(t, filepath.Join(tmpDir, b2.String(), ChunksDirname, "000001")) - require.Equal(t, 6, len(bkt.Objects())) // 3 from b1, 3 from b2 + require.Equal(t, 7, len(bkt.Objects())) // 3 from b1 (manually created), 4 from b2 (CreateBlock includes series_metadata.parquet) require.Equal(t, chunkFileSize, int64(len(bkt.Objects()[path.Join(b2.String(), ChunksDirname, "000001")]))) indexFileSize := getFileSize(t, path.Join(tmpDir, b2.String(), IndexFilename)) require.Equal(t, indexFileSize, int64(len(bkt.Objects()[path.Join(b2.String(), IndexFilename)]))) - require.Equal(t, 603, len(bkt.Objects()[path.Join(b2.String(), MetaFilename)])) + require.Greater(t, len(bkt.Objects()[path.Join(b2.String(), MetaFilename)]), 0) // meta.json size varies based on file entries origMeta, err := ReadMetaFromDir(path.Join(tmpDir, b2.String())) require.NoError(t, err) @@ -502,9 +502,10 @@ func TestUploadCleanup(t *testing.T) { require.ErrorAs(t, uploadErr, uerr) // If upload of meta.json fails, nothing is cleaned up. 
- require.Equal(t, 3, len(bkt.Objects())) + require.Equal(t, 4, len(bkt.Objects())) // chunks/000001, index, series_metadata.parquet uploaded before meta.json require.Greater(t, len(bkt.Objects()[path.Join(b1.String(), ChunksDirname, "000001")]), 0) require.Greater(t, len(bkt.Objects()[path.Join(b1.String(), IndexFilename)]), 0) + require.Greater(t, len(bkt.Objects()[path.Join(b1.String(), SeriesMetadataFilename)]), 0) require.Greater(t, len(bkt.Objects()[path.Join(b1.String(), MetaFilename)]), 0) require.Equal(t, 0, len(bkt.Objects()[path.Join(DebugMetas, fmt.Sprintf("%s.json", b1.String()))])) } diff --git a/pkg/storage/tsdb/bucketindex/index.go b/pkg/storage/tsdb/bucketindex/index.go index e8ccced0b8e..7d008518622 100644 --- a/pkg/storage/tsdb/bucketindex/index.go +++ b/pkg/storage/tsdb/bucketindex/index.go @@ -115,6 +115,10 @@ type Block struct { // Labels contains the external labels from the block's metadata. Labels map[string]string `json:"labels,omitempty"` + + // SeriesMetadata contains optional stats about the OTel series metadata Parquet file. + // Nil when no metadata is present. + SeriesMetadata *tsdb.BlockSeriesMetadata `json:"series_metadata,omitempty"` } // Within returns whether the block contains samples within the provided range. 
@@ -155,6 +159,7 @@ func (m *Block) ThanosMeta() *block.Meta { Level: m.CompactionLevel, Hints: compactionHints, }, + SeriesMetadata: m.SeriesMetadata, }, Thanos: block.ThanosMeta{ Version: block.ThanosVersion1, @@ -201,6 +206,7 @@ func BlockFromThanosMeta(meta block.Meta) *Block { CompactionLevel: meta.Compaction.Level, OutOfOrder: meta.Compaction.FromOutOfOrder(), Labels: maps.Clone(meta.Thanos.Labels), + SeriesMetadata: meta.SeriesMetadata, } } diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 9a68f497930..3c7902c0584 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -11,6 +11,7 @@ import ( "context" "fmt" "io" + "log/slog" "math" "os" "path" @@ -34,6 +35,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/hashcache" "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/thanos-io/objstore" "go.opentelemetry.io/otel/attribute" "go.uber.org/atomic" @@ -1588,6 +1590,513 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR }, nil } +// resourceAttributesMaxSizeBytes is the max size per streaming response batch. +const resourceAttributesMaxSizeBytes = 1 * 1024 * 1024 + +// ResourceAttributes returns OTel resource attributes for series matching the matchers. +// It reads the series_metadata.parquet files from compacted blocks in object storage. +func (s *BucketStore) ResourceAttributes(req *storepb.ResourceAttributesRequest, srv storegatewaypb.StoreGateway_ResourceAttributesServer) error { + ctx := srv.Context() + spanLog := spanlogger.FromContext(ctx, s.logger) + + matchers, err := storepb.MatchersToPromMatchers(req.Matchers...) 
+ if err != nil { + return status.Error(codes.InvalidArgument, errors.Wrap(err, "translate matchers").Error()) + } + + g, gctx := errgroup.WithContext(ctx) + + var resultsMtx sync.Mutex + var allItems []*storepb.ResourceAttributesSeriesData + limit := req.Limit + + resourceAttrFilters := req.ResourceAttrFilters + + // Iterate over blocks matching the time range + s.blockSet.filter(req.Start, req.End, nil, func(b *bucketBlock) { + g.Go(func() error { + var items []*storepb.ResourceAttributesSeriesData + var err error + if len(resourceAttrFilters) > 0 { + items, err = s.blockResourceAttributesByFilter(gctx, b, resourceAttrFilters, req.Start, req.End, limit) + } else { + items, err = s.blockResourceAttributes(gctx, b, matchers, req.Start, req.End, limit) + } + if err != nil { + return errors.Wrapf(err, "block %s", b.meta.ULID) + } + + if len(items) > 0 { + resultsMtx.Lock() + allItems = append(allItems, items...) + resultsMtx.Unlock() + } + return nil + }) + }) + + if err := g.Wait(); err != nil { + if errors.Is(err, context.Canceled) { + return status.Error(codes.Canceled, err.Error()) + } + return status.Error(codes.Internal, err.Error()) + } + + spanLog.DebugLog("msg", "collected resource attributes from blocks", "count", len(allItems)) + + // Deduplicate by series labels across blocks, keeping all resource versions + deduped := deduplicateResourceAttributeItems(allItems) + + // Apply limit after deduplication + if limit > 0 && int64(len(deduped)) > limit { + deduped = deduped[:limit] + } + + // Send results in batches + return sendResourceAttributesBatched(srv, deduped, resourceAttributesMaxSizeBytes) +} + +// bucketReaderAt implements io.ReaderAt over an object storage bucket, +// translating ReadAt calls into GetRange requests.
+type bucketReaderAt struct { + ctx context.Context + bkt objstore.BucketReader + name string +} + +func (r *bucketReaderAt) ReadAt(p []byte, off int64) (int, error) { + rc, err := r.bkt.GetRange(r.ctx, r.name, off, int64(len(p))) + if err != nil { + return 0, err + } + defer rc.Close() + return io.ReadFull(rc, p) +} + +// blockResourceAttributes reads resource attributes for matching series from a single block. +func (s *BucketStore) blockResourceAttributes(ctx context.Context, b *bucketBlock, matchers []*labels.Matcher, startMs, endMs int64, limit int64) ([]*storepb.ResourceAttributesSeriesData, error) { + // Read the series metadata parquet file from object storage + parquetPath := path.Join(b.meta.ULID.String(), seriesmetadata.SeriesMetadataFilename) + + // Get file size via Attributes (needed by parquet reader for footer). + attrs, err := b.bkt.Attributes(ctx, parquetPath) + if err != nil { + if b.bkt.IsObjNotFoundErr(err) { + // No series metadata file - this is expected for older blocks + return nil, nil + } + return nil, errors.Wrap(err, "get series metadata attributes") + } + if attrs.Size == 0 { + return nil, nil + } + + // Stream parquet via range reads — avoids loading entire file into memory. 
+ readerAt := &bucketReaderAt{ctx: ctx, bkt: b.bkt, name: parquetPath} + + // Parse the parquet file + resourcesByHash, err := parseResourceAttributesParquet(ctx, readerAt, attrs.Size) + if err != nil { + return nil, errors.Wrap(err, "parse parquet file") + } + + if len(resourcesByHash) == 0 { + return nil, nil + } + + // Get matching series from the block index using the series set iterator pattern + indexr := b.indexReader(s.postingsStrategy) + defer runutil.CloseWithLogOnErr(b.logger, indexr, "close block index reader") + + stats := newSafeQueryStats() + postings, pendingMatchers, err := indexr.ExpandedPostings(ctx, matchers, stats) + if err != nil { + return nil, errors.Wrap(err, "expanded postings") + } + + // Build series iterator to get labels + var iterator iterator[seriesChunkRefsSet] + iterator = newLoadingSeriesChunkRefsSetIterator( + ctx, + newPostingsSetsIterator(postings, s.maxSeriesPerBatch), + indexr, + b.indexCache, + stats, + b.meta, + nil, // No shard selector + nil, // No series hash cache + noChunkRefs, + b.meta.MinTime, + b.meta.MaxTime, + b.userID, + b.logger, + ) + if len(pendingMatchers) > 0 { + iterator = newFilteringSeriesChunkRefsSetIterator(pendingMatchers, iterator, stats) + } + seriesSet := newSeriesSetWithoutChunks(ctx, iterator, stats) + + // For each matching series, look up its resource attributes by labels hash + var result []*storepb.ResourceAttributesSeriesData + count := int64(0) + + for seriesSet.Next() { + if limit > 0 && count >= limit { + break + } + + lbls, _ := seriesSet.At() + labelsHash := labels.StableHash(lbls) + + versions, ok := resourcesByHash[labelsHash] + if !ok || len(versions) == 0 { + continue + } + + // Filter versions by time range + var filteredVersions []*storepb.ResourceVersionData + for _, ver := range versions { + // Check if version overlaps with requested time range + if endMs > 0 && ver.MinTimeMs > endMs { + continue + } + if startMs > 0 && ver.MaxTimeMs < startMs { + continue + } + filteredVersions 
= append(filteredVersions, ver) + } + + if len(filteredVersions) == 0 { + continue + } + + // Convert labels to map + labelsMap := make(map[string]string, lbls.Len()) + lbls.Range(func(l labels.Label) { + labelsMap[l.Name] = l.Value + }) + + result = append(result, &storepb.ResourceAttributesSeriesData{ + Labels: labelsMap, + Versions: filteredVersions, + }) + count++ + } + + if err := seriesSet.Err(); err != nil { + return nil, errors.Wrap(err, "iterating series") + } + + return result, nil +} + +// blockResourceAttributesByFilter reads resource attributes for series matching resource attribute filters from a single block. +// It uses the inverted index (resource_attr_index namespace) for reverse lookup. +func (s *BucketStore) blockResourceAttributesByFilter(ctx context.Context, b *bucketBlock, filters []*storepb.ResourceAttrFilter, startMs, endMs int64, limit int64) ([]*storepb.ResourceAttributesSeriesData, error) { + // Read the series metadata parquet file with full resource data (includes inverted index) + parquetPath := path.Join(b.meta.ULID.String(), seriesmetadata.SeriesMetadataFilename) + + attrs, err := b.bkt.Attributes(ctx, parquetPath) + if err != nil { + if b.bkt.IsObjNotFoundErr(err) { + return nil, nil + } + return nil, errors.Wrap(err, "get series metadata attributes") + } + if attrs.Size == 0 { + return nil, nil + } + + readerAt := &bucketReaderAt{ctx: ctx, bkt: b.bkt, name: parquetPath} + + smReader, err := seriesmetadata.ReadSeriesMetadataFromReaderAt( + slog.Default(), + readerAt, + attrs.Size, + seriesmetadata.WithFullResourceData(), + ) + if err != nil { + return nil, errors.Wrap(err, "read series metadata with full resource data") + } + defer smReader.Close() + + // Intersect results from all filters (AND semantics). 
+ var matchingHashes []uint64 + for idx, filter := range filters { + hashes := smReader.LookupResourceAttr(filter.GetKey(), filter.GetValue()) + if idx == 0 { + matchingHashes = hashes + } else { + matchingHashes = intersectSortedUint64SG(matchingHashes, hashes) + } + if len(matchingHashes) == 0 { + return nil, nil + } + } + + var result []*storepb.ResourceAttributesSeriesData + count := int64(0) + + for _, labelsHash := range matchingHashes { + if limit > 0 && count >= limit { + break + } + + versionedResource, found := smReader.GetVersionedResource(labelsHash) + if !found || versionedResource == nil || len(versionedResource.Versions) == 0 { + continue + } + + // Filter versions by time range + var filteredVersions []*storepb.ResourceVersionData + for _, ver := range versionedResource.Versions { + if endMs > 0 && ver.MinTime > endMs { + continue + } + if startMs > 0 && ver.MaxTime < startMs { + continue + } + + version := &storepb.ResourceVersionData{ + Identifying: make(map[string]string), + Descriptive: make(map[string]string), + MinTimeMs: ver.MinTime, + MaxTimeMs: ver.MaxTime, + } + for k, v := range ver.Identifying { + version.Identifying[k] = v + } + for k, v := range ver.Descriptive { + version.Descriptive[k] = v + } + for _, ent := range ver.Entities { + entity := &storepb.EntityData{ + Type: ent.Type, + Id: make(map[string]string), + Description: make(map[string]string), + } + for k, v := range ent.ID { + entity.Id[k] = v + } + for k, v := range ent.Description { + entity.Description[k] = v + } + version.Entities = append(version.Entities, entity) + } + + filteredVersions = append(filteredVersions, version) + } + + if len(filteredVersions) == 0 { + continue + } + + // Get labels for this hash if available + labelsMap := make(map[string]string) + if lbls, ok := smReader.LabelsForHash(labelsHash); ok { + lbls.Range(func(l labels.Label) { + labelsMap[l.Name] = l.Value + }) + } + + result = append(result, &storepb.ResourceAttributesSeriesData{ + Labels: 
labelsMap, + Versions: filteredVersions, + }) + count++ + } + + return result, nil +} + +// intersectSortedUint64SG returns the intersection of two sorted uint64 slices. +func intersectSortedUint64SG(a, b []uint64) []uint64 { + if len(a) == 0 || len(b) == 0 { + return nil + } + result := make([]uint64, 0, min(len(a), len(b))) + i, j := 0, 0 + for i < len(a) && j < len(b) { + if a[i] == b[j] { + result = append(result, a[i]) + i++ + j++ + } else if a[i] < b[j] { + i++ + } else { + j++ + } + } + return result +} + +// parseResourceAttributesParquet parses a series_metadata.parquet file and returns +// resource versions indexed by labels hash. +func parseResourceAttributesParquet(ctx context.Context, r io.ReaderAt, size int64) (map[uint64][]*storepb.ResourceVersionData, error) { + if r == nil || size == 0 { + return nil, nil + } + smReader, err := seriesmetadata.ReadSeriesMetadataFromReaderAt( + slog.Default(), + r, + size, + seriesmetadata.WithNamespaceFilter( + seriesmetadata.NamespaceResource, + seriesmetadata.NamespaceResourceTable, + seriesmetadata.NamespaceResourceMapping, + ), + ) + if err != nil { + return nil, errors.Wrap(err, "read series metadata") + } + defer smReader.Close() + + resourcesByHash := make(map[uint64][]*storepb.ResourceVersionData) + err = smReader.IterVersionedResources(ctx, func(labelsHash uint64, resources *seriesmetadata.VersionedResource) error { + for _, rv := range resources.Versions { + var entities []*storepb.EntityData + for _, e := range rv.Entities { + entities = append(entities, &storepb.EntityData{ + Type: e.Type, + Id: e.ID, + Description: e.Description, + }) + } + resourcesByHash[labelsHash] = append(resourcesByHash[labelsHash], &storepb.ResourceVersionData{ + Identifying: rv.Identifying, + Descriptive: rv.Descriptive, + Entities: entities, + MinTimeMs: rv.MinTime, + MaxTimeMs: rv.MaxTime, + }) + } + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "iterate versioned resources") + } + return resourcesByHash, 
nil +} + +// deduplicateResourceAttributeItems deduplicates items by series labels, +// merging resource versions from different blocks. +func deduplicateResourceAttributeItems(items []*storepb.ResourceAttributesSeriesData) []*storepb.ResourceAttributesSeriesData { + if len(items) == 0 { + return items + } + + // Build a key from labels for deduplication + type seriesKey string + makeKey := func(labels map[string]string) seriesKey { + // Sort labels for consistent key + keys := make([]string, 0, len(labels)) + for k := range labels { + keys = append(keys, k) + } + slices.Sort(keys) + + var buf bytes.Buffer + for _, k := range keys { + buf.WriteString(k) + buf.WriteByte(0) + buf.WriteString(labels[k]) + buf.WriteByte(0) + } + return seriesKey(buf.String()) + } + + byKey := make(map[seriesKey]*storepb.ResourceAttributesSeriesData) + + for _, item := range items { + key := makeKey(item.Labels) + existing, ok := byKey[key] + if !ok { + byKey[key] = item + continue + } + + // Merge versions, deduplicating by time range + existing.Versions = mergeResourceVersions(existing.Versions, item.Versions) + } + + result := make([]*storepb.ResourceAttributesSeriesData, 0, len(byKey)) + for _, item := range byKey { + result = append(result, item) + } + return result +} + +// mergeResourceVersions merges two slices of resource versions, deduplicating by time range. 
+func mergeResourceVersions(a, b []*storepb.ResourceVersionData) []*storepb.ResourceVersionData { + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + + // Use min/max time as key for deduplication + type versionKey struct { + minTime int64 + maxTime int64 + } + + seen := make(map[versionKey]bool) + result := make([]*storepb.ResourceVersionData, 0, len(a)+len(b)) + + for _, v := range a { + key := versionKey{v.MinTimeMs, v.MaxTimeMs} + if !seen[key] { + seen[key] = true + result = append(result, v) + } + } + + for _, v := range b { + key := versionKey{v.MinTimeMs, v.MaxTimeMs} + if !seen[key] { + seen[key] = true + result = append(result, v) + } + } + + return result +} + +// sendResourceAttributesBatched sends resource attributes in batches to avoid exceeding message size limits. +func sendResourceAttributesBatched(srv storegatewaypb.StoreGateway_ResourceAttributesServer, items []*storepb.ResourceAttributesSeriesData, maxBatchSize int) error { + if len(items) == 0 { + return nil + } + + resp := &storepb.ResourceAttributesResponse{} + currentSize := 0 + + for _, item := range items { + itemSize := item.Size() + + if currentSize+itemSize > maxBatchSize && len(resp.Items) > 0 { + if err := srv.Send(resp); err != nil { + return errors.Wrap(err, "send response batch") + } + resp = &storepb.ResourceAttributesResponse{} + currentSize = 0 + } + + resp.Items = append(resp.Items, item) + currentSize += itemSize + } + + // Send remaining items + if len(resp.Items) > 0 { + if err := srv.Send(resp); err != nil { + return errors.Wrap(err, "send final response batch") + } + } + + return nil +} + // blockLabelValues returns sorted values of the label with requested name, // optionally restricting the search to the series that match the matchers provided. // - First we fetch all possible values for this label from the index. 
diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index 272da8b2c6a..61f7e4cc977 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -397,6 +397,24 @@ func (u *BucketStores) LabelValues(ctx context.Context, req *storepb.LabelValues return store.LabelValues(ctx, req) } +// ResourceAttributes returns OTel resource attributes for series matching the matchers. +func (u *BucketStores) ResourceAttributes(req *storepb.ResourceAttributesRequest, srv storegatewaypb.StoreGateway_ResourceAttributesServer) error { + spanLog, ctx := spanlogger.New(srv.Context(), u.logger, tracer, "BucketStores.ResourceAttributes") + defer spanLog.Finish() + + userID := getUserIDFromGRPCContext(ctx) + if userID == "" { + return fmt.Errorf("no userID") + } + + store := u.getStore(userID) + if store == nil { + return nil + } + + return store.ResourceAttributes(req, srv) +} + // scanUsers in the bucket and return the list of found users, respecting any specifically // enabled or disabled users. 
func (u *BucketStores) scanUsers(ctx context.Context) ([]string, error) { diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index 4bd9e3c52bc..b44cf6bb240 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -3155,3 +3155,110 @@ func BenchmarkFilterPostingsByCachedShardHash_NoPostingsShifted(b *testing.B) { filterPostingsByCachedShardHash(ps, shard, cachedSeriesHasher{cache}, nil) } } + +func TestParseResourceAttributesParquet_EmptyData(t *testing.T) { + // Empty data should return empty map without error + result, err := parseResourceAttributesParquet(context.Background(), nil, 0) + require.NoError(t, err) + assert.Nil(t, result) +} + +func TestDeduplicateResourceAttributeItems(t *testing.T) { + // Test deduplication logic + items := []*storepb.ResourceAttributesSeriesData{ + { + Labels: map[string]string{"__name__": "metric1", "job": "test"}, + Versions: []*storepb.ResourceVersionData{ + {Identifying: map[string]string{"service.name": "svc1"}, MinTimeMs: 1000, MaxTimeMs: 2000}, + }, + }, + { + Labels: map[string]string{"__name__": "metric1", "job": "test"}, + Versions: []*storepb.ResourceVersionData{ + {Identifying: map[string]string{"service.name": "svc1"}, MinTimeMs: 3000, MaxTimeMs: 4000}, + }, + }, + { + Labels: map[string]string{"__name__": "metric2", "job": "test"}, + Versions: []*storepb.ResourceVersionData{ + {Identifying: map[string]string{"service.name": "svc2"}, MinTimeMs: 1000, MaxTimeMs: 2000}, + }, + }, + } + + result := deduplicateResourceAttributeItems(items) + + // Should have 2 unique series (metric1 and metric2) + assert.Len(t, result, 2) + + // Find the metric1 series and verify versions were merged + for _, item := range result { + if item.Labels["__name__"] == "metric1" { + assert.Len(t, item.Versions, 2, "metric1 should have 2 merged versions") + } + if item.Labels["__name__"] == "metric2" { + assert.Len(t, item.Versions, 1, "metric2 should have 1 version") + } + } +} + +func 
TestMergeResourceVersions(t *testing.T) { + cases := []struct { + name string + a []*storepb.ResourceVersionData + b []*storepb.ResourceVersionData + expected int + }{ + { + name: "both empty", + a: nil, + b: nil, + expected: 0, + }, + { + name: "a empty", + a: nil, + b: []*storepb.ResourceVersionData{ + {MinTimeMs: 1000, MaxTimeMs: 2000}, + }, + expected: 1, + }, + { + name: "b empty", + a: []*storepb.ResourceVersionData{ + {MinTimeMs: 1000, MaxTimeMs: 2000}, + }, + b: nil, + expected: 1, + }, + { + name: "no duplicates", + a: []*storepb.ResourceVersionData{ + {MinTimeMs: 1000, MaxTimeMs: 2000}, + }, + b: []*storepb.ResourceVersionData{ + {MinTimeMs: 3000, MaxTimeMs: 4000}, + }, + expected: 2, + }, + { + name: "with duplicates", + a: []*storepb.ResourceVersionData{ + {MinTimeMs: 1000, MaxTimeMs: 2000}, + {MinTimeMs: 3000, MaxTimeMs: 4000}, + }, + b: []*storepb.ResourceVersionData{ + {MinTimeMs: 1000, MaxTimeMs: 2000}, // duplicate + {MinTimeMs: 5000, MaxTimeMs: 6000}, + }, + expected: 3, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + result := mergeResourceVersions(tc.a, tc.b) + assert.Len(t, result, tc.expected) + }) + } +} diff --git a/pkg/storegateway/gateway.go b/pkg/storegateway/gateway.go index 013db16aa82..d1dd7c247ee 100644 --- a/pkg/storegateway/gateway.go +++ b/pkg/storegateway/gateway.go @@ -379,6 +379,16 @@ func (g *StoreGateway) LabelValues(ctx context.Context, req *storepb.LabelValues return g.stores.LabelValues(ctx, req) } +// ResourceAttributes implements the storegatewaypb.StoreGatewayServer interface. 
+func (g *StoreGateway) ResourceAttributes(req *storepb.ResourceAttributesRequest, srv storegatewaypb.StoreGateway_ResourceAttributesServer) error { + ix := g.tracker.Insert(func() string { + return requestActivity(srv.Context(), "StoreGateway/ResourceAttributes", req) + }) + defer g.tracker.Delete(ix) + + return g.stores.ResourceAttributes(req, srv) +} + func requestActivity(ctx context.Context, name string, req interface{}) string { user := getUserIDFromGRPCContext(ctx) traceID, _ := tracing.ExtractSampledTraceID(ctx) diff --git a/pkg/storegateway/hintspb/hints.pb.go b/pkg/storegateway/hintspb/hints.pb.go index 88f0e6cf9fc..1dfb4ec4a2f 100644 --- a/pkg/storegateway/hintspb/hints.pb.go +++ b/pkg/storegateway/hintspb/hints.pb.go @@ -27,18 +27,12 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type SeriesRequestHints struct { - // block_matchers is a list of label matchers that are evaluated against each single block's - // labels to filter which blocks get queried. If the list is empty, no per-block filtering - // is applied. - BlockMatchers []storepb.LabelMatcher `protobuf:"bytes,1,rep,name=block_matchers,json=blockMatchers,proto3" json:"block_matchers"` - // projection_include indicates if label projection hints are including only specific labels - // or excluding them. The combination of default values of projection_include (false) and - // projection_labels (empty) disables use of projections. - ProjectionInclude bool `protobuf:"varint,2,opt,name=projection_include,json=projectionInclude,proto3" json:"projection_include,omitempty"` - // projection_labels is the set of labels required (projection_include = true) or not required - // (projection_include = false) to satisfy a query. The combination of default values of - // projection_include (false) and projection_labels (empty) disables use of projections. 
- ProjectionLabels []string `protobuf:"bytes,3,rep,name=projection_labels,json=projectionLabels,proto3" json:"projection_labels,omitempty"` + /// block_matchers is a list of label matchers that are evaluated against each single block's + /// labels to filter which blocks get queried. If the list is empty, no per-block filtering + /// is applied. + BlockMatchers []storepb.LabelMatcher `protobuf:"bytes,1,rep,name=block_matchers,json=blockMatchers,proto3" json:"block_matchers"` + ProjectionInclude bool `protobuf:"varint,2,opt,name=projection_include,json=projectionInclude,proto3" json:"projection_include,omitempty"` + ProjectionLabels []string `protobuf:"bytes,3,rep,name=projection_labels,json=projectionLabels,proto3" json:"projection_labels,omitempty"` } func (m *SeriesRequestHints) Reset() { *m = SeriesRequestHints{} } @@ -74,7 +68,7 @@ func (m *SeriesRequestHints) XXX_DiscardUnknown() { var xxx_messageInfo_SeriesRequestHints proto.InternalMessageInfo type SeriesResponseHints struct { - // queried_blocks is the list of blocks that have been queried. + /// queried_blocks is the list of blocks that have been queried. QueriedBlocks []Block `protobuf:"bytes,1,rep,name=queried_blocks,json=queriedBlocks,proto3" json:"queried_blocks"` } @@ -147,9 +141,9 @@ func (m *Block) XXX_DiscardUnknown() { var xxx_messageInfo_Block proto.InternalMessageInfo type LabelNamesRequestHints struct { - // block_matchers is a list of label matchers that are evaluated against each single block's - // labels to filter which blocks get queried. If the list is empty, no per-block filtering - // is applied. + /// block_matchers is a list of label matchers that are evaluated against each single block's + /// labels to filter which blocks get queried. If the list is empty, no per-block filtering + /// is applied. 
BlockMatchers []storepb.LabelMatcher `protobuf:"bytes,1,rep,name=block_matchers,json=blockMatchers,proto3" json:"block_matchers"` } @@ -186,7 +180,7 @@ func (m *LabelNamesRequestHints) XXX_DiscardUnknown() { var xxx_messageInfo_LabelNamesRequestHints proto.InternalMessageInfo type LabelNamesResponseHints struct { - // queried_blocks is the list of blocks that have been queried. + /// queried_blocks is the list of blocks that have been queried. QueriedBlocks []Block `protobuf:"bytes,1,rep,name=queried_blocks,json=queriedBlocks,proto3" json:"queried_blocks"` } @@ -223,9 +217,9 @@ func (m *LabelNamesResponseHints) XXX_DiscardUnknown() { var xxx_messageInfo_LabelNamesResponseHints proto.InternalMessageInfo type LabelValuesRequestHints struct { - // block_matchers is a list of label matchers that are evaluated against each single block's - // labels to filter which blocks get queried. If the list is empty, no per-block filtering - // is applied. + /// block_matchers is a list of label matchers that are evaluated against each single block's + /// labels to filter which blocks get queried. If the list is empty, no per-block filtering + /// is applied. BlockMatchers []storepb.LabelMatcher `protobuf:"bytes,1,rep,name=block_matchers,json=blockMatchers,proto3" json:"block_matchers"` } @@ -262,7 +256,7 @@ func (m *LabelValuesRequestHints) XXX_DiscardUnknown() { var xxx_messageInfo_LabelValuesRequestHints proto.InternalMessageInfo type LabelValuesResponseHints struct { - // queried_blocks is the list of blocks that have been queried. + /// queried_blocks is the list of blocks that have been queried. 
QueriedBlocks []Block `protobuf:"bytes,1,rep,name=queried_blocks,json=queriedBlocks,proto3" json:"queried_blocks"` } @@ -298,6 +292,82 @@ func (m *LabelValuesResponseHints) XXX_DiscardUnknown() { var xxx_messageInfo_LabelValuesResponseHints proto.InternalMessageInfo +type ResourceAttributesRequestHints struct { + /// block_matchers is a list of label matchers that are evaluated against each single block's + /// labels to filter which blocks get queried. If the list is empty, no per-block filtering + /// is applied. + BlockMatchers []storepb.LabelMatcher `protobuf:"bytes,1,rep,name=block_matchers,json=blockMatchers,proto3" json:"block_matchers"` +} + +func (m *ResourceAttributesRequestHints) Reset() { *m = ResourceAttributesRequestHints{} } +func (*ResourceAttributesRequestHints) ProtoMessage() {} +func (*ResourceAttributesRequestHints) Descriptor() ([]byte, []int) { + return fileDescriptor_522be8e0d2634375, []int{7} +} +func (m *ResourceAttributesRequestHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributesRequestHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAttributesRequestHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceAttributesRequestHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributesRequestHints.Merge(m, src) +} +func (m *ResourceAttributesRequestHints) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributesRequestHints) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributesRequestHints.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributesRequestHints proto.InternalMessageInfo + +type ResourceAttributesResponseHints struct { + /// queried_blocks is the list of blocks that have been queried. 
+ QueriedBlocks []Block `protobuf:"bytes,1,rep,name=queried_blocks,json=queriedBlocks,proto3" json:"queried_blocks"` +} + +func (m *ResourceAttributesResponseHints) Reset() { *m = ResourceAttributesResponseHints{} } +func (*ResourceAttributesResponseHints) ProtoMessage() {} +func (*ResourceAttributesResponseHints) Descriptor() ([]byte, []int) { + return fileDescriptor_522be8e0d2634375, []int{8} +} +func (m *ResourceAttributesResponseHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributesResponseHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAttributesResponseHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceAttributesResponseHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributesResponseHints.Merge(m, src) +} +func (m *ResourceAttributesResponseHints) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributesResponseHints) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributesResponseHints.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributesResponseHints proto.InternalMessageInfo + func init() { proto.RegisterType((*SeriesRequestHints)(nil), "hintspb.SeriesRequestHints") proto.RegisterType((*SeriesResponseHints)(nil), "hintspb.SeriesResponseHints") @@ -306,38 +376,38 @@ func init() { proto.RegisterType((*LabelNamesResponseHints)(nil), "hintspb.LabelNamesResponseHints") proto.RegisterType((*LabelValuesRequestHints)(nil), "hintspb.LabelValuesRequestHints") proto.RegisterType((*LabelValuesResponseHints)(nil), "hintspb.LabelValuesResponseHints") + proto.RegisterType((*ResourceAttributesRequestHints)(nil), "hintspb.ResourceAttributesRequestHints") + proto.RegisterType((*ResourceAttributesResponseHints)(nil), "hintspb.ResourceAttributesResponseHints") } func init() { 
proto.RegisterFile("hints.proto", fileDescriptor_522be8e0d2634375) } var fileDescriptor_522be8e0d2634375 = []byte{ - // 406 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xcf, 0xae, 0x93, 0x40, - 0x14, 0xc6, 0x67, 0x5a, 0xff, 0xdd, 0xb9, 0x91, 0x28, 0xde, 0x78, 0x49, 0x17, 0x23, 0x61, 0x45, - 0x62, 0x84, 0x44, 0x97, 0xc6, 0x45, 0xbb, 0xd2, 0x44, 0x5d, 0x60, 0x52, 0x13, 0x35, 0x21, 0x03, - 0x8c, 0x30, 0x16, 0x18, 0xca, 0x0c, 0x31, 0xdd, 0xf9, 0x08, 0x3e, 0x86, 0x7b, 0x5f, 0xa2, 0xcb, - 0x2e, 0xbb, 0x32, 0x42, 0x37, 0x2e, 0xfb, 0x08, 0x86, 0x01, 0xd2, 0xde, 0x3d, 0xbb, 0x73, 0xbe, - 0x0f, 0xbe, 0xf3, 0x9b, 0x93, 0x19, 0x74, 0x99, 0xb0, 0x5c, 0x0a, 0xa7, 0x28, 0xb9, 0xe4, 0xfa, - 0x5d, 0xd5, 0x14, 0xc1, 0xec, 0x55, 0xcc, 0x64, 0x52, 0x05, 0x4e, 0xc8, 0x33, 0x37, 0x2e, 0xc9, - 0x57, 0x92, 0x13, 0x37, 0x63, 0x19, 0x2b, 0xdd, 0x62, 0x15, 0xbb, 0x42, 0xf2, 0x92, 0xc6, 0x44, - 0xd2, 0xef, 0x64, 0xd3, 0x35, 0x45, 0xe0, 0xca, 0x4d, 0x41, 0xfb, 0x9c, 0xd9, 0x55, 0xcc, 0x63, - 0xae, 0x4a, 0xb7, 0xad, 0x3a, 0xd5, 0xfa, 0x0d, 0x91, 0xfe, 0x81, 0x96, 0x8c, 0x0a, 0x8f, 0xae, - 0x2b, 0x2a, 0xe4, 0xeb, 0x76, 0x9a, 0x3e, 0x47, 0x5a, 0x90, 0xf2, 0x70, 0xe5, 0x67, 0x44, 0x86, - 0x09, 0x2d, 0x85, 0x01, 0xcd, 0xa9, 0x7d, 0xf9, 0xfc, 0xca, 0x91, 0x09, 0xc9, 0xb9, 0x70, 0xde, - 0x92, 0x80, 0xa6, 0xef, 0x3a, 0x73, 0x71, 0x6b, 0xfb, 0xe7, 0x09, 0xf0, 0xee, 0xab, 0x3f, 0x7a, - 0x4d, 0xe8, 0xcf, 0x90, 0x5e, 0x94, 0xfc, 0x1b, 0x0d, 0x25, 0xe3, 0xb9, 0xcf, 0xf2, 0x30, 0xad, - 0x22, 0x6a, 0x4c, 0x4c, 0x68, 0xdf, 0xf3, 0x1e, 0x9e, 0x9c, 0x37, 0x9d, 0xa1, 0x3f, 0x45, 0x67, - 0xa2, 0x9f, 0xb6, 0xf1, 0xc2, 0x98, 0x9a, 0x53, 0xfb, 0xc2, 0x7b, 0x70, 0x32, 0xd4, 0x58, 0x61, - 0x79, 0xe8, 0xd1, 0x00, 0x2d, 0x0a, 0x9e, 0x0b, 0xda, 0x51, 0xbf, 0x44, 0xda, 0xba, 0x6a, 0xf5, - 0xc8, 0x57, 0x2c, 0x03, 0xb5, 0xe6, 0xf4, 0x3b, 0x74, 0x16, 0xad, 0x3c, 0xf0, 0xf6, 0xdf, 0x2a, - 0x4d, 0x58, 0xd7, 0xe8, 0xb6, 0xaa, 0x74, 0x0d, 0x4d, 0x58, 0x64, 
0x40, 0x13, 0xda, 0x17, 0xde, - 0x84, 0x45, 0xd6, 0x67, 0xf4, 0x58, 0x8d, 0x7d, 0x4f, 0xb2, 0xd1, 0xb7, 0x64, 0x2d, 0xd1, 0xf5, - 0x79, 0xf8, 0x68, 0xa7, 0xf9, 0xd2, 0xe7, 0x2e, 0x49, 0x5a, 0x8d, 0x4f, 0xfd, 0x11, 0x19, 0x37, - 0xd2, 0xc7, 0xc2, 0x5e, 0xcc, 0xb7, 0x35, 0x06, 0xbb, 0x1a, 0x83, 0x7d, 0x8d, 0xc1, 0xb1, 0xc6, - 0xf0, 0x47, 0x83, 0xe1, 0xaf, 0x06, 0xc3, 0x6d, 0x83, 0xe1, 0xae, 0xc1, 0xf0, 0x6f, 0x83, 0xe1, - 0xbf, 0x06, 0x83, 0x63, 0x83, 0xe1, 0xcf, 0x03, 0x06, 0xbb, 0x03, 0x06, 0xfb, 0x03, 0x06, 0x9f, - 0x86, 0x67, 0x12, 0xdc, 0x51, 0x17, 0xfb, 0xc5, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0x94, - 0xdc, 0x07, 0x45, 0x03, 0x00, 0x00, + // 377 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xbf, 0x6e, 0xea, 0x30, + 0x14, 0xc6, 0x6d, 0xee, 0x3f, 0x5d, 0xa3, 0x9b, 0x21, 0x17, 0x5d, 0x10, 0x83, 0x41, 0x99, 0x98, + 0x12, 0xe9, 0xde, 0xf1, 0xaa, 0x03, 0x4c, 0x1d, 0xda, 0x0e, 0xa9, 0x04, 0x52, 0x5b, 0x15, 0x39, + 0xc1, 0x4d, 0x2c, 0x48, 0x1c, 0x6c, 0x47, 0x15, 0x5b, 0x1f, 0xa1, 0x8f, 0xd1, 0x47, 0x61, 0x64, + 0x64, 0xaa, 0x9a, 0xb0, 0x74, 0xe4, 0x11, 0xaa, 0x38, 0x89, 0xd4, 0x4a, 0x1d, 0xb3, 0x9d, 0xf3, + 0xf9, 0xf8, 0x77, 0xbe, 0x6f, 0x38, 0xa8, 0x1d, 0xb2, 0x58, 0x49, 0x3b, 0x11, 0x5c, 0x71, 0xf3, + 0x87, 0x6e, 0x12, 0xaf, 0x7f, 0x12, 0x30, 0x15, 0xa6, 0x9e, 0xed, 0xf3, 0xc8, 0x09, 0x04, 0xb9, + 0x23, 0x31, 0x71, 0x22, 0x16, 0x31, 0xe1, 0x24, 0xcb, 0xc0, 0x91, 0x8a, 0x0b, 0x1a, 0x10, 0x45, + 0xef, 0xc9, 0xa6, 0x6c, 0x12, 0xcf, 0x51, 0x9b, 0x84, 0x56, 0x9c, 0x7e, 0x27, 0xe0, 0x01, 0xd7, + 0xa5, 0x53, 0x54, 0xa5, 0x6a, 0xcd, 0x90, 0x79, 0x49, 0x05, 0xa3, 0xd2, 0xa5, 0xeb, 0x94, 0x4a, + 0x75, 0x5a, 0x2c, 0x33, 0xc7, 0xc8, 0xf0, 0x56, 0xdc, 0x5f, 0xce, 0x23, 0xa2, 0xfc, 0x90, 0x0a, + 0xd9, 0x83, 0xc3, 0x2f, 0xa3, 0xf6, 0xdf, 0x8e, 0xad, 0x42, 0x12, 0x73, 0x69, 0x9f, 0x11, 0x8f, + 0xae, 0xce, 0xcb, 0xc7, 0xc9, 0xd7, 0xed, 0xf3, 0x00, 0xb8, 0xbf, 0xf4, 0x8f, 0x4a, 0x93, 0x96, + 0x8b, 0x7e, 0xd7, 
0x60, 0x99, 0xf0, 0x58, 0xd2, 0x92, 0xfc, 0x1f, 0x19, 0xeb, 0xb4, 0xd0, 0x17, + 0x73, 0x3d, 0x5f, 0x93, 0x0d, 0xbb, 0x8a, 0x69, 0x4f, 0x0a, 0xb9, 0x66, 0x56, 0xb3, 0x5a, 0x93, + 0x56, 0x17, 0x7d, 0xd3, 0x95, 0x69, 0xa0, 0x16, 0x5b, 0xf4, 0xe0, 0x10, 0x8e, 0x7e, 0xba, 0x2d, + 0xb6, 0xb0, 0xae, 0xd1, 0x1f, 0xed, 0xe8, 0x82, 0x44, 0xcd, 0x27, 0x99, 0xa2, 0xee, 0x7b, 0x78, + 0x63, 0x69, 0x6e, 0x2a, 0xee, 0x94, 0xac, 0xd2, 0xe6, 0x5d, 0xcf, 0x50, 0xef, 0x03, 0xbd, 0x31, + 0xdb, 0x3e, 0xc2, 0x2e, 0x95, 0x3c, 0x15, 0x3e, 0x1d, 0x2b, 0x25, 0x98, 0x97, 0xaa, 0xe6, 0xdd, + 0xdf, 0xa2, 0xc1, 0x67, 0x4b, 0x9a, 0x0a, 0x31, 0x19, 0x6f, 0x33, 0x0c, 0x76, 0x19, 0x06, 0xfb, + 0x0c, 0x83, 0x63, 0x86, 0xe1, 0x43, 0x8e, 0xe1, 0x53, 0x8e, 0xe1, 0x36, 0xc7, 0x70, 0x97, 0x63, + 0xf8, 0x92, 0x63, 0xf8, 0x9a, 0x63, 0x70, 0xcc, 0x31, 0x7c, 0x3c, 0x60, 0xb0, 0x3b, 0x60, 0xb0, + 0x3f, 0x60, 0x70, 0x55, 0x9f, 0xa3, 0xf7, 0x5d, 0x1f, 0xd0, 0xbf, 0xb7, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x16, 0xc9, 0x6d, 0xec, 0xad, 0x03, 0x00, 0x00, } func (this *SeriesRequestHints) Equal(that interface{}) bool { @@ -549,11 +619,69 @@ func (this *LabelValuesResponseHints) Equal(that interface{}) bool { } return true } +func (this *ResourceAttributesRequestHints) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceAttributesRequestHints) + if !ok { + that2, ok := that.(ResourceAttributesRequestHints) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.BlockMatchers) != len(that1.BlockMatchers) { + return false + } + for i := range this.BlockMatchers { + if !this.BlockMatchers[i].Equal(&that1.BlockMatchers[i]) { + return false + } + } + return true +} +func (this *ResourceAttributesResponseHints) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceAttributesResponseHints) + if !ok { + that2, ok := 
that.(ResourceAttributesResponseHints) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.QueriedBlocks) != len(that1.QueriedBlocks) { + return false + } + for i := range this.QueriedBlocks { + if !this.QueriedBlocks[i].Equal(&that1.QueriedBlocks[i]) { + return false + } + } + return true +} func (this *SeriesRequestHints) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 5) s = append(s, "&hintspb.SeriesRequestHints{") if this.BlockMatchers != nil { vs := make([]storepb.LabelMatcher, len(this.BlockMatchers)) @@ -562,8 +690,6 @@ func (this *SeriesRequestHints) GoString() string { } s = append(s, "BlockMatchers: "+fmt.Sprintf("%#v", vs)+",\n") } - s = append(s, "ProjectionInclude: "+fmt.Sprintf("%#v", this.ProjectionInclude)+",\n") - s = append(s, "ProjectionLabels: "+fmt.Sprintf("%#v", this.ProjectionLabels)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -657,6 +783,38 @@ func (this *LabelValuesResponseHints) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *ResourceAttributesRequestHints) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&hintspb.ResourceAttributesRequestHints{") + if this.BlockMatchers != nil { + vs := make([]storepb.LabelMatcher, len(this.BlockMatchers)) + for i := range vs { + vs[i] = this.BlockMatchers[i] + } + s = append(s, "BlockMatchers: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceAttributesResponseHints) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&hintspb.ResourceAttributesResponseHints{") + if this.QueriedBlocks != nil { + vs := make([]Block, len(this.QueriedBlocks)) + for i := range vs { + vs[i] = this.QueriedBlocks[i] + } + s = append(s, "QueriedBlocks: 
"+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringHints(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -936,6 +1094,80 @@ func (m *LabelValuesResponseHints) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } +func (m *ResourceAttributesRequestHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAttributesRequestHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributesRequestHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BlockMatchers) > 0 { + for iNdEx := len(m.BlockMatchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BlockMatchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHints(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceAttributesResponseHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAttributesResponseHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributesResponseHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.QueriedBlocks) > 0 { + for iNdEx := len(m.QueriedBlocks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.QueriedBlocks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHints(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintHints(dAtA []byte, offset int, v uint64) int { offset -= sovHints(v) base := offset @@ -960,7 +1192,7 @@ func (m *SeriesRequestHints) Size() (n int) { } } if m.ProjectionInclude { - n += 2 + n += 1 + 1 } if len(m.ProjectionLabels) > 0 { for _, s := range m.ProjectionLabels { @@ -1059,6 +1291,36 @@ func (m *LabelValuesResponseHints) Size() (n int) { return n } +func (m *ResourceAttributesRequestHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BlockMatchers) > 0 { + for _, e := range m.BlockMatchers { + l = e.Size() + n += 1 + l + sovHints(uint64(l)) + } + } + return n +} + +func (m *ResourceAttributesResponseHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.QueriedBlocks) > 0 { + for _, e := range m.QueriedBlocks { + l = e.Size() + n += 1 + l + sovHints(uint64(l)) + } + } + return n +} + func sovHints(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1167,6 +1429,36 @@ func (this *LabelValuesResponseHints) String() string { }, "") return s } +func (this *ResourceAttributesRequestHints) String() string { + if this == nil { + return "nil" + } + repeatedStringForBlockMatchers := "[]LabelMatcher{" + for _, f := range this.BlockMatchers { + repeatedStringForBlockMatchers += fmt.Sprintf("%v", f) + "," + } + repeatedStringForBlockMatchers += "}" + s := strings.Join([]string{`&ResourceAttributesRequestHints{`, + `BlockMatchers:` + repeatedStringForBlockMatchers + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributesResponseHints) String() string { + if this == nil { + return "nil" + } + repeatedStringForQueriedBlocks := "[]Block{" + for _, f := range this.QueriedBlocks { + repeatedStringForQueriedBlocks += strings.Replace(strings.Replace(f.String(), "Block", "Block", 1), `&`, ``, 1) + "," + } + repeatedStringForQueriedBlocks += "}" + s := strings.Join([]string{`&ResourceAttributesResponseHints{`, + `QueriedBlocks:` 
+ repeatedStringForQueriedBlocks + `,`, + `}`, + }, "") + return s +} func valueToStringHints(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1813,6 +2105,174 @@ func (m *LabelValuesResponseHints) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceAttributesRequestHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributesRequestHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockMatchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHints + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHints + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockMatchers = append(m.BlockMatchers, storepb.LabelMatcher{}) + if err := m.BlockMatchers[len(m.BlockMatchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHints(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributesResponseHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributesResponseHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueriedBlocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHints + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHints + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QueriedBlocks = append(m.QueriedBlocks, Block{}) + if err := m.QueriedBlocks[len(m.QueriedBlocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHints(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipHints(dAtA []byte) (n int, err error) { l := 
len(dAtA) iNdEx := 0 diff --git a/pkg/storegateway/hintspb/hints.proto b/pkg/storegateway/hintspb/hints.proto index 5495b199e5d..871ae7ca7fc 100644 --- a/pkg/storegateway/hintspb/hints.proto +++ b/pkg/storegateway/hintspb/hints.proto @@ -67,3 +67,15 @@ message LabelValuesResponseHints { // queried_blocks is the list of blocks that have been queried. repeated Block queried_blocks = 1 [(gogoproto.nullable) = false]; } + +message ResourceAttributesRequestHints { + /// block_matchers is a list of label matchers that are evaluated against each single block's + /// labels to filter which blocks get queried. If the list is empty, no per-block filtering + /// is applied. + repeated thanos.LabelMatcher block_matchers = 1 [(gogoproto.nullable) = false]; +} + +message ResourceAttributesResponseHints { + /// queried_blocks is the list of blocks that have been queried. + repeated Block queried_blocks = 1 [(gogoproto.nullable) = false]; +} diff --git a/pkg/storegateway/storegatewaypb/custom.go b/pkg/storegateway/storegatewaypb/custom.go index ac6292a5692..e27784c6bba 100644 --- a/pkg/storegateway/storegatewaypb/custom.go +++ b/pkg/storegateway/storegatewaypb/custom.go @@ -45,6 +45,15 @@ func (c *customStoreGatewayClient) LabelValues(ctx context.Context, in *storepb. return res, globalerror.WrapGRPCErrorWithContextError(ctx, err) } +// ResourceAttributes implements StoreGatewayClient. +func (c *customStoreGatewayClient) ResourceAttributes(ctx context.Context, in *storepb.ResourceAttributesRequest, opts ...grpc.CallOption) (StoreGateway_ResourceAttributesClient, error) { + client, err := c.wrapped.ResourceAttributes(ctx, in, opts...) + if err != nil { + return client, globalerror.WrapGRPCErrorWithContextError(ctx, err) + } + return newCustomResourceAttributesClient(client), nil +} + // customStoreGatewayClient is a custom StoreGateway_SeriesClient which wraps well known gRPC errors into standard golang errors. 
type customSeriesClient struct { *customClientStream @@ -100,3 +109,22 @@ func (c *customClientStream) SendMsg(m any) error { func (c *customClientStream) RecvMsg(m any) error { return globalerror.WrapGRPCErrorWithContextError(c.Context(), c.wrapped.RecvMsg(m)) } + +// customResourceAttributesClient is a custom StoreGateway_ResourceAttributesClient which wraps well known gRPC errors into standard golang errors. +type customResourceAttributesClient struct { + *customClientStream + + wrapped StoreGateway_ResourceAttributesClient +} + +func newCustomResourceAttributesClient(client StoreGateway_ResourceAttributesClient) *customResourceAttributesClient { + return &customResourceAttributesClient{ + customClientStream: &customClientStream{client}, + wrapped: client, + } +} + +func (c *customResourceAttributesClient) Recv() (*storepb.ResourceAttributesResponse, error) { + res, err := c.wrapped.Recv() + return res, globalerror.WrapGRPCErrorWithContextError(c.Context(), err) +} diff --git a/pkg/storegateway/storegatewaypb/gateway.pb.go b/pkg/storegateway/storegatewaypb/gateway.pb.go index 0bb5696eb92..232bc5a8c62 100644 --- a/pkg/storegateway/storegatewaypb/gateway.pb.go +++ b/pkg/storegateway/storegatewaypb/gateway.pb.go @@ -28,24 +28,26 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } var fileDescriptor_f1a937782ebbded5 = []byte{ - // 263 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xbd, 0x4a, 0x04, 0x31, - 0x14, 0x46, 0x93, 0x66, 0xc1, 0xf8, 0x53, 0x04, 0x14, 0x5c, 0xe1, 0x3e, 0xc2, 0x44, 0xb4, 0x12, - 0x3b, 0x15, 0x6d, 0xc4, 0xc2, 0x05, 0x0b, 0xbb, 0x9b, 0xe5, 0x3a, 0x3b, 0xb8, 0x33, 0x89, 0x49, - 0x06, 0xb1, 0xf3, 0x11, 0x7c, 0x0c, 0x1f, 0xc5, 0x72, 0x2a, 0xd9, 0xd2, 0xc9, 0x34, 0x96, 0xfb, - 0x08, 0xe2, 0x66, 0x07, 0x7f, 0xd8, 0xf2, 0x3b, 0xf7, 0x70, 0x8a, 0x2b, 0x36, 0x73, 0x0c, 0xf4, 
- 0x88, 0x4f, 0x99, 0x75, 0x26, 0x18, 0xb9, 0xb6, 0x9c, 0x56, 0x0f, 0x8f, 0xf3, 0x22, 0x4c, 0x6a, - 0x9d, 0x8d, 0x4d, 0xa9, 0x72, 0x87, 0x77, 0x58, 0xa1, 0x2a, 0x8b, 0xb2, 0x70, 0xca, 0xde, 0xe7, - 0xca, 0x07, 0xe3, 0x68, 0x29, 0xa7, 0x61, 0xb5, 0x72, 0x76, 0x9c, 0x3a, 0x07, 0xef, 0x5c, 0x6c, - 0x8c, 0xbe, 0xe9, 0x45, 0x52, 0xe4, 0x91, 0x18, 0x8c, 0xc8, 0x15, 0xe4, 0xe5, 0x76, 0x16, 0x26, - 0x58, 0x19, 0x9f, 0xa5, 0x7d, 0x4d, 0x0f, 0x35, 0xf9, 0x30, 0xdc, 0xf9, 0x8f, 0xbd, 0x35, 0x95, - 0xa7, 0x7d, 0x2e, 0x4f, 0x85, 0xb8, 0x44, 0x4d, 0xd3, 0x2b, 0x2c, 0xc9, 0xcb, 0xdd, 0xde, 0xfb, - 0x61, 0x7d, 0x62, 0xb8, 0xea, 0x94, 0x32, 0xf2, 0x5c, 0xac, 0x2f, 0xe8, 0x0d, 0x4e, 0x6b, 0xf2, - 0xf2, 0xaf, 0x9a, 0x60, 0x9f, 0xd9, 0x5b, 0x79, 0x4b, 0x9d, 0x93, 0xb3, 0xa6, 0x05, 0x36, 0x6b, - 0x81, 0xcd, 0x5b, 0xe0, 0xcf, 0x11, 0xf8, 0x6b, 0x04, 0xfe, 0x16, 0x81, 0x37, 0x11, 0xf8, 0x47, - 0x04, 0xfe, 0x19, 0x81, 0xcd, 0x23, 0xf0, 0x97, 0x0e, 0x58, 0xd3, 0x01, 0x9b, 0x75, 0xc0, 0x6e, - 0xb7, 0x7e, 0xbf, 0xcb, 0x6a, 0x3d, 0x58, 0x7c, 0xe9, 0xf0, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xb9, - 0xbe, 0x15, 0x37, 0x7e, 0x01, 0x00, 0x00, + // 296 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x3d, 0x4a, 0x04, 0x31, + 0x18, 0x86, 0x13, 0x8b, 0x05, 0xe3, 0x4f, 0x11, 0x50, 0x70, 0x85, 0x0f, 0xf4, 0x00, 0x33, 0xa2, + 0x95, 0x58, 0xf9, 0x83, 0x36, 0x62, 0xb1, 0x0b, 0x16, 0x82, 0x45, 0xb2, 0x7c, 0xce, 0x0e, 0xee, + 0x6c, 0x62, 0x92, 0x41, 0xec, 0x3c, 0x82, 0xc7, 0xf0, 0x02, 0xde, 0xc1, 0x72, 0xca, 0x2d, 0x9d, + 0x4c, 0x63, 0xb9, 0x47, 0x10, 0x37, 0x13, 0xfc, 0x5b, 0x2c, 0xdf, 0xf7, 0x7d, 0xf2, 0x40, 0xf8, + 0xd8, 0x4a, 0x26, 0x1c, 0xde, 0x8b, 0x87, 0x44, 0x1b, 0xe5, 0x14, 0x5f, 0x6c, 0xa3, 0x96, 0xdd, + 0x83, 0x2c, 0x77, 0xc3, 0x52, 0x26, 0x03, 0x55, 0xa4, 0x99, 0x11, 0x37, 0x62, 0x2c, 0xd2, 0x22, + 0x2f, 0x72, 0x93, 0xea, 0xdb, 0x2c, 0xb5, 0x4e, 0x19, 0x6c, 0xe1, 0x10, 0xb4, 0x4c, 0x8d, 0x1e, + 0x04, 0xcf, 0xee, 0xcb, 0x02, 0x5b, 0xee, 
0x7f, 0xb6, 0x67, 0x01, 0xe1, 0xfb, 0xac, 0xd3, 0x47, + 0x93, 0xa3, 0xe5, 0x6b, 0x89, 0x1b, 0x8a, 0xb1, 0xb2, 0x49, 0xc8, 0x3d, 0xbc, 0x2b, 0xd1, 0xba, + 0xee, 0xfa, 0xef, 0xda, 0x6a, 0x35, 0xb6, 0xb8, 0x43, 0xf9, 0x31, 0x63, 0xe7, 0x42, 0xe2, 0xe8, + 0x42, 0x14, 0x68, 0xf9, 0x46, 0xe4, 0xbe, 0xba, 0xa8, 0xe8, 0xce, 0x9b, 0x82, 0x86, 0x9f, 0xb2, + 0xa5, 0x59, 0x7b, 0x29, 0x46, 0x25, 0x5a, 0xfe, 0x13, 0x0d, 0x65, 0xd4, 0x6c, 0xce, 0xdd, 0x5a, + 0xcf, 0x35, 0xe3, 0x3d, 0xb4, 0xaa, 0x34, 0x03, 0x3c, 0x74, 0xce, 0xe4, 0xb2, 0x74, 0x68, 0xf9, + 0x56, 0x7c, 0xf2, 0x77, 0x8b, 0xd6, 0xed, 0xff, 0x90, 0xf8, 0xd7, 0xa3, 0x93, 0xaa, 0x06, 0x32, + 0xa9, 0x81, 0x4c, 0x6b, 0xa0, 0x8f, 0x1e, 0xe8, 0xb3, 0x07, 0xfa, 0xea, 0x81, 0x56, 0x1e, 0xe8, + 0x9b, 0x07, 0xfa, 0xee, 0x81, 0x4c, 0x3d, 0xd0, 0xa7, 0x06, 0x48, 0xd5, 0x00, 0x99, 0x34, 0x40, + 0xae, 0x56, 0xbf, 0x5f, 0x43, 0x4b, 0xd9, 0x99, 0x1d, 0x61, 0xef, 0x23, 0x00, 0x00, 0xff, 0xff, + 0x59, 0x53, 0x72, 0x2e, 0xdd, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -72,6 +74,8 @@ type StoreGatewayClient interface { LabelNames(ctx context.Context, in *storepb.LabelNamesRequest, opts ...grpc.CallOption) (*storepb.LabelNamesResponse, error) // LabelValues returns all label values for given label name. LabelValues(ctx context.Context, in *storepb.LabelValuesRequest, opts ...grpc.CallOption) (*storepb.LabelValuesResponse, error) + // ResourceAttributes returns OTel resource attributes for series matching the matchers. 
+ ResourceAttributes(ctx context.Context, in *storepb.ResourceAttributesRequest, opts ...grpc.CallOption) (StoreGateway_ResourceAttributesClient, error) } type storeGatewayClient struct { @@ -132,6 +136,38 @@ func (c *storeGatewayClient) LabelValues(ctx context.Context, in *storepb.LabelV return out, nil } +func (c *storeGatewayClient) ResourceAttributes(ctx context.Context, in *storepb.ResourceAttributesRequest, opts ...grpc.CallOption) (StoreGateway_ResourceAttributesClient, error) { + stream, err := c.cc.NewStream(ctx, &_StoreGateway_serviceDesc.Streams[1], "/gatewaypb.StoreGateway/ResourceAttributes", opts...) + if err != nil { + return nil, err + } + x := &storeGatewayResourceAttributesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type StoreGateway_ResourceAttributesClient interface { + Recv() (*storepb.ResourceAttributesResponse, error) + grpc.ClientStream +} + +type storeGatewayResourceAttributesClient struct { + grpc.ClientStream +} + +func (x *storeGatewayResourceAttributesClient) Recv() (*storepb.ResourceAttributesResponse, error) { + m := new(storepb.ResourceAttributesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // StoreGatewayServer is the server API for StoreGateway service. type StoreGatewayServer interface { // Series streams each Series for given label matchers and time range. @@ -146,6 +182,8 @@ type StoreGatewayServer interface { LabelNames(context.Context, *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) // LabelValues returns all label values for given label name. LabelValues(context.Context, *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) + // ResourceAttributes returns OTel resource attributes for series matching the matchers. 
+ ResourceAttributes(*storepb.ResourceAttributesRequest, StoreGateway_ResourceAttributesServer) error } // UnimplementedStoreGatewayServer can be embedded to have forward compatible implementations. @@ -161,6 +199,9 @@ func (*UnimplementedStoreGatewayServer) LabelNames(ctx context.Context, req *sto func (*UnimplementedStoreGatewayServer) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") } +func (*UnimplementedStoreGatewayServer) ResourceAttributes(req *storepb.ResourceAttributesRequest, srv StoreGateway_ResourceAttributesServer) error { + return status.Errorf(codes.Unimplemented, "method ResourceAttributes not implemented") +} func RegisterStoreGatewayServer(s *grpc.Server, srv StoreGatewayServer) { s.RegisterService(&_StoreGateway_serviceDesc, srv) @@ -223,6 +264,27 @@ func _StoreGateway_LabelValues_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _StoreGateway_ResourceAttributes_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(storepb.ResourceAttributesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StoreGatewayServer).ResourceAttributes(m, &storeGatewayResourceAttributesServer{stream}) +} + +type StoreGateway_ResourceAttributesServer interface { + Send(*storepb.ResourceAttributesResponse) error + grpc.ServerStream +} + +type storeGatewayResourceAttributesServer struct { + grpc.ServerStream +} + +func (x *storeGatewayResourceAttributesServer) Send(m *storepb.ResourceAttributesResponse) error { + return x.ServerStream.SendMsg(m) +} + var _StoreGateway_serviceDesc = grpc.ServiceDesc{ ServiceName: "gatewaypb.StoreGateway", HandlerType: (*StoreGatewayServer)(nil), @@ -242,6 +304,11 @@ var _StoreGateway_serviceDesc = grpc.ServiceDesc{ Handler: _StoreGateway_Series_Handler, ServerStreams: true, }, + { + StreamName: 
"ResourceAttributes", + Handler: _StoreGateway_ResourceAttributes_Handler, + ServerStreams: true, + }, }, Metadata: "gateway.proto", } diff --git a/pkg/storegateway/storegatewaypb/gateway.proto b/pkg/storegateway/storegatewaypb/gateway.proto index 4ebceb57447..064f236c534 100644 --- a/pkg/storegateway/storegatewaypb/gateway.proto +++ b/pkg/storegateway/storegatewaypb/gateway.proto @@ -26,5 +26,8 @@ service StoreGateway { // LabelValues returns all label values for given label name. rpc LabelValues(thanos.LabelValuesRequest) returns (thanos.LabelValuesResponse); + // ResourceAttributes returns OTel resource attributes for series matching the matchers. + rpc ResourceAttributes(thanos.ResourceAttributesRequest) returns (stream thanos.ResourceAttributesResponse); + // When adding more read-path methods here, please update store_gateway_read_path_routes_regex in operations/mimir-mixin/config.libsonnet as well as needed. } diff --git a/pkg/storegateway/storepb/rpc.pb.go b/pkg/storegateway/storepb/rpc.pb.go index 5cc9be01516..bd24cb688cb 100644 --- a/pkg/storegateway/storepb/rpc.pb.go +++ b/pkg/storegateway/storepb/rpc.pb.go @@ -7,6 +7,7 @@ import ( fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" types "github.com/gogo/protobuf/types" "github.com/grafana/mimir/pkg/mimirpb" @@ -732,312 +733,518 @@ func (m *LabelValuesResponseHints) XXX_DiscardUnknown() { var xxx_messageInfo_LabelValuesResponseHints proto.InternalMessageInfo -func init() { - proto.RegisterType((*SeriesRequest)(nil), "thanos.SeriesRequest") - proto.RegisterType((*SeriesRequestHints)(nil), "thanos.SeriesRequestHints") - proto.RegisterType((*Stats)(nil), "thanos.Stats") - proto.RegisterType((*SeriesResponse)(nil), "thanos.SeriesResponse") - proto.RegisterType((*SeriesResponseHints)(nil), "thanos.SeriesResponseHints") - proto.RegisterType((*Block)(nil), "thanos.Block") - 
proto.RegisterType((*LabelNamesRequest)(nil), "thanos.LabelNamesRequest") - proto.RegisterType((*LabelNamesRequestHints)(nil), "thanos.LabelNamesRequestHints") - proto.RegisterType((*LabelNamesResponse)(nil), "thanos.LabelNamesResponse") - proto.RegisterType((*LabelNamesResponseHints)(nil), "thanos.LabelNamesResponseHints") - proto.RegisterType((*LabelValuesRequest)(nil), "thanos.LabelValuesRequest") - proto.RegisterType((*LabelValuesRequestHints)(nil), "thanos.LabelValuesRequestHints") - proto.RegisterType((*LabelValuesResponse)(nil), "thanos.LabelValuesResponse") - proto.RegisterType((*LabelValuesResponseHints)(nil), "thanos.LabelValuesResponseHints") -} - -func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } - -var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 981 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xbf, 0x6f, 0x23, 0x45, - 0x14, 0xc7, 0xf7, 0xc7, 0xac, 0x3d, 0x1e, 0xc7, 0x66, 0x33, 0xb1, 0x2e, 0x1b, 0x1f, 0xda, 0x58, - 0x2b, 0x21, 0x45, 0xfc, 0x70, 0x24, 0x90, 0x40, 0x42, 0x42, 0x28, 0xbe, 0x03, 0x9c, 0x15, 0x87, - 0xc4, 0x04, 0xae, 0xe0, 0x87, 0x56, 0x6b, 0x7b, 0xce, 0x19, 0x62, 0xef, 0xfa, 0x76, 0xd6, 0x90, - 0x5c, 0x45, 0x45, 0xcd, 0x5f, 0x40, 0x4d, 0x4f, 0x47, 0x41, 0x9d, 0x82, 0x22, 0xe5, 0x55, 0x88, - 0x38, 0x0d, 0xe5, 0xd5, 0x54, 0x68, 0x67, 0xc6, 0x3f, 0x36, 0xb6, 0x89, 0xee, 0x2e, 0xdd, 0xbe, - 0xf7, 0x7d, 0xf3, 0xf6, 0xcd, 0x7b, 0x9f, 0x79, 0xa8, 0x94, 0x8c, 0xba, 0xcd, 0x51, 0x12, 0xa7, - 0x31, 0x2e, 0xa4, 0xc7, 0x61, 0x14, 0xf3, 0x7a, 0xad, 0x1f, 0xf7, 0x63, 0xe1, 0xda, 0xcf, 0xbe, - 0xa4, 0x5a, 0xdf, 0xe9, 0xc7, 0x71, 0x7f, 0x40, 0xf7, 0x85, 0xd5, 0x19, 0x3f, 0xda, 0x0f, 0xa3, - 0x33, 0x25, 0x95, 0xd3, 0xb3, 0x11, 0xe5, 0xd2, 0xf0, 0xfe, 0x35, 0x50, 0xe5, 0x88, 0x26, 0x8c, - 0x72, 0x42, 0x1f, 0x8f, 0x29, 0x4f, 0xf1, 0x0e, 0x82, 0x43, 0x16, 0x05, 0x29, 0x1b, 0x52, 0x47, - 0x6f, 0xe8, 0x7b, 0x26, 0x29, 0x0e, 0x59, 0xf4, 0x05, 0x1b, 0x52, 0x21, 
0x85, 0xa7, 0x52, 0x32, - 0x94, 0x14, 0x9e, 0x0a, 0xe9, 0xdd, 0x4c, 0x4a, 0xbb, 0xc7, 0x34, 0xe1, 0x8e, 0xd9, 0x30, 0xf7, - 0xca, 0x6f, 0xd7, 0x9a, 0xb2, 0xc0, 0xe6, 0xa7, 0x61, 0x87, 0x0e, 0x1e, 0x48, 0xb1, 0x05, 0xce, - 0xff, 0xda, 0xd5, 0xc8, 0x2c, 0x16, 0xef, 0xa2, 0x32, 0x3f, 0x61, 0xa3, 0xa0, 0x7b, 0x3c, 0x8e, - 0x4e, 0xb8, 0x03, 0x1b, 0xfa, 0x1e, 0x24, 0x28, 0x73, 0xdd, 0x13, 0x1e, 0xfc, 0x3a, 0xb2, 0x8e, - 0x59, 0x94, 0x72, 0xa7, 0xd4, 0xd0, 0x45, 0x56, 0x79, 0xb1, 0xe6, 0xf4, 0x62, 0xcd, 0x83, 0xe8, - 0x8c, 0xc8, 0x10, 0xfc, 0x21, 0xaa, 0x24, 0xf2, 0x16, 0x81, 0x3c, 0x53, 0x15, 0x67, 0xea, 0xd3, - 0x4a, 0x72, 0x17, 0x6d, 0x67, 0x11, 0x64, 0x23, 0x59, 0xb0, 0xf0, 0x07, 0xe8, 0x2e, 0x4f, 0x13, - 0x1a, 0x0e, 0x59, 0xd4, 0x57, 0x25, 0x05, 0x9d, 0xac, 0xd4, 0x80, 0xb3, 0x27, 0xd4, 0xe9, 0x35, - 0xf4, 0x3d, 0x40, 0x9c, 0x59, 0x88, 0x2c, 0xb1, 0x95, 0x05, 0x1c, 0xb1, 0x27, 0xd4, 0x07, 0x10, - 0xd8, 0x96, 0x0f, 0xa0, 0x65, 0x17, 0x7c, 0x00, 0x0b, 0x76, 0xd1, 0x07, 0xb0, 0x68, 0x43, 0x1f, - 0x40, 0x64, 0x97, 0x7d, 0x00, 0xcb, 0xf6, 0x86, 0x0f, 0xe0, 0x86, 0x5d, 0xf1, 0x01, 0xac, 0xd8, - 0x55, 0xef, 0x37, 0x1d, 0xe1, 0xe5, 0x9a, 0xf0, 0x01, 0xaa, 0x76, 0x06, 0x71, 0xf7, 0x24, 0x98, - 0x75, 0x54, 0xbf, 0xb1, 0xa3, 0x15, 0x71, 0xe2, 0xc1, 0xb4, 0xad, 0x6f, 0x21, 0x3c, 0x4a, 0xe2, - 0xef, 0x68, 0x37, 0x65, 0x71, 0x14, 0xb0, 0xa8, 0x3b, 0x18, 0xf7, 0xe4, 0xcc, 0x20, 0xd9, 0x9c, - 0x2b, 0x87, 0x52, 0xc0, 0x6f, 0xa0, 0x05, 0x67, 0x30, 0xc8, 0xd2, 0xcb, 0x31, 0x96, 0x88, 0x3d, - 0x17, 0xc4, 0x6f, 0xb9, 0xf7, 0x1e, 0xb2, 0x8e, 0xd2, 0x30, 0xe5, 0xb8, 0x89, 0xb6, 0x1e, 0xd1, - 0xec, 0x87, 0xbd, 0x80, 0x45, 0x3d, 0x7a, 0x1a, 0x74, 0xce, 0x52, 0xca, 0x05, 0x34, 0x80, 0x6c, - 0x2a, 0xe9, 0x30, 0x53, 0x5a, 0x99, 0xe0, 0xfd, 0x69, 0xa2, 0xea, 0xf4, 0xba, 0x7c, 0x14, 0x47, - 0x9c, 0xe2, 0x3a, 0x2a, 0xfe, 0x10, 0x26, 0x11, 0x8b, 0xfa, 0xa2, 0xb8, 0x52, 0x5b, 0x23, 0x53, - 0x07, 0x7e, 0x73, 0x3a, 0x79, 0x73, 0xfd, 0xe4, 0xdb, 0xda, 0x74, 0xf6, 0xaf, 0x21, 0x8b, 0x67, - 0x55, 0x39, 
0x40, 0x44, 0x57, 0x66, 0x33, 0xcf, 0x9c, 0x59, 0x98, 0x50, 0xf1, 0x21, 0xb2, 0xe7, - 0x13, 0xe6, 0xa2, 0x18, 0xc7, 0x12, 0x27, 0x5e, 0x9d, 0x9f, 0x50, 0xba, 0xac, 0x55, 0x8c, 0xb7, - 0xad, 0x91, 0x57, 0x78, 0xde, 0x9f, 0x4f, 0xa5, 0xf8, 0x2d, 0xac, 0x49, 0xb5, 0x40, 0x4a, 0x2e, - 0x95, 0x82, 0xfc, 0x5b, 0xb4, 0xb3, 0xc4, 0x1d, 0xe5, 0x29, 0x1b, 0x86, 0x29, 0x75, 0x8a, 0x22, - 0xe7, 0xee, 0x9a, 0x9c, 0x1f, 0xa9, 0xb0, 0xb6, 0x46, 0xb6, 0xf9, 0x6a, 0x09, 0xdf, 0x47, 0xd5, - 0x44, 0x75, 0x5c, 0x3d, 0x0c, 0x28, 0x72, 0xde, 0xbd, 0xfe, 0x30, 0x64, 0x8c, 0xa0, 0xb0, 0xad, - 0x91, 0x4a, 0xb2, 0xe8, 0x68, 0x41, 0x54, 0x48, 0x28, 0x1f, 0x0f, 0x52, 0x1f, 0x40, 0xdd, 0x36, - 0xbc, 0xcf, 0xd1, 0xd6, 0x8a, 0x73, 0xf8, 0x7d, 0x54, 0x7d, 0x3c, 0xce, 0xfc, 0xbd, 0x40, 0x30, - 0x39, 0xa5, 0x77, 0x36, 0x91, 0x56, 0xe6, 0x9d, 0x62, 0xab, 0x42, 0x85, 0x8f, 0x7b, 0xdb, 0xc8, - 0x12, 0x5f, 0xb8, 0x8a, 0x0c, 0xd6, 0x13, 0x24, 0x95, 0x88, 0xc1, 0x7a, 0xde, 0x4f, 0x06, 0xda, - 0x14, 0xf8, 0x7d, 0x16, 0x0e, 0xe7, 0xab, 0xaa, 0x26, 0x66, 0x9e, 0xa4, 0x82, 0x10, 0x93, 0x48, - 0x03, 0xdb, 0xc8, 0xa4, 0x51, 0x4f, 0x70, 0x60, 0x92, 0xec, 0x73, 0xbe, 0x43, 0xac, 0x9b, 0x77, - 0xc8, 0xe2, 0x22, 0x2b, 0x3c, 0xc7, 0x22, 0xab, 0x21, 0x6b, 0xc0, 0x86, 0x2c, 0x15, 0xe3, 0x32, - 0x89, 0x34, 0xf0, 0xbd, 0xeb, 0x1b, 0x49, 0x36, 0xde, 0xcd, 0xa5, 0x5c, 0xbc, 0xd3, 0x8a, 0xad, - 0x24, 0xdb, 0xed, 0x03, 0x68, 0xd8, 0xa6, 0xf7, 0x35, 0xba, 0xb3, 0xfa, 0xcc, 0x2d, 0x6c, 0x0d, - 0xef, 0x77, 0x1d, 0xe1, 0xc5, 0xec, 0xea, 0x91, 0xd6, 0x90, 0x15, 0x65, 0x0e, 0x91, 0xb0, 0x44, - 0xa4, 0x81, 0xeb, 0x08, 0xaa, 0x97, 0xca, 0x1d, 0x43, 0x08, 0x33, 0x7b, 0xde, 0x70, 0xf3, 0xe6, - 0x86, 0x7f, 0xbc, 0x04, 0x27, 0xc8, 0x03, 0xbf, 0x5c, 0x91, 0x6c, 0x52, 0x1e, 0x4f, 0xef, 0x4b, - 0xb4, 0xbd, 0x26, 0xf2, 0xa5, 0x90, 0xfc, 0xc5, 0x50, 0x3d, 0x79, 0x18, 0x0e, 0xc6, 0x39, 0xf4, - 0xc4, 0x9a, 0x54, 0x8c, 0x4a, 0x63, 0x0e, 0x24, 0x58, 0x01, 0xa4, 0xb5, 0x02, 0xc8, 0xc2, 0xf3, - 0x01, 0x59, 0x7c, 0x11, 0x20, 0xe1, 0x22, 0x90, 
0xf7, 0xaf, 0x03, 0x59, 0x5a, 0xd1, 0xec, 0xdc, - 0x55, 0x57, 0x13, 0x69, 0xd8, 0xa6, 0x0f, 0xa0, 0x69, 0x03, 0xef, 0x1b, 0xd5, 0xf7, 0xe5, 0x43, - 0xb7, 0x81, 0xe4, 0x1f, 0x3a, 0xda, 0xca, 0xa5, 0x57, 0x4c, 0xde, 0x41, 0x85, 0xef, 0x85, 0x47, - 0x41, 0xa9, 0xac, 0x5b, 0xa3, 0xf2, 0x93, 0x35, 0x54, 0x36, 0x56, 0x36, 0xea, 0x7f, 0xb0, 0x7c, - 0x88, 0x9c, 0x75, 0xa1, 0x2f, 0xc3, 0x65, 0xeb, 0xe0, 0xfc, 0xd2, 0xd5, 0x2e, 0x2e, 0x5d, 0xed, - 0xe9, 0xa5, 0xab, 0x3d, 0xbb, 0x74, 0xf5, 0x1f, 0x27, 0xae, 0xfe, 0xeb, 0xc4, 0xd5, 0xcf, 0x27, - 0xae, 0x7e, 0x31, 0x71, 0xf5, 0xbf, 0x27, 0xae, 0xfe, 0xcf, 0xc4, 0xd5, 0x9e, 0x4d, 0x5c, 0xfd, - 0xe7, 0x2b, 0x57, 0xbb, 0xb8, 0x72, 0xb5, 0xa7, 0x57, 0xae, 0xf6, 0x55, 0x91, 0xa7, 0x71, 0x42, - 0x47, 0x9d, 0x4e, 0x41, 0x5c, 0xfc, 0x9d, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x5d, 0xf1, - 0xb9, 0x55, 0x0a, 0x00, 0x00, +// ResourceAttrFilter specifies a resource attribute key:value pair for reverse lookup. +type ResourceAttrFilter struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (this *SeriesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } +func (m *ResourceAttrFilter) Reset() { *m = ResourceAttrFilter{} } +func (m *ResourceAttrFilter) String() string { return fmt.Sprintf("ResourceAttrFilter{Key:%s, Value:%s}", m.Key, m.Value) } +func (*ResourceAttrFilter) ProtoMessage() {} - that1, ok := that.(*SeriesRequest) - if !ok { - that2, ok := that.(SeriesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.MinTime != that1.MinTime { - return false - } - if this.MaxTime != that1.MaxTime { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false - } - for i := range this.Matchers { - if !this.Matchers[i].Equal(&that1.Matchers[i]) { - return false - 
} - } - if this.SkipChunks != that1.SkipChunks { - return false - } - if !this.Hints.Equal(that1.Hints) { - return false - } - if !this.RequestHints.Equal(that1.RequestHints) { - return false - } - if this.StreamingChunksBatchSize != that1.StreamingChunksBatchSize { - return false +func (m *ResourceAttrFilter) GetKey() string { + if m != nil { + return m.Key } - return true + return "" } -func (this *SeriesRequestHints) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*SeriesRequestHints) - if !ok { - that2, ok := that.(SeriesRequestHints) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.BlockMatchers) != len(that1.BlockMatchers) { - return false - } - for i := range this.BlockMatchers { - if !this.BlockMatchers[i].Equal(&that1.BlockMatchers[i]) { - return false - } - } - if this.ProjectionInclude != that1.ProjectionInclude { - return false - } - if len(this.ProjectionLabels) != len(that1.ProjectionLabels) { - return false - } - for i := range this.ProjectionLabels { - if this.ProjectionLabels[i] != that1.ProjectionLabels[i] { - return false - } +func (m *ResourceAttrFilter) GetValue() string { + if m != nil { + return m.Value } - return true + return "" } -func (this *Stats) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*Stats) - if !ok { - that2, ok := that.(Stats) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.FetchedIndexBytes != that1.FetchedIndexBytes { - return false +func (m *ResourceAttrFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return true + return dAtA[:n], nil } -func (this *SeriesResponse) Equal(that 
interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*SeriesResponse) - if !ok { - that2, ok := that.(SeriesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if that1.Result == nil { - if this.Result != nil { - return false - } - } else if this.Result == nil { - return false - } else if !this.Result.Equal(that1.Result) { - return false - } - return true +func (m *ResourceAttrFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (this *SeriesResponse_Warning) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*SeriesResponse_Warning) - if !ok { - that2, ok := that.(SeriesResponse_Warning) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false +func (m *ResourceAttrFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 } - if this.Warning != that1.Warning { - return false + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa } - return true + return len(dAtA) - i, nil } -func (this *SeriesResponse_Hints) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*SeriesResponse_Hints) - if !ok { - that2, ok := that.(SeriesResponse_Hints) - if ok { - that1 = &that2 - } else { - return false - } +func (m *ResourceAttrFilter) Size() (n int) { + if m == nil { + return 0 } - if that1 == nil { - return this == nil - } else if this == nil { - return false + l := len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) } - if !this.Hints.Equal(that1.Hints) { - 
return false + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) } - return true + return n } -func (this *SeriesResponse_Stats) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*SeriesResponse_Stats) - if !ok { - that2, ok := that.(SeriesResponse_Stats) - if ok { - that1 = &that2 - } else { - return false +func (m *ResourceAttrFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttrFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttrFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if iNdEx > l { + return io.ErrUnexpectedEOF } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Stats.Equal(that1.Stats) { - return false - } - return true -} -func (this *SeriesResponse_StreamingSeries) Equal(that interface{}) bool { - if that == nil { - return this == nil - } + return nil +} - that1, ok := that.(*SeriesResponse_StreamingSeries) +// ResourceAttributesRequest queries OTel resource attributes for series matching matchers. +type ResourceAttributesRequest struct { + Start int64 `protobuf:"varint,3,opt,name=start,proto3" json:"start,omitempty"` + End int64 `protobuf:"varint,4,opt,name=end,proto3" json:"end,omitempty"` + // hints is an opaque data structure that can be used to carry additional information. 
+ Hints *types.Any `protobuf:"bytes,5,opt,name=hints,proto3" json:"hints,omitempty"` + Matchers []LabelMatcher `protobuf:"bytes,6,rep,name=matchers,proto3" json:"matchers"` + Limit int64 `protobuf:"varint,7,opt,name=limit,proto3" json:"limit,omitempty"` + ResourceAttrFilters []*ResourceAttrFilter `protobuf:"bytes,8,rep,name=resource_attr_filters,json=resourceAttrFilters,proto3" json:"resource_attr_filters,omitempty"` +} + +func (m *ResourceAttributesRequest) Reset() { *m = ResourceAttributesRequest{} } +func (*ResourceAttributesRequest) ProtoMessage() {} +func (*ResourceAttributesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{14} +} +func (m *ResourceAttributesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAttributesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceAttributesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributesRequest.Merge(m, src) +} +func (m *ResourceAttributesRequest) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributesRequest proto.InternalMessageInfo + +// ResourceAttributesResponse contains batches of series with their resource attributes. +type ResourceAttributesResponse struct { + Items []*ResourceAttributesSeriesData `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + Warnings []string `protobuf:"bytes,2,rep,name=warnings,proto3" json:"warnings,omitempty"` + /// hints is an opaque data structure that can be used to carry additional information from the store. 
+ Hints *types.Any `protobuf:"bytes,3,opt,name=hints,proto3" json:"hints,omitempty"` +} + +func (m *ResourceAttributesResponse) Reset() { *m = ResourceAttributesResponse{} } +func (*ResourceAttributesResponse) ProtoMessage() {} +func (*ResourceAttributesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{15} +} +func (m *ResourceAttributesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAttributesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceAttributesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributesResponse.Merge(m, src) +} +func (m *ResourceAttributesResponse) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributesResponse proto.InternalMessageInfo + +// ResourceAttributesSeriesData contains resource data for a single series. +type ResourceAttributesSeriesData struct { + // The series labels as a map of name->value. + Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Resource versions for this series. 
+ Versions []*ResourceVersionData `protobuf:"bytes,2,rep,name=versions,proto3" json:"versions,omitempty"` +} + +func (m *ResourceAttributesSeriesData) Reset() { *m = ResourceAttributesSeriesData{} } +func (*ResourceAttributesSeriesData) ProtoMessage() {} +func (*ResourceAttributesSeriesData) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{16} +} +func (m *ResourceAttributesSeriesData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributesSeriesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceAttributesSeriesData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceAttributesSeriesData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributesSeriesData.Merge(m, src) +} +func (m *ResourceAttributesSeriesData) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributesSeriesData) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributesSeriesData.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributesSeriesData proto.InternalMessageInfo + +// ResourceVersionData represents a snapshot of resource data at a point in time. 
+type ResourceVersionData struct { + Identifying map[string]string `protobuf:"bytes,1,rep,name=identifying,proto3" json:"identifying,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Descriptive map[string]string `protobuf:"bytes,2,rep,name=descriptive,proto3" json:"descriptive,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Entities []*EntityData `protobuf:"bytes,3,rep,name=entities,proto3" json:"entities,omitempty"` + MinTimeMs int64 `protobuf:"varint,4,opt,name=min_time_ms,json=minTimeMs,proto3" json:"min_time_ms,omitempty"` + MaxTimeMs int64 `protobuf:"varint,5,opt,name=max_time_ms,json=maxTimeMs,proto3" json:"max_time_ms,omitempty"` +} + +func (m *ResourceVersionData) Reset() { *m = ResourceVersionData{} } +func (*ResourceVersionData) ProtoMessage() {} +func (*ResourceVersionData) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{17} +} +func (m *ResourceVersionData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceVersionData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceVersionData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceVersionData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceVersionData.Merge(m, src) +} +func (m *ResourceVersionData) XXX_Size() int { + return m.Size() +} +func (m *ResourceVersionData) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceVersionData.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceVersionData proto.InternalMessageInfo + +// EntityData represents a typed OTel entity. 
+type EntityData struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Id map[string]string `protobuf:"bytes,2,rep,name=id,proto3" json:"id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Description map[string]string `protobuf:"bytes,3,rep,name=description,proto3" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *EntityData) Reset() { *m = EntityData{} } +func (*EntityData) ProtoMessage() {} +func (*EntityData) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{18} +} +func (m *EntityData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EntityData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EntityData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EntityData) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityData.Merge(m, src) +} +func (m *EntityData) XXX_Size() int { + return m.Size() +} +func (m *EntityData) XXX_DiscardUnknown() { + xxx_messageInfo_EntityData.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityData proto.InternalMessageInfo + +func init() { + proto.RegisterType((*SeriesRequest)(nil), "thanos.SeriesRequest") + proto.RegisterType((*SeriesRequestHints)(nil), "thanos.SeriesRequestHints") + proto.RegisterType((*Stats)(nil), "thanos.Stats") + proto.RegisterType((*SeriesResponse)(nil), "thanos.SeriesResponse") + proto.RegisterType((*SeriesResponseHints)(nil), "thanos.SeriesResponseHints") + proto.RegisterType((*Block)(nil), "thanos.Block") + proto.RegisterType((*LabelNamesRequest)(nil), "thanos.LabelNamesRequest") + proto.RegisterType((*LabelNamesRequestHints)(nil), "thanos.LabelNamesRequestHints") + proto.RegisterType((*LabelNamesResponse)(nil), 
"thanos.LabelNamesResponse") + proto.RegisterType((*LabelNamesResponseHints)(nil), "thanos.LabelNamesResponseHints") + proto.RegisterType((*LabelValuesRequest)(nil), "thanos.LabelValuesRequest") + proto.RegisterType((*LabelValuesRequestHints)(nil), "thanos.LabelValuesRequestHints") + proto.RegisterType((*LabelValuesResponse)(nil), "thanos.LabelValuesResponse") + proto.RegisterType((*LabelValuesResponseHints)(nil), "thanos.LabelValuesResponseHints") + proto.RegisterType((*ResourceAttributesRequest)(nil), "thanos.ResourceAttributesRequest") + proto.RegisterType((*ResourceAttributesResponse)(nil), "thanos.ResourceAttributesResponse") + proto.RegisterType((*ResourceAttributesSeriesData)(nil), "thanos.ResourceAttributesSeriesData") + proto.RegisterMapType((map[string]string)(nil), "thanos.ResourceAttributesSeriesData.LabelsEntry") + proto.RegisterType((*ResourceVersionData)(nil), "thanos.ResourceVersionData") + proto.RegisterMapType((map[string]string)(nil), "thanos.ResourceVersionData.DescriptiveEntry") + proto.RegisterMapType((map[string]string)(nil), "thanos.ResourceVersionData.IdentifyingEntry") + proto.RegisterType((*EntityData)(nil), "thanos.EntityData") + proto.RegisterMapType((map[string]string)(nil), "thanos.EntityData.DescriptionEntry") + proto.RegisterMapType((map[string]string)(nil), "thanos.EntityData.IdEntry") +} + +func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } + +var fileDescriptor_77a6da22d6a3feb1 = []byte{ + // 1295 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xcb, 0x6f, 0x1b, 0xd5, + 0x17, 0x9e, 0xa7, 0x3d, 0x3e, 0xae, 0xfd, 0x9b, 0xde, 0x44, 0xad, 0xe3, 0x56, 0xd3, 0x68, 0x7e, + 0x20, 0x45, 0xa5, 0xb8, 0x08, 0x04, 0x85, 0x4a, 0x80, 0x92, 0x26, 0xe0, 0x58, 0xa4, 0x12, 0x53, + 0xe8, 0x82, 0x87, 0xac, 0xb1, 0x7d, 0x9b, 0x5c, 0x62, 0xcf, 0xb8, 0x73, 0xaf, 0x4b, 0xdd, 0x15, + 0x2b, 0x16, 0xac, 0xf8, 0x0b, 0x10, 0x1b, 0x24, 0xf6, 0xec, 0x58, 0xb0, 0x2e, 
0x12, 0x8b, 0x2e, + 0xbb, 0x40, 0x88, 0xb8, 0x1b, 0x96, 0x5d, 0xb3, 0x42, 0xf7, 0x31, 0xf6, 0x38, 0xb6, 0x13, 0xd2, + 0x66, 0xc3, 0x6e, 0xee, 0x79, 0x7c, 0x3e, 0xe7, 0x3b, 0xdf, 0x3d, 0x9e, 0x81, 0x42, 0xd2, 0x6f, + 0xd7, 0xfa, 0x49, 0xcc, 0x62, 0x94, 0x63, 0x7b, 0x61, 0x14, 0xd3, 0xea, 0xf2, 0x6e, 0xbc, 0x1b, + 0x0b, 0xd3, 0x55, 0xfe, 0x24, 0xbd, 0xd5, 0x95, 0xdd, 0x38, 0xde, 0xed, 0xe2, 0xab, 0xe2, 0xd4, + 0x1a, 0xdc, 0xb9, 0x1a, 0x46, 0x43, 0xe5, 0x2a, 0xb2, 0x61, 0x1f, 0x53, 0x79, 0xf0, 0xff, 0x36, + 0xa0, 0x74, 0x0b, 0x27, 0x04, 0xd3, 0x00, 0xdf, 0x1d, 0x60, 0xca, 0xd0, 0x0a, 0x38, 0x3d, 0x12, + 0x35, 0x19, 0xe9, 0xe1, 0x8a, 0xbe, 0xaa, 0xaf, 0x99, 0x41, 0xbe, 0x47, 0xa2, 0x8f, 0x48, 0x0f, + 0x0b, 0x57, 0x78, 0x5f, 0xba, 0x0c, 0xe5, 0x0a, 0xef, 0x0b, 0xd7, 0x1b, 0xdc, 0xc5, 0xda, 0x7b, + 0x38, 0xa1, 0x15, 0x73, 0xd5, 0x5c, 0x2b, 0xbe, 0xba, 0x5c, 0x93, 0x05, 0xd6, 0x3e, 0x08, 0x5b, + 0xb8, 0xbb, 0x23, 0x9d, 0x1b, 0xd6, 0xc3, 0x3f, 0x2e, 0x69, 0xc1, 0x38, 0x16, 0x5d, 0x82, 0x22, + 0xdd, 0x27, 0xfd, 0x66, 0x7b, 0x6f, 0x10, 0xed, 0xd3, 0x8a, 0xb3, 0xaa, 0xaf, 0x39, 0x01, 0x70, + 0xd3, 0x0d, 0x61, 0x41, 0x97, 0xc1, 0xde, 0x23, 0x11, 0xa3, 0x95, 0xc2, 0xaa, 0x2e, 0x50, 0x65, + 0x63, 0xb5, 0xb4, 0xb1, 0xda, 0x7a, 0x34, 0x0c, 0x64, 0x08, 0x7a, 0x17, 0x4a, 0x89, 0xec, 0xa2, + 0x29, 0x73, 0xca, 0x22, 0xa7, 0x9a, 0x56, 0x32, 0xd5, 0x68, 0x9d, 0x47, 0x04, 0x67, 0x92, 0xcc, + 0x09, 0xbd, 0x0d, 0x17, 0x28, 0x4b, 0x70, 0xd8, 0x23, 0xd1, 0xae, 0x2a, 0xa9, 0xd9, 0xe2, 0xa5, + 0x36, 0x29, 0x79, 0x80, 0x2b, 0x9d, 0x55, 0x7d, 0xcd, 0x0a, 0x2a, 0xe3, 0x10, 0x59, 0xe2, 0x06, + 0x0f, 0xb8, 0x45, 0x1e, 0xe0, 0x86, 0xe5, 0x58, 0xae, 0xdd, 0xb0, 0x1c, 0xdb, 0xcd, 0x35, 0x2c, + 0x27, 0xe7, 0xe6, 0x1b, 0x96, 0x93, 0x77, 0x9d, 0x86, 0xe5, 0x80, 0x5b, 0x6c, 0x58, 0x4e, 0xd1, + 0x3d, 0xd3, 0xb0, 0x9c, 0x33, 0x6e, 0xa9, 0x61, 0x39, 0x25, 0xb7, 0xec, 0xff, 0xa4, 0x03, 0x9a, + 0xad, 0x09, 0xad, 0x43, 0xb9, 0xd5, 0x8d, 0xdb, 0xfb, 0xcd, 0x31, 0xa3, 0xfa, 0xb1, 0x8c, 0x96, + 0x44, 0xc6, 0x4e, 
0x4a, 0xeb, 0xcb, 0x80, 0xfa, 0x49, 0xfc, 0x05, 0x6e, 0x33, 0x12, 0x47, 0x4d, + 0x12, 0xb5, 0xbb, 0x83, 0x8e, 0x9c, 0x99, 0x13, 0x9c, 0x9d, 0x78, 0xb6, 0xa5, 0x03, 0xbd, 0x04, + 0x19, 0x63, 0xb3, 0xcb, 0xe1, 0xe5, 0x18, 0x0b, 0x81, 0x3b, 0x71, 0x88, 0x9f, 0xa5, 0xfe, 0x35, + 0xb0, 0x6f, 0xb1, 0x90, 0x51, 0x54, 0x83, 0xa5, 0x3b, 0x98, 0xff, 0x60, 0xa7, 0x49, 0xa2, 0x0e, + 0xbe, 0xdf, 0x6c, 0x0d, 0x19, 0xa6, 0x42, 0x34, 0x56, 0x70, 0x56, 0xb9, 0xb6, 0xb9, 0x67, 0x83, + 0x3b, 0xfc, 0xdf, 0x4c, 0x28, 0xa7, 0xed, 0xd2, 0x7e, 0x1c, 0x51, 0x8c, 0xaa, 0x90, 0xff, 0x32, + 0x4c, 0x22, 0x12, 0xed, 0x8a, 0xe2, 0x0a, 0x75, 0x2d, 0x48, 0x0d, 0xe8, 0x4a, 0x3a, 0x79, 0x73, + 0xf1, 0xe4, 0xeb, 0x5a, 0x3a, 0xfb, 0x17, 0xc1, 0xa6, 0xbc, 0xaa, 0x8a, 0x25, 0xa2, 0x4b, 0xe3, + 0x99, 0x73, 0x23, 0x0f, 0x13, 0x5e, 0xb4, 0x0d, 0xee, 0x64, 0xc2, 0x54, 0x14, 0x53, 0xb1, 0x45, + 0xc6, 0xc5, 0x49, 0x86, 0xf2, 0xcb, 0x5a, 0xc5, 0x78, 0xeb, 0x5a, 0xf0, 0x3f, 0x3a, 0x6d, 0x9f, + 0x86, 0x52, 0xfa, 0xcd, 0x2d, 0x80, 0xca, 0x28, 0x65, 0x0a, 0x4a, 0x89, 0xfc, 0x73, 0x58, 0x99, + 0xd1, 0x1d, 0xa6, 0x8c, 0xf4, 0x42, 0x86, 0x2b, 0x79, 0x81, 0x79, 0x69, 0x01, 0xe6, 0x96, 0x0a, + 0xab, 0x6b, 0xc1, 0x79, 0x3a, 0xdf, 0x85, 0x36, 0xa1, 0x9c, 0x28, 0xc6, 0xd5, 0xc5, 0x70, 0x04, + 0xe6, 0x85, 0xc3, 0x17, 0x43, 0xc6, 0x08, 0x15, 0xd6, 0xb5, 0xa0, 0x94, 0x64, 0x0d, 0x1b, 0x0e, + 0xe4, 0x12, 0x4c, 0x07, 0x5d, 0xd6, 0xb0, 0x1c, 0xdd, 0x35, 0xfc, 0x0f, 0x61, 0x69, 0x4e, 0x1e, + 0xba, 0x0e, 0xe5, 0xbb, 0x03, 0x6e, 0xef, 0x34, 0x85, 0x26, 0x53, 0xf5, 0x8e, 0x27, 0xb2, 0xc1, + 0xad, 0xa9, 0x6c, 0x55, 0xa8, 0xb0, 0x51, 0xff, 0x3c, 0xd8, 0xe2, 0x09, 0x95, 0xc1, 0x20, 0x1d, + 0xa1, 0xa4, 0x42, 0x60, 0x90, 0x8e, 0xff, 0xb5, 0x01, 0x67, 0x85, 0xfc, 0x6e, 0x86, 0xbd, 0xc9, + 0xaa, 0x5a, 0x16, 0x33, 0x4f, 0x98, 0x50, 0x88, 0x19, 0xc8, 0x03, 0x72, 0xc1, 0xc4, 0x51, 0x47, + 0xe8, 0xc0, 0x0c, 0xf8, 0xe3, 0x64, 0x87, 0xd8, 0xc7, 0xef, 0x90, 0xec, 0x22, 0xcb, 0x9d, 0x60, + 0x91, 0x2d, 0x83, 0xdd, 0x25, 0x3d, 0xc2, 0xc4, 0xb8, 
0xcc, 0x40, 0x1e, 0xd0, 0x8d, 0xc3, 0x1b, + 0x49, 0x12, 0xef, 0x4d, 0x41, 0x66, 0x7b, 0x9a, 0xb3, 0x95, 0x24, 0xdd, 0x0d, 0xcb, 0x31, 0x5c, + 0xd3, 0xff, 0x14, 0xce, 0xcd, 0xcf, 0x39, 0x85, 0xad, 0xe1, 0xff, 0xac, 0x03, 0xca, 0xa2, 0xab, + 0x4b, 0xba, 0x0c, 0x76, 0xc4, 0x0d, 0x02, 0xb0, 0x10, 0xc8, 0x03, 0xaa, 0x82, 0xa3, 0x6e, 0x2a, + 0xad, 0x18, 0xc2, 0x31, 0x3e, 0x4f, 0x08, 0x37, 0x8f, 0x27, 0xfc, 0xbd, 0x19, 0x71, 0x5a, 0xd3, + 0x82, 0x9f, 0xad, 0x48, 0x92, 0x34, 0x2d, 0x4f, 0xff, 0x63, 0x38, 0xbf, 0x20, 0xf2, 0xb9, 0x24, + 0xf9, 0x9d, 0xa1, 0x38, 0xb9, 0x1d, 0x76, 0x07, 0x53, 0xd2, 0x13, 0x6b, 0x52, 0x69, 0x54, 0x1e, + 0x26, 0x82, 0xb4, 0xe6, 0x08, 0xd2, 0x9e, 0x23, 0xc8, 0xdc, 0xc9, 0x04, 0x99, 0x7f, 0x16, 0x41, + 0x3a, 0x59, 0x41, 0x6e, 0x1e, 0x16, 0x64, 0x61, 0x0e, 0xd9, 0x53, 0xad, 0xce, 0x57, 0xa4, 0xe1, + 0x9a, 0x0d, 0xcb, 0x31, 0x5d, 0xcb, 0xff, 0x4c, 0xf1, 0x3e, 0x9b, 0x74, 0x1a, 0x92, 0xfc, 0x45, + 0x87, 0xa5, 0x29, 0x78, 0xa5, 0xc9, 0x73, 0x90, 0xbb, 0x27, 0x2c, 0x4a, 0x94, 0xea, 0x74, 0x6a, + 0xaa, 0x7c, 0x7f, 0x81, 0x2a, 0x57, 0xe7, 0x12, 0x75, 0x84, 0x2c, 0x6f, 0x43, 0x65, 0x51, 0xe8, + 0x73, 0xe9, 0xf2, 0x57, 0x1d, 0x56, 0x02, 0x4c, 0xe3, 0x41, 0xd2, 0xc6, 0xeb, 0x8c, 0x25, 0xa4, + 0x35, 0x60, 0xff, 0xa1, 0xcd, 0x38, 0xb5, 0xd4, 0xbe, 0xd7, 0xa1, 0x3a, 0xaf, 0x17, 0x35, 0xeb, + 0xeb, 0x60, 0x13, 0x86, 0x7b, 0x29, 0x3b, 0x2f, 0xa4, 0xbf, 0x3a, 0x9b, 0x22, 0xff, 0x8f, 0x36, + 0x43, 0x16, 0x06, 0x32, 0xe5, 0xb4, 0xf4, 0xe0, 0xff, 0xae, 0xc3, 0xc5, 0xa3, 0x7e, 0x0f, 0xd5, + 0x21, 0xa7, 0xde, 0x9b, 0x64, 0x95, 0xaf, 0xfc, 0x9b, 0x2a, 0x25, 0x71, 0x74, 0x2b, 0x62, 0xc9, + 0x30, 0x50, 0xf9, 0xe8, 0x1a, 0x38, 0xf7, 0x70, 0x42, 0x49, 0x1c, 0xc9, 0x92, 0x33, 0xff, 0xd3, + 0x29, 0xd6, 0x6d, 0xe9, 0x17, 0x8d, 0x8e, 0x83, 0xab, 0x6f, 0x41, 0x31, 0x83, 0xc7, 0xa7, 0xbd, + 0x8f, 0x87, 0x6a, 0x41, 0xf1, 0x47, 0x3e, 0x09, 0x71, 0x4d, 0xe4, 0xbb, 0x56, 0x20, 0x0f, 0xd7, + 0x8d, 0x37, 0x75, 0xff, 0x07, 0x13, 0x96, 0xe6, 0x80, 0xa3, 0x9b, 0x50, 0x24, 0x1d, 0x1c, 
0x31, + 0x72, 0x67, 0xc8, 0xdf, 0xd1, 0x64, 0x6b, 0x57, 0x8e, 0x28, 0xa7, 0xb6, 0x3d, 0x09, 0x97, 0x6d, + 0x65, 0x01, 0x38, 0x5e, 0x07, 0xd3, 0x76, 0x42, 0xfa, 0x8c, 0xdc, 0xc3, 0xaa, 0xbd, 0x23, 0xf1, + 0x36, 0x27, 0xe1, 0x0a, 0x2f, 0x03, 0x80, 0x6a, 0xe0, 0x70, 0x70, 0xc6, 0x5f, 0xe3, 0xe4, 0x67, + 0x07, 0x4a, 0xc1, 0xb6, 0xb8, 0x7d, 0x28, 0x29, 0x4a, 0x63, 0x90, 0x07, 0xc5, 0xf4, 0xe3, 0xa6, + 0xd9, 0xa3, 0xea, 0x26, 0x14, 0xd4, 0xf7, 0xcd, 0x8e, 0xf4, 0xab, 0x2f, 0x1c, 0xee, 0xb7, 0x95, + 0x5f, 0x7e, 0xe4, 0xec, 0xd0, 0xea, 0x3b, 0xe0, 0x1e, 0x6e, 0xf0, 0x24, 0x3c, 0xf3, 0xfc, 0xc3, + 0x0d, 0x9d, 0x68, 0x4e, 0xdf, 0x18, 0x00, 0x93, 0xc6, 0x10, 0x02, 0x8b, 0x7f, 0xcc, 0xa9, 0x5c, + 0xf1, 0x8c, 0x2e, 0x8b, 0x57, 0x27, 0xc9, 0x6c, 0x75, 0x96, 0x8c, 0xda, 0x76, 0x47, 0xf2, 0x68, + 0x90, 0x0e, 0xda, 0xca, 0x8c, 0x23, 0x8e, 0x14, 0x83, 0xff, 0x9f, 0x93, 0xb4, 0x39, 0x89, 0x3a, + 0x3c, 0x85, 0x38, 0xaa, 0xbe, 0x0e, 0x79, 0x85, 0xfa, 0xcc, 0x64, 0x28, 0xdc, 0x93, 0xe4, 0x6f, + 0xac, 0x3f, 0x3c, 0xf0, 0xb4, 0x47, 0x07, 0x9e, 0xf6, 0xf8, 0xc0, 0xd3, 0x9e, 0x1e, 0x78, 0xfa, + 0x57, 0x23, 0x4f, 0xff, 0x71, 0xe4, 0xe9, 0x0f, 0x47, 0x9e, 0xfe, 0x68, 0xe4, 0xe9, 0x7f, 0x8e, + 0x3c, 0xfd, 0xaf, 0x91, 0xa7, 0x3d, 0x1d, 0x79, 0xfa, 0xb7, 0x4f, 0x3c, 0xed, 0xd1, 0x13, 0x4f, + 0x7b, 0xfc, 0xc4, 0xd3, 0x3e, 0xc9, 0x53, 0x16, 0x27, 0xb8, 0xdf, 0x6a, 0xe5, 0xc4, 0x5d, 0x7f, + 0xed, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x3f, 0xc9, 0x73, 0x58, 0x0f, 0x00, 0x00, +} + +func (this *SeriesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesRequest) if !ok { - that2, ok := that.(SeriesResponse_StreamingSeries) + that2, ok := that.(SeriesRequest) if ok { that1 = &that2 } else { @@ -1049,19 +1256,232 @@ func (this *SeriesResponse_StreamingSeries) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.StreamingSeries.Equal(that1.StreamingSeries) { + if this.MinTime != that1.MinTime { + return false + } 
+ if this.MaxTime != that1.MaxTime { + return false + } + if len(this.Matchers) != len(that1.Matchers) { + return false + } + for i := range this.Matchers { + if !this.Matchers[i].Equal(&that1.Matchers[i]) { + return false + } + } + if this.SkipChunks != that1.SkipChunks { + return false + } + if !this.Hints.Equal(that1.Hints) { + return false + } + if !this.RequestHints.Equal(that1.RequestHints) { + return false + } + if this.StreamingChunksBatchSize != that1.StreamingChunksBatchSize { return false } return true } -func (this *SeriesResponse_StreamingChunks) Equal(that interface{}) bool { +func (this *SeriesRequestHints) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*SeriesResponse_StreamingChunks) + that1, ok := that.(*SeriesRequestHints) if !ok { - that2, ok := that.(SeriesResponse_StreamingChunks) + that2, ok := that.(SeriesRequestHints) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.BlockMatchers) != len(that1.BlockMatchers) { + return false + } + for i := range this.BlockMatchers { + if !this.BlockMatchers[i].Equal(&that1.BlockMatchers[i]) { + return false + } + } + if this.ProjectionInclude != that1.ProjectionInclude { + return false + } + if len(this.ProjectionLabels) != len(that1.ProjectionLabels) { + return false + } + for i := range this.ProjectionLabels { + if this.ProjectionLabels[i] != that1.ProjectionLabels[i] { + return false + } + } + return true +} +func (this *Stats) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Stats) + if !ok { + that2, ok := that.(Stats) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FetchedIndexBytes != that1.FetchedIndexBytes { + return false + } + return true +} +func (this *SeriesResponse) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesResponse) + if !ok { + that2, ok := that.(SeriesResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Result == nil { + if this.Result != nil { + return false + } + } else if this.Result == nil { + return false + } else if !this.Result.Equal(that1.Result) { + return false + } + return true +} +func (this *SeriesResponse_Warning) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesResponse_Warning) + if !ok { + that2, ok := that.(SeriesResponse_Warning) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Warning != that1.Warning { + return false + } + return true +} +func (this *SeriesResponse_Hints) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesResponse_Hints) + if !ok { + that2, ok := that.(SeriesResponse_Hints) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Hints.Equal(that1.Hints) { + return false + } + return true +} +func (this *SeriesResponse_Stats) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesResponse_Stats) + if !ok { + that2, ok := that.(SeriesResponse_Stats) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Stats.Equal(that1.Stats) { + return false + } + return true +} +func (this *SeriesResponse_StreamingSeries) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesResponse_StreamingSeries) + if !ok { + that2, ok 
:= that.(SeriesResponse_StreamingSeries) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StreamingSeries.Equal(that1.StreamingSeries) { + return false + } + return true +} +func (this *SeriesResponse_StreamingChunks) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesResponse_StreamingChunks) + if !ok { + that2, ok := that.(SeriesResponse_StreamingChunks) if ok { that1 = &that2 } else { @@ -1472,53 +1892,262 @@ func (this *LabelValuesResponseHints) Equal(that interface{}) bool { } return true } -func (this *SeriesRequest) GoString() string { - if this == nil { - return "nil" +func (this *ResourceAttributesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 11) - s = append(s, "&storepb.SeriesRequest{") - s = append(s, "MinTime: "+fmt.Sprintf("%#v", this.MinTime)+",\n") - s = append(s, "MaxTime: "+fmt.Sprintf("%#v", this.MaxTime)+",\n") - if this.Matchers != nil { - vs := make([]LabelMatcher, len(this.Matchers)) - for i := range vs { - vs[i] = this.Matchers[i] + + that1, ok := that.(*ResourceAttributesRequest) + if !ok { + that2, ok := that.(ResourceAttributesRequest) + if ok { + that1 = &that2 + } else { + return false } - s = append(s, "Matchers: "+fmt.Sprintf("%#v", vs)+",\n") } - s = append(s, "SkipChunks: "+fmt.Sprintf("%#v", this.SkipChunks)+",\n") - if this.Hints != nil { - s = append(s, "Hints: "+fmt.Sprintf("%#v", this.Hints)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - if this.RequestHints != nil { - s = append(s, "RequestHints: "+fmt.Sprintf("%#v", this.RequestHints)+",\n") + if this.Start != that1.Start { + return false } - s = append(s, "StreamingChunksBatchSize: "+fmt.Sprintf("%#v", this.StreamingChunksBatchSize)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this 
*SeriesRequestHints) GoString() string { - if this == nil { - return "nil" + if this.End != that1.End { + return false } - s := make([]string, 0, 7) - s = append(s, "&storepb.SeriesRequestHints{") - if this.BlockMatchers != nil { - vs := make([]LabelMatcher, len(this.BlockMatchers)) - for i := range vs { - vs[i] = this.BlockMatchers[i] + if !this.Hints.Equal(that1.Hints) { + return false + } + if len(this.Matchers) != len(that1.Matchers) { + return false + } + for i := range this.Matchers { + if !this.Matchers[i].Equal(&that1.Matchers[i]) { + return false } - s = append(s, "BlockMatchers: "+fmt.Sprintf("%#v", vs)+",\n") } - s = append(s, "ProjectionInclude: "+fmt.Sprintf("%#v", this.ProjectionInclude)+",\n") - s = append(s, "ProjectionLabels: "+fmt.Sprintf("%#v", this.ProjectionLabels)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Stats) GoString() string { - if this == nil { - return "nil" + if this.Limit != that1.Limit { + return false + } + return true +} +func (this *ResourceAttributesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceAttributesResponse) + if !ok { + that2, ok := that.(ResourceAttributesResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Items) != len(that1.Items) { + return false + } + for i := range this.Items { + if !this.Items[i].Equal(that1.Items[i]) { + return false + } + } + if len(this.Warnings) != len(that1.Warnings) { + return false + } + for i := range this.Warnings { + if this.Warnings[i] != that1.Warnings[i] { + return false + } + } + if !this.Hints.Equal(that1.Hints) { + return false + } + return true +} +func (this *ResourceAttributesSeriesData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceAttributesSeriesData) + if !ok { + that2, ok := 
that.(ResourceAttributesSeriesData) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if this.Labels[i] != that1.Labels[i] { + return false + } + } + if len(this.Versions) != len(that1.Versions) { + return false + } + for i := range this.Versions { + if !this.Versions[i].Equal(that1.Versions[i]) { + return false + } + } + return true +} +func (this *ResourceVersionData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceVersionData) + if !ok { + that2, ok := that.(ResourceVersionData) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Identifying) != len(that1.Identifying) { + return false + } + for i := range this.Identifying { + if this.Identifying[i] != that1.Identifying[i] { + return false + } + } + if len(this.Descriptive) != len(that1.Descriptive) { + return false + } + for i := range this.Descriptive { + if this.Descriptive[i] != that1.Descriptive[i] { + return false + } + } + if len(this.Entities) != len(that1.Entities) { + return false + } + for i := range this.Entities { + if !this.Entities[i].Equal(that1.Entities[i]) { + return false + } + } + if this.MinTimeMs != that1.MinTimeMs { + return false + } + if this.MaxTimeMs != that1.MaxTimeMs { + return false + } + return true +} +func (this *EntityData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EntityData) + if !ok { + that2, ok := that.(EntityData) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if len(this.Id) != len(that1.Id) { + return false + } 
+ for i := range this.Id { + if this.Id[i] != that1.Id[i] { + return false + } + } + if len(this.Description) != len(that1.Description) { + return false + } + for i := range this.Description { + if this.Description[i] != that1.Description[i] { + return false + } + } + return true +} +func (this *SeriesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&storepb.SeriesRequest{") + s = append(s, "MinTime: "+fmt.Sprintf("%#v", this.MinTime)+",\n") + s = append(s, "MaxTime: "+fmt.Sprintf("%#v", this.MaxTime)+",\n") + if this.Matchers != nil { + vs := make([]LabelMatcher, len(this.Matchers)) + for i := range vs { + vs[i] = this.Matchers[i] + } + s = append(s, "Matchers: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "SkipChunks: "+fmt.Sprintf("%#v", this.SkipChunks)+",\n") + if this.Hints != nil { + s = append(s, "Hints: "+fmt.Sprintf("%#v", this.Hints)+",\n") + } + if this.RequestHints != nil { + s = append(s, "RequestHints: "+fmt.Sprintf("%#v", this.RequestHints)+",\n") + } + s = append(s, "StreamingChunksBatchSize: "+fmt.Sprintf("%#v", this.StreamingChunksBatchSize)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SeriesRequestHints) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&storepb.SeriesRequestHints{") + if this.BlockMatchers != nil { + vs := make([]LabelMatcher, len(this.BlockMatchers)) + for i := range vs { + vs[i] = this.BlockMatchers[i] + } + s = append(s, "BlockMatchers: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "ProjectionInclude: "+fmt.Sprintf("%#v", this.ProjectionInclude)+",\n") + s = append(s, "ProjectionLabels: "+fmt.Sprintf("%#v", this.ProjectionLabels)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Stats) GoString() string { + if this == nil { + return "nil" } s := make([]string, 0, 5) s = append(s, "&storepb.Stats{") @@ -1769,6 +2398,145 @@ func (this 
*LabelValuesResponseHints) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *ResourceAttributesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&storepb.ResourceAttributesRequest{") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + if this.Hints != nil { + s = append(s, "Hints: "+fmt.Sprintf("%#v", this.Hints)+",\n") + } + if this.Matchers != nil { + vs := make([]LabelMatcher, len(this.Matchers)) + for i := range vs { + vs[i] = this.Matchers[i] + } + s = append(s, "Matchers: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceAttributesResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&storepb.ResourceAttributesResponse{") + if this.Items != nil { + s = append(s, "Items: "+fmt.Sprintf("%#v", this.Items)+",\n") + } + s = append(s, "Warnings: "+fmt.Sprintf("%#v", this.Warnings)+",\n") + if this.Hints != nil { + s = append(s, "Hints: "+fmt.Sprintf("%#v", this.Hints)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceAttributesSeriesData) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&storepb.ResourceAttributesSeriesData{") + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + if this.Labels != nil { + s = append(s, "Labels: "+mapStringForLabels+",\n") + } + if this.Versions != nil { + s = append(s, "Versions: 
"+fmt.Sprintf("%#v", this.Versions)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceVersionData) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&storepb.ResourceVersionData{") + keysForIdentifying := make([]string, 0, len(this.Identifying)) + for k, _ := range this.Identifying { + keysForIdentifying = append(keysForIdentifying, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForIdentifying) + mapStringForIdentifying := "map[string]string{" + for _, k := range keysForIdentifying { + mapStringForIdentifying += fmt.Sprintf("%#v: %#v,", k, this.Identifying[k]) + } + mapStringForIdentifying += "}" + if this.Identifying != nil { + s = append(s, "Identifying: "+mapStringForIdentifying+",\n") + } + keysForDescriptive := make([]string, 0, len(this.Descriptive)) + for k, _ := range this.Descriptive { + keysForDescriptive = append(keysForDescriptive, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDescriptive) + mapStringForDescriptive := "map[string]string{" + for _, k := range keysForDescriptive { + mapStringForDescriptive += fmt.Sprintf("%#v: %#v,", k, this.Descriptive[k]) + } + mapStringForDescriptive += "}" + if this.Descriptive != nil { + s = append(s, "Descriptive: "+mapStringForDescriptive+",\n") + } + if this.Entities != nil { + s = append(s, "Entities: "+fmt.Sprintf("%#v", this.Entities)+",\n") + } + s = append(s, "MinTimeMs: "+fmt.Sprintf("%#v", this.MinTimeMs)+",\n") + s = append(s, "MaxTimeMs: "+fmt.Sprintf("%#v", this.MaxTimeMs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EntityData) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&storepb.EntityData{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + keysForId := make([]string, 0, len(this.Id)) + for k, _ := range this.Id { + keysForId = append(keysForId, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForId) + mapStringForId := "map[string]string{" + for _, k := range keysForId { + mapStringForId += fmt.Sprintf("%#v: %#v,", k, this.Id[k]) + } + mapStringForId += "}" + if this.Id != nil { + s = append(s, "Id: "+mapStringForId+",\n") + } + keysForDescription := make([]string, 0, len(this.Description)) + for k, _ := range this.Description { + keysForDescription = append(keysForDescription, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDescription) + mapStringForDescription := "map[string]string{" + for _, k := range keysForDescription { + mapStringForDescription += fmt.Sprintf("%#v: %#v,", k, this.Description[k]) + } + mapStringForDescription += "}" + if this.Description != nil { + s = append(s, "Description: "+mapStringForDescription+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringRpc(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2625,149 +3393,494 @@ func (m *LabelValuesResponseHints) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { - offset -= sovRpc(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ResourceAttributesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *SeriesRequest) Size() (n int) { - if m == nil { - return 0 - } + +func (m *ResourceAttributesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.MinTime != 0 { - n += 1 + sovRpc(uint64(m.MinTime)) + if len(m.ResourceAttrFilters) > 0 { + 
for iNdEx := len(m.ResourceAttrFilters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceAttrFilters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } } - if m.MaxTime != 0 { - n += 1 + sovRpc(uint64(m.MaxTime)) + if m.Limit != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x38 } if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } } - if m.SkipChunks { - n += 2 - } if m.Hints != nil { - l = m.Hints.Size() - n += 1 + l + sovRpc(uint64(l)) + { + size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - if m.RequestHints != nil { - l = m.RequestHints.Size() - n += 1 + l + sovRpc(uint64(l)) + if m.End != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x20 } - if m.StreamingChunksBatchSize != 0 { - n += 2 + sovRpc(uint64(m.StreamingChunksBatchSize)) + if m.Start != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x18 } - return n + return len(dAtA) - i, nil } -func (m *SeriesRequestHints) Size() (n int) { - if m == nil { - return 0 +func (m *ResourceAttributesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ResourceAttributesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributesResponse) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.BlockMatchers) > 0 { - for _, e := range m.BlockMatchers { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) + if m.Hints != nil { + { + size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if m.ProjectionInclude { - n += 2 + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - if len(m.ProjectionLabels) > 0 { - for _, s := range m.ProjectionLabels { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *Stats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FetchedIndexBytes != 0 { - n += 1 + sovRpc(uint64(m.FetchedIndexBytes)) +func (m *ResourceAttributesSeriesData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *SeriesResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResourceAttributesSeriesData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributesSeriesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Result != nil { - n += m.Result.Size() + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - return n + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintRpc(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRpc(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRpc(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *SeriesResponse_Warning) Size() (n int) { - if m == nil { - return 0 +func (m *ResourceVersionData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = len(m.Warning) - n += 1 + l + sovRpc(uint64(l)) - return n + return dAtA[:n], nil } -func (m *SeriesResponse_Hints) Size() (n int) { - if m == nil { - return 0 - } + +func (m *ResourceVersionData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceVersionData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Hints != nil { - l = m.Hints.Size() - n += 1 + l + sovRpc(uint64(l)) + if m.MaxTimeMs != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MaxTimeMs)) + i-- + dAtA[i] = 0x28 } - return n -} -func (m *SeriesResponse_Stats) Size() (n int) { - if m == nil { - return 0 + if m.MinTimeMs != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MinTimeMs)) + i-- + dAtA[i] = 0x20 } - var l int - _ = l - if m.Stats != nil { - l = m.Stats.Size() - n += 1 + l + sovRpc(uint64(l)) + if len(m.Entities) > 0 { + for iNdEx := len(m.Entities) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entities[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - return n -} -func (m *SeriesResponse_StreamingSeries) Size() (n int) { - if m == nil { - return 0 + if len(m.Descriptive) > 0 { + for k := range m.Descriptive { + v := m.Descriptive[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintRpc(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRpc(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRpc(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } - var l int - _ = l - if m.StreamingSeries != nil { - l = m.StreamingSeries.Size() - n += 1 + l + sovRpc(uint64(l)) + if len(m.Identifying) > 0 { + for k := range m.Identifying { + v := m.Identifying[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintRpc(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRpc(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRpc(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - return n + return len(dAtA) - i, nil } -func (m *SeriesResponse_StreamingChunks) Size() (n int) { - if m == nil { - return 0 + +func (m *EntityData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EntityData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EntityData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Description) > 0 { + for k := range m.Description { + v := m.Description[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintRpc(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRpc(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRpc(dAtA, i, 
uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Id) > 0 { + for k := range m.Id { + v := m.Id[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintRpc(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRpc(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRpc(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { + offset -= sovRpc(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SeriesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinTime != 0 { + n += 1 + sovRpc(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + sovRpc(uint64(m.MaxTime)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.SkipChunks { + n += 2 + } + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.RequestHints != nil { + l = m.RequestHints.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.StreamingChunksBatchSize != 0 { + n += 2 + sovRpc(uint64(m.StreamingChunksBatchSize)) + } + return n +} + +func (m *SeriesRequestHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BlockMatchers) > 0 { + for _, e := range m.BlockMatchers { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.ProjectionInclude { + n += 2 + } + if len(m.ProjectionLabels) > 0 { + for _, s := range m.ProjectionLabels { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *Stats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FetchedIndexBytes != 0 { + n += 
1 + sovRpc(uint64(m.FetchedIndexBytes)) + } + return n +} + +func (m *SeriesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + n += m.Result.Size() + } + return n +} + +func (m *SeriesResponse_Warning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Warning) + n += 1 + l + sovRpc(uint64(l)) + return n +} +func (m *SeriesResponse_Hints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *SeriesResponse_Stats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stats != nil { + l = m.Stats.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *SeriesResponse_StreamingSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamingSeries != nil { + l = m.StreamingSeries.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *SeriesResponse_StreamingChunks) Size() (n int) { + if m == nil { + return 0 } var l int _ = l @@ -3015,66 +4128,214 @@ func (m *LabelValuesResponseHints) Size() (n int) { return n } -func sovRpc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRpc(x uint64) (n int) { - return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *SeriesRequest) String() string { - if this == nil { - return "nil" +func (m *ResourceAttributesRequest) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForMatchers := "[]LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += fmt.Sprintf("%v", f) + "," + var l int + _ = l + if m.Start != 0 { + n += 1 + sovRpc(uint64(m.Start)) } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&SeriesRequest{`, - `MinTime:` + fmt.Sprintf("%v", this.MinTime) + `,`, - `MaxTime:` + fmt.Sprintf("%v", this.MaxTime) + `,`, - `Matchers:` + repeatedStringForMatchers + `,`, - 
`SkipChunks:` + fmt.Sprintf("%v", this.SkipChunks) + `,`, - `Hints:` + strings.Replace(fmt.Sprintf("%v", this.Hints), "Any", "types.Any", 1) + `,`, - `RequestHints:` + strings.Replace(this.RequestHints.String(), "SeriesRequestHints", "SeriesRequestHints", 1) + `,`, - `StreamingChunksBatchSize:` + fmt.Sprintf("%v", this.StreamingChunksBatchSize) + `,`, - `}`, - }, "") - return s -} -func (this *SeriesRequestHints) String() string { - if this == nil { - return "nil" + if m.End != 0 { + n += 1 + sovRpc(uint64(m.End)) } - repeatedStringForBlockMatchers := "[]LabelMatcher{" - for _, f := range this.BlockMatchers { - repeatedStringForBlockMatchers += fmt.Sprintf("%v", f) + "," + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRpc(uint64(l)) } - repeatedStringForBlockMatchers += "}" - s := strings.Join([]string{`&SeriesRequestHints{`, - `BlockMatchers:` + repeatedStringForBlockMatchers + `,`, - `ProjectionInclude:` + fmt.Sprintf("%v", this.ProjectionInclude) + `,`, - `ProjectionLabels:` + fmt.Sprintf("%v", this.ProjectionLabels) + `,`, - `}`, - }, "") - return s -} -func (this *Stats) String() string { - if this == nil { - return "nil" + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } } - s := strings.Join([]string{`&Stats{`, - `FetchedIndexBytes:` + fmt.Sprintf("%v", this.FetchedIndexBytes) + `,`, - `}`, - }, "") - return s -} -func (this *SeriesResponse) String() string { - if this == nil { - return "nil" + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) } - s := strings.Join([]string{`&SeriesResponse{`, - `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + if len(m.ResourceAttrFilters) > 0 { + for _, e := range m.ResourceAttrFilters { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *ResourceAttributesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + 
sovRpc(uint64(l)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *ResourceAttributesSeriesData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovRpc(uint64(len(k))) + 1 + len(v) + sovRpc(uint64(len(v))) + n += mapEntrySize + 1 + sovRpc(uint64(mapEntrySize)) + } + } + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *ResourceVersionData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Identifying) > 0 { + for k, v := range m.Identifying { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovRpc(uint64(len(k))) + 1 + len(v) + sovRpc(uint64(len(v))) + n += mapEntrySize + 1 + sovRpc(uint64(mapEntrySize)) + } + } + if len(m.Descriptive) > 0 { + for k, v := range m.Descriptive { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovRpc(uint64(len(k))) + 1 + len(v) + sovRpc(uint64(len(v))) + n += mapEntrySize + 1 + sovRpc(uint64(mapEntrySize)) + } + } + if len(m.Entities) > 0 { + for _, e := range m.Entities { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.MinTimeMs != 0 { + n += 1 + sovRpc(uint64(m.MinTimeMs)) + } + if m.MaxTimeMs != 0 { + n += 1 + sovRpc(uint64(m.MaxTimeMs)) + } + return n +} + +func (m *EntityData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Id) > 0 { + for k, v := range m.Id { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovRpc(uint64(len(k))) + 1 + len(v) + sovRpc(uint64(len(v))) + n += mapEntrySize + 1 + sovRpc(uint64(mapEntrySize)) + } + } + if len(m.Description) > 0 { + for k, v := range m.Description { + _ = k + _ = 
v + mapEntrySize := 1 + len(k) + sovRpc(uint64(len(k))) + 1 + len(v) + sovRpc(uint64(len(v))) + n += mapEntrySize + 1 + sovRpc(uint64(mapEntrySize)) + } + } + return n +} + +func sovRpc(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRpc(x uint64) (n int) { + return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SeriesRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += fmt.Sprintf("%v", f) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&SeriesRequest{`, + `MinTime:` + fmt.Sprintf("%v", this.MinTime) + `,`, + `MaxTime:` + fmt.Sprintf("%v", this.MaxTime) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, + `SkipChunks:` + fmt.Sprintf("%v", this.SkipChunks) + `,`, + `Hints:` + strings.Replace(fmt.Sprintf("%v", this.Hints), "Any", "types.Any", 1) + `,`, + `RequestHints:` + strings.Replace(this.RequestHints.String(), "SeriesRequestHints", "SeriesRequestHints", 1) + `,`, + `StreamingChunksBatchSize:` + fmt.Sprintf("%v", this.StreamingChunksBatchSize) + `,`, + `}`, + }, "") + return s +} +func (this *SeriesRequestHints) String() string { + if this == nil { + return "nil" + } + repeatedStringForBlockMatchers := "[]LabelMatcher{" + for _, f := range this.BlockMatchers { + repeatedStringForBlockMatchers += fmt.Sprintf("%v", f) + "," + } + repeatedStringForBlockMatchers += "}" + s := strings.Join([]string{`&SeriesRequestHints{`, + `BlockMatchers:` + repeatedStringForBlockMatchers + `,`, + `ProjectionInclude:` + fmt.Sprintf("%v", this.ProjectionInclude) + `,`, + `ProjectionLabels:` + fmt.Sprintf("%v", this.ProjectionLabels) + `,`, + `}`, + }, "") + return s +} +func (this *Stats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Stats{`, + `FetchedIndexBytes:` + fmt.Sprintf("%v", this.FetchedIndexBytes) + `,`, + `}`, + }, "") + return s 
+} +func (this *SeriesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeriesResponse{`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, `}`, }, "") return s @@ -3301,6 +4562,139 @@ func (this *LabelValuesResponseHints) String() string { }, "") return s } +func (this *ResourceAttributesRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchers := "[]LabelMatcher{" + for _, f := range this.Matchers { + repeatedStringForMatchers += fmt.Sprintf("%v", f) + "," + } + repeatedStringForMatchers += "}" + s := strings.Join([]string{`&ResourceAttributesRequest{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `End:` + fmt.Sprintf("%v", this.End) + `,`, + `Hints:` + strings.Replace(fmt.Sprintf("%v", this.Hints), "Any", "types.Any", 1) + `,`, + `Matchers:` + repeatedStringForMatchers + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributesResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]*ResourceAttributesSeriesData{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(f.String(), "ResourceAttributesSeriesData", "ResourceAttributesSeriesData", 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceAttributesResponse{`, + `Items:` + repeatedStringForItems + `,`, + `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, + `Hints:` + strings.Replace(fmt.Sprintf("%v", this.Hints), "Any", "types.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributesSeriesData) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]*ResourceVersionData{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(f.String(), "ResourceVersionData", "ResourceVersionData", 1) + "," + } + repeatedStringForVersions += "}" + keysForLabels := make([]string, 0, len(this.Labels)) 
+ for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ResourceAttributesSeriesData{`, + `Labels:` + mapStringForLabels + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `}`, + }, "") + return s +} +func (this *ResourceVersionData) String() string { + if this == nil { + return "nil" + } + repeatedStringForEntities := "[]*EntityData{" + for _, f := range this.Entities { + repeatedStringForEntities += strings.Replace(f.String(), "EntityData", "EntityData", 1) + "," + } + repeatedStringForEntities += "}" + keysForIdentifying := make([]string, 0, len(this.Identifying)) + for k, _ := range this.Identifying { + keysForIdentifying = append(keysForIdentifying, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForIdentifying) + mapStringForIdentifying := "map[string]string{" + for _, k := range keysForIdentifying { + mapStringForIdentifying += fmt.Sprintf("%v: %v,", k, this.Identifying[k]) + } + mapStringForIdentifying += "}" + keysForDescriptive := make([]string, 0, len(this.Descriptive)) + for k, _ := range this.Descriptive { + keysForDescriptive = append(keysForDescriptive, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDescriptive) + mapStringForDescriptive := "map[string]string{" + for _, k := range keysForDescriptive { + mapStringForDescriptive += fmt.Sprintf("%v: %v,", k, this.Descriptive[k]) + } + mapStringForDescriptive += "}" + s := strings.Join([]string{`&ResourceVersionData{`, + `Identifying:` + mapStringForIdentifying + `,`, + `Descriptive:` + mapStringForDescriptive + `,`, + `Entities:` + repeatedStringForEntities + `,`, + `MinTimeMs:` + fmt.Sprintf("%v", this.MinTimeMs) + `,`, + `MaxTimeMs:` + fmt.Sprintf("%v", this.MaxTimeMs) + `,`, + `}`, + }, 
"") + return s +} +func (this *EntityData) String() string { + if this == nil { + return "nil" + } + keysForId := make([]string, 0, len(this.Id)) + for k, _ := range this.Id { + keysForId = append(keysForId, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForId) + mapStringForId := "map[string]string{" + for _, k := range keysForId { + mapStringForId += fmt.Sprintf("%v: %v,", k, this.Id[k]) + } + mapStringForId += "}" + keysForDescription := make([]string, 0, len(this.Description)) + for k, _ := range this.Description { + keysForDescription = append(keysForDescription, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDescription) + mapStringForDescription := "map[string]string{" + for _, k := range keysForDescription { + mapStringForDescription += fmt.Sprintf("%v: %v,", k, this.Description[k]) + } + mapStringForDescription += "}" + s := strings.Join([]string{`&EntityData{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Id:` + mapStringForId + `,`, + `Description:` + mapStringForDescription + `,`, + `}`, + }, "") + return s +} func valueToStringRpc(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -3319,30 +4713,535 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { if shift >= 64 { return ErrIntOverflowRpc } - if iNdEx >= l { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipChunks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipChunks = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hints == nil { + m.Hints = &types.Any{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestHints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestHints == nil { + m.RequestHints = &SeriesRequestHints{} + } + if err := m.RequestHints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 100: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunksBatchSize", wireType) + } + m.StreamingChunksBatchSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StreamingChunksBatchSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesRequestHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockMatchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockMatchers = append(m.BlockMatchers, LabelMatcher{}) + if err := m.BlockMatchers[len(m.BlockMatchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProjectionInclude", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProjectionInclude = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProjectionLabels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProjectionLabels = append(m.ProjectionLabels, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Stats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Stats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Stats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FetchedIndexBytes", wireType) + } + m.FetchedIndexBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FetchedIndexBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = &SeriesResponse_Warning{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if 
postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + v := &types.Any{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SeriesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SeriesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + m.Result = &SeriesResponse_Hints{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) } - m.MinTime = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -3352,16 +5251,32 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinTime |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + if msglen < 0 { + return ErrInvalidLengthRpc } - m.MaxTime = 0 + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Stats{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &SeriesResponse_Stats{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamingSeries", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -3371,14 +5286,30 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxTime |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break 
} } - case 3: + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StreamingSeriesBatch{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &SeriesResponse_StreamingSeries{v} + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3405,16 +5336,17 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Matchers = append(m.Matchers, LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &StreamingChunksBatch{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Result = &SeriesResponse_StreamingChunks{v} iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipChunks", wireType) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunksEstimate", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -3424,15 +5356,30 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.SkipChunks = bool(v != 0) - case 9: + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StreamingChunksEstimate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &SeriesResponse_StreamingChunksEstimate{v} 
+ iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResponseHints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3459,16 +5406,65 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Hints == nil { - m.Hints = &types.Any{} - } - if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &SeriesResponseHints{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Result = &SeriesResponse_ResponseHints{v} iNdEx = postIndex - case 14: + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesResponseHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesResponseHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestHints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field QueriedBlocks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3495,18 +5491,66 @@ func (m 
*SeriesRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RequestHints == nil { - m.RequestHints = &SeriesRequestHints{} - } - if err := m.RequestHints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.QueriedBlocks = append(m.QueriedBlocks, Block{}) + if err := m.QueriedBlocks[len(m.QueriedBlocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 100: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunksBatchSize", wireType) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err } - m.StreamingChunksBatchSize = 0 + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Block) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Block: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -3516,11 +5560,24 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StreamingChunksBatchSize |= uint64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -3542,7 +5599,7 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { +func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3565,15 +5622,53 @@ func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeriesRequestHints: wiretype end group for non-group") + return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeriesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
BlockMatchers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3600,16 +5695,52 @@ func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.BlockMatchers = append(m.BlockMatchers, LabelMatcher{}) - if err := m.BlockMatchers[len(m.BlockMatchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Hints == nil { + m.Hints = &types.Any{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectionInclude", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - var v int + m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -3619,17 +5750,16 @@ func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Limit |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.ProjectionInclude = bool(v != 0) - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectionLabels", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field RequestHints", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -3639,23 +5769,27 @@ func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthRpc } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthRpc } if postIndex > l { return io.ErrUnexpectedEOF } - m.ProjectionLabels = append(m.ProjectionLabels, string(dAtA[iNdEx:postIndex])) + if m.RequestHints == nil { + m.RequestHints = &LabelNamesRequestHints{} + } + if err := m.RequestHints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3678,7 +5812,7 @@ func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { } return nil } -func (m *Stats) Unmarshal(dAtA []byte) error { +func (m *LabelNamesRequestHints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3701,17 +5835,17 @@ func (m *Stats) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Stats: wiretype end group for non-group") + return fmt.Errorf("proto: LabelNamesRequestHints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Stats: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelNamesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FetchedIndexBytes", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockMatchers", wireType) } - m.FetchedIndexBytes = 0 
+ var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -3721,11 +5855,26 @@ func (m *Stats) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FetchedIndexBytes |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockMatchers = append(m.BlockMatchers, LabelMatcher{}) + if err := m.BlockMatchers[len(m.BlockMatchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -3747,7 +5896,7 @@ func (m *Stats) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeriesResponse) Unmarshal(dAtA []byte) error { +func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3770,119 +5919,17 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeriesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeriesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Result = &SeriesResponse_Warning{string(dAtA[iNdEx:postIndex])} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &types.Any{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Result = &SeriesResponse_Hints{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Stats{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Result = &SeriesResponse_Stats{v} - iNdEx = postIndex - case 5: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamingSeries", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -3892,32 
+5939,29 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthRpc } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthRpc } if postIndex > l { return io.ErrUnexpectedEOF } - v := &StreamingSeriesBatch{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Result = &SeriesResponse_StreamingSeries{v} + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -3927,30 +5971,27 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthRpc } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthRpc } if postIndex > l { return io.ErrUnexpectedEOF } - v := &StreamingChunksBatch{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Result = &SeriesResponse_StreamingChunks{v} + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 7: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunksEstimate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3977,13 
+6018,14 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &StreamingChunksEstimate{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Hints == nil { + m.Hints = &types.Any{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Result = &SeriesResponse_StreamingChunksEstimate{v} iNdEx = postIndex - case 8: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHints", wireType) } @@ -4012,11 +6054,12 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &SeriesResponseHints{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ResponseHints == nil { + m.ResponseHints = &LabelNamesResponseHints{} + } + if err := m.ResponseHints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Result = &SeriesResponse_ResponseHints{v} iNdEx = postIndex default: iNdEx = preIndex @@ -4039,7 +6082,7 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeriesResponseHints) Unmarshal(dAtA []byte) error { +func (m *LabelNamesResponseHints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4062,10 +6105,10 @@ func (m *SeriesResponseHints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeriesResponseHints: wiretype end group for non-group") + return fmt.Errorf("proto: LabelNamesResponseHints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeriesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelNamesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4123,7 +6166,7 @@ func (m *SeriesResponseHints) Unmarshal(dAtA []byte) error { } return nil } -func (m *Block) Unmarshal(dAtA []byte) 
error { +func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4146,15 +6189,15 @@ func (m *Block) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Block: wiretype end group for non-group") + return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4182,59 +6225,9 @@ func (m *Block) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Id = string(dAtA[iNdEx:postIndex]) + m.Label = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) } @@ -4253,7 +6246,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { break } } - case 4: + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) } @@ -4272,7 +6265,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { break } } - case 5: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) } @@ -4308,7 +6301,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 6: + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) } @@ -4342,7 +6335,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 7: + case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } @@ -4361,7 +6354,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { break } } - case 8: + case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RequestHints", wireType) } @@ -4391,7 +6384,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.RequestHints == nil { - m.RequestHints = &LabelNamesRequestHints{} + m.RequestHints = &LabelValuesRequestHints{} } if err := m.RequestHints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4418,7 +6411,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelNamesRequestHints) Unmarshal(dAtA []byte) error { +func (m *LabelValuesRequestHints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4441,10 +6434,10 @@ func (m *LabelNamesRequestHints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire 
>> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelNamesRequestHints: wiretype end group for non-group") + return fmt.Errorf("proto: LabelValuesRequestHints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelValuesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4502,7 +6495,7 @@ func (m *LabelNamesRequestHints) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { +func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4525,15 +6518,15 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4561,7 +6554,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { @@ -4661,7 +6654,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ResponseHints == nil { - m.ResponseHints = 
&LabelNamesResponseHints{} + m.ResponseHints = &LabelValuesResponseHints{} } if err := m.ResponseHints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4688,7 +6681,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelNamesResponseHints) Unmarshal(dAtA []byte) error { +func (m *LabelValuesResponseHints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4711,10 +6704,10 @@ func (m *LabelNamesResponseHints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelNamesResponseHints: wiretype end group for non-group") + return fmt.Errorf("proto: LabelValuesResponseHints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelValuesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4772,7 +6765,7 @@ func (m *LabelNamesResponseHints) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { +func (m *ResourceAttributesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4795,17 +6788,55 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceAttributesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + 
m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -4815,29 +6846,33 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthRpc } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthRpc } if postIndex > l { return io.ErrUnexpectedEOF } - m.Label = string(dAtA[iNdEx:postIndex]) + if m.Hints == nil { + m.Hints = &types.Any{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) } - m.Start = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -4847,16 +6882,31 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - m.Start |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 5: + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - m.End = 0 + m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -4866,14 +6916,14 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.End |= int64(b&0x7F) << shift + m.Limit |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 6: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttrFilters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4900,16 +6950,64 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Hints == nil { - m.Hints = &types.Any{} - } - if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ResourceAttrFilters = append(m.ResourceAttrFilters, &ResourceAttrFilter{}) + if err := m.ResourceAttrFilters[len(m.ResourceAttrFilters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > 
l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4936,16 +7034,16 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Matchers = append(m.Matchers, LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, &ResourceAttributesSeriesData{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) } - m.Limit = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -4955,14 +7053,27 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - 
case 9: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestHints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4989,10 +7100,10 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RequestHints == nil { - m.RequestHints = &LabelValuesRequestHints{} + if m.Hints == nil { + m.Hints = &types.Any{} } - if err := m.RequestHints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5017,7 +7128,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValuesRequestHints) Unmarshal(dAtA []byte) error { +func (m *ResourceAttributesSeriesData) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5040,15 +7151,15 @@ func (m *LabelValuesRequestHints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValuesRequestHints: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceAttributesSeriesData: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceAttributesSeriesData: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockMatchers", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5075,8 +7186,135 @@ func (m *LabelValuesRequestHints) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.BlockMatchers = append(m.BlockMatchers, LabelMatcher{}) - if err := m.BlockMatchers[len(m.BlockMatchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, &ResourceVersionData{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5101,7 +7339,7 @@ func (m *LabelValuesRequestHints) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { +func (m *ResourceVersionData) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5124,17 +7362,17 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceVersionData: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire 
type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceVersionData: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Identifying", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -5144,29 +7382,124 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthRpc } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthRpc } if postIndex > l { return io.ErrUnexpectedEOF } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + if m.Identifying == nil { + m.Identifying = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapkey > l { + 
return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Identifying[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Descriptive", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -5176,27 +7509,122 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthRpc } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthRpc } if postIndex > l { return io.ErrUnexpectedEOF } - m.Warnings = append(m.Warnings, 
string(dAtA[iNdEx:postIndex])) + if m.Descriptive == nil { + m.Descriptive = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Descriptive[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Entities", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5223,18 +7651,16 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Hints == nil { - m.Hints = &types.Any{} - } - if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Entities = append(m.Entities, &EntityData{}) + if err := m.Entities[len(m.Entities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseHints", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTimeMs", wireType) } - var msglen int + m.MinTimeMs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -5244,28 +7670,30 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.MinTimeMs |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResponseHints == nil { - m.ResponseHints = &LabelValuesResponseHints{} + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTimeMs", wireType) } - if err := m.ResponseHints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.MaxTimeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTimeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -5287,7 +7715,7 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LabelValuesResponseHints) Unmarshal(dAtA []byte) error { +func (m *EntityData) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5310,15 +7738,47 @@ func (m *LabelValuesResponseHints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LabelValuesResponseHints: wiretype end group for non-group") + return fmt.Errorf("proto: EntityData: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EntityData: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QueriedBlocks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5345,10 +7805,230 @@ func (m *LabelValuesResponseHints) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.QueriedBlocks = append(m.QueriedBlocks, Block{}) - if err := m.QueriedBlocks[len(m.QueriedBlocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Id == nil { + m.Id = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Id[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF } + if m.Description == nil { + m.Description = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } 
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthRpc + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthRpc + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Description[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/pkg/storegateway/storepb/rpc.pb.go.expdiff b/pkg/storegateway/storepb/rpc.pb.go.expdiff index 4929d4cbdbe..fc94c710d2a 100644 --- a/pkg/storegateway/storepb/rpc.pb.go.expdiff +++ b/pkg/storegateway/storepb/rpc.pb.go.expdiff @@ -1,19 +1,18 @@ diff --git a/pkg/storegateway/storepb/rpc.pb.go b/pkg/storegateway/storepb/rpc.pb.go -index 5cc9be0151..8c0ca33942 100644 --- a/pkg/storegateway/storepb/rpc.pb.go +++ b/pkg/storegateway/storepb/rpc.pb.go -@@ -8,8 +8,6 @@ import ( - _ "github.com/gogo/protobuf/gogoproto" +@@ -9,8 +9,6 @@ import ( proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" types "github.com/gogo/protobuf/types" - "github.com/grafana/mimir/pkg/mimirpb" - io "io" math "math" math_bits "math/bits" -@@ -174,9 +172,6 @@ func (m *Stats) XXX_DiscardUnknown() { +@@ 
-175,9 +173,6 @@ func (m *Stats) XXX_DiscardUnknown() { var xxx_messageInfo_Stats proto.InternalMessageInfo - + type SeriesResponse struct { - // Keep reference to buffer for unsafe references. - mimirpb.BufferHolder diff --git a/pkg/storegateway/storepb/rpc.proto b/pkg/storegateway/storepb/rpc.proto index ebbc595c131..fa8963c07d4 100644 --- a/pkg/storegateway/storepb/rpc.proto +++ b/pkg/storegateway/storepb/rpc.proto @@ -252,3 +252,61 @@ message LabelValuesResponseHints { // queried_blocks is the list of blocks that have been queried. repeated Block queried_blocks = 1 [(gogoproto.nullable) = false]; } + +// ResourceAttrFilter specifies a resource attribute key:value pair for reverse lookup. +message ResourceAttrFilter { + string key = 1; + string value = 2; +} + +// ResourceAttributesRequest queries OTel resource attributes for series matching matchers. +message ResourceAttributesRequest { + // Reserved for Thanos compatibility. + reserved 1; + reserved 2; + + int64 start = 3; + int64 end = 4; + + // hints is an opaque data structure that can be used to carry additional information. + google.protobuf.Any hints = 5; + + repeated LabelMatcher matchers = 6 [(gogoproto.nullable) = false]; + int64 limit = 7; + // When resource_attr_filters is non-empty, use inverted index lookup instead of series matchers. + repeated ResourceAttrFilter resource_attr_filters = 8; +} + +// ResourceAttributesResponse contains batches of series with their resource attributes. +message ResourceAttributesResponse { + repeated ResourceAttributesSeriesData items = 1; + repeated string warnings = 2; + + /// hints is an opaque data structure that can be used to carry additional information from the store. + google.protobuf.Any hints = 3; +} + +// ResourceAttributesSeriesData contains resource data for a single series. +message ResourceAttributesSeriesData { + // The series labels as a map of name->value. + map labels = 1; + + // Resource versions for this series. 
+ repeated ResourceVersionData versions = 2; +} + +// ResourceVersionData represents a snapshot of resource data at a point in time. +message ResourceVersionData { + map identifying = 1; + map descriptive = 2; + repeated EntityData entities = 3; + int64 min_time_ms = 4; + int64 max_time_ms = 5; +} + +// EntityData represents a typed OTel entity. +message EntityData { + string type = 1; + map id = 2; + map description = 3; +} diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index beac0f4e635..d07263586af 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -319,6 +319,9 @@ type Limits struct { OTelTranslationStrategy OTelTranslationStrategyValue `yaml:"otel_translation_strategy" json:"otel_translation_strategy" category:"experimental"` OTelLabelNameUnderscoreSanitization bool `yaml:"otel_label_name_underscore_sanitization" json:"otel_label_name_underscore_sanitization" category:"advanced"` OTelLabelNamePreserveMultipleUnderscores bool `yaml:"otel_label_name_preserve_multiple_underscores" json:"otel_label_name_preserve_multiple_underscores" category:"advanced"` + OTelPersistResourceAttributes bool `yaml:"otel_persist_resource_attributes" json:"otel_persist_resource_attributes" category:"experimental"` + OTelResourceAttrIndexEnabled bool `yaml:"otel_resource_attr_index_enabled" json:"otel_resource_attr_index_enabled" category:"experimental"` + OTelIndexedResourceAttributes flagext.StringSliceCSV `yaml:"otel_indexed_resource_attributes" json:"otel_indexed_resource_attributes" category:"experimental"` // Ingest storage. IngestStorageReadConsistency string `yaml:"ingest_storage_read_consistency" json:"ingest_storage_read_consistency" category:"experimental"` @@ -376,6 +379,9 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.Var(&l.OTelTranslationStrategy, "distributor.otel-translation-strategy", fmt.Sprintf("Translation strategy to apply in OTLP endpoint for metric and label names. 
If unspecified (the default), the strategy is derived from -validation.name-validation-scheme and -distributor.otel-metric-suffixes-enabled. Supported values: %s.", strings.Join([]string{`""`, string(otlptranslator.UnderscoreEscapingWithSuffixes), string(otlptranslator.UnderscoreEscapingWithoutSuffixes), string(otlptranslator.NoUTF8EscapingWithSuffixes), string(otlptranslator.NoTranslation)}, ", "))) f.BoolVar(&l.OTelLabelNameUnderscoreSanitization, "distributor.otel-label-name-underscore-sanitization", true, "If enabled, prefixes label names starting with a single underscore with `key_` when translating OTel attribute names. Defaults to true.") f.BoolVar(&l.OTelLabelNamePreserveMultipleUnderscores, "distributor.otel-label-name-preserve-underscores", true, "If enabled, keeps multiple consecutive underscores in label names when translating OTel attribute names. Defaults to true.") + f.BoolVar(&l.OTelPersistResourceAttributes, "distributor.otel-persist-resource-attributes", false, "Whether to persist OTel resource attributes per time series as metadata in Prometheus TSDB blocks. Resource attributes are stored in series_metadata.parquet files within blocks and can be queried via the /api/v1/resource_attributes endpoint.") + f.BoolVar(&l.OTelResourceAttrIndexEnabled, "ingester.otel-resource-attr-index-enabled", false, "Enable the in-memory resource attribute inverted index for O(1) reverse lookup by attribute key:value. When disabled, the index is not built in memory or written to Parquet during compaction.") + f.Var(&l.OTelIndexedResourceAttributes, "ingester.otel-indexed-resource-attributes", "Comma-separated list of additional descriptive resource attribute names to include in the inverted index. Identifying attributes (service.name, service.namespace, service.instance.id) are always indexed when the index is enabled.") f.Var(&l.IngestionArtificialDelay, "distributor.ingestion-artificial-delay", "Target ingestion delay to apply to all tenants. 
If set to a non-zero value, the distributor will artificially delay ingestion time-frame by the specified duration by computing the difference between actual ingestion and the target. There is no delay on actual ingestion of samples, it is only the response back to the client.") @@ -1598,6 +1604,21 @@ func (o *Overrides) OTelLabelNamePreserveMultipleUnderscores(tenantID string) bo return o.getOverridesForUser(tenantID).OTelLabelNamePreserveMultipleUnderscores } +// OTelPersistResourceAttributes returns whether OTel resource attributes should be persisted per time series. +func (o *Overrides) OTelPersistResourceAttributes(tenantID string) bool { + return o.getOverridesForUser(tenantID).OTelPersistResourceAttributes +} + +// OTelResourceAttrIndexEnabled returns whether the resource attribute inverted index is enabled. +func (o *Overrides) OTelResourceAttrIndexEnabled(tenantID string) bool { + return o.getOverridesForUser(tenantID).OTelResourceAttrIndexEnabled +} + +// OTelIndexedResourceAttributes returns additional descriptive resource attribute names to index. +func (o *Overrides) OTelIndexedResourceAttributes(tenantID string) flagext.StringSliceCSV { + return o.getOverridesForUser(tenantID).OTelIndexedResourceAttributes +} + // DistributorIngestionArtificialDelay returns the artificial ingestion latency for a given user. func (o *Overrides) DistributorIngestionArtificialDelay(tenantID string) time.Duration { return time.Duration(o.getOverridesForUser(tenantID).IngestionArtificialDelay) diff --git a/vendor/github.com/andybalholm/brotli/LICENSE b/vendor/github.com/andybalholm/brotli/LICENSE new file mode 100644 index 00000000000..33b7cdd2dba --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/andybalholm/brotli/README.md b/vendor/github.com/andybalholm/brotli/README.md new file mode 100644 index 00000000000..00625211d71 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/README.md @@ -0,0 +1,14 @@ +This package is a brotli compressor and decompressor implemented in Go. +It was translated from the reference implementation (https://github.com/google/brotli) +with the `c2go` tool at https://github.com/andybalholm/c2go. + +I have been working on new compression algorithms (not translated from C) +in the matchfinder package. +You can use them with the NewWriterV2 function. +Currently they give better results than the old implementation +(at least for compressing my test file, Newton’s *Opticks*) +on levels 2 to 6. + +I am using it in production with https://github.com/andybalholm/redwood. + +API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc. 
diff --git a/vendor/github.com/andybalholm/brotli/backward_references.go b/vendor/github.com/andybalholm/brotli/backward_references.go new file mode 100644 index 00000000000..008c054d1c0 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/backward_references.go @@ -0,0 +1,185 @@ +package brotli + +import ( + "sync" +) + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function to find backward reference copies. */ + +func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uint { + if distance <= max_distance { + var distance_plus_3 uint = distance + 3 + var offset0 uint = distance_plus_3 - uint(dist_cache[0]) + var offset1 uint = distance_plus_3 - uint(dist_cache[1]) + if distance == uint(dist_cache[0]) { + return 0 + } else if distance == uint(dist_cache[1]) { + return 1 + } else if offset0 < 7 { + return (0x9750468 >> (4 * offset0)) & 0xF + } else if offset1 < 7 { + return (0xFDB1ACE >> (4 * offset1)) & 0xF + } else if distance == uint(dist_cache[2]) { + return 2 + } else if distance == uint(dist_cache[3]) { + return 3 + } + } + + return distance + numDistanceShortCodes - 1 +} + +var hasherSearchResultPool sync.Pool + +func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var insert_length uint = *last_insert_len + var pos_end uint = position + num_bytes + var store_end uint + if num_bytes >= hasher.StoreLookahead() { + store_end = position + num_bytes - hasher.StoreLookahead() + 1 + } else { + store_end = position + } + var random_heuristics_window_size uint = literalSpreeLengthForSparseSearch(params) + var apply_random_heuristics uint = position + random_heuristics_window_size 
+ var gap uint = 0 + /* Set maximum distance, see section 9.1. of the spec. */ + + const kMinScore uint = scoreBase + 100 + + /* For speed up heuristics for random data. */ + + /* Minimum score to accept a backward reference. */ + hasher.PrepareDistanceCache(dist_cache) + sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult) + if sr2 == nil { + sr2 = &hasherSearchResult{} + } + sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult) + if sr == nil { + sr = &hasherSearchResult{} + } + + for position+hasher.HashTypeLength() < pos_end { + var max_length uint = pos_end - position + var max_distance uint = brotli_min_size_t(position, max_backward_limit) + sr.len = 0 + sr.len_code_delta = 0 + sr.distance = 0 + sr.score = kMinScore + hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr) + if sr.score > kMinScore { + /* Found a match. Let's look for something even better ahead. */ + var delayed_backward_references_in_row int = 0 + max_length-- + for ; ; max_length-- { + var cost_diff_lazy uint = 175 + if params.quality < minQualityForExtensiveReferenceSearch { + sr2.len = brotli_min_size_t(sr.len-1, max_length) + } else { + sr2.len = 0 + } + sr2.len_code_delta = 0 + sr2.distance = 0 + sr2.score = kMinScore + max_distance = brotli_min_size_t(position+1, max_backward_limit) + hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2) + if sr2.score >= sr.score+cost_diff_lazy { + /* Ok, let's just write one byte for now and start a match from the + next byte. 
*/ + position++ + + insert_length++ + *sr = *sr2 + delayed_backward_references_in_row++ + if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end { + continue + } + } + + break + } + + apply_random_heuristics = position + 2*sr.len + random_heuristics_window_size + max_distance = brotli_min_size_t(position, max_backward_limit) + { + /* The first 16 codes are special short-codes, + and the minimum offset is 1. */ + var distance_code uint = computeDistanceCode(sr.distance, max_distance+gap, dist_cache) + if (sr.distance <= (max_distance + gap)) && distance_code > 0 { + dist_cache[3] = dist_cache[2] + dist_cache[2] = dist_cache[1] + dist_cache[1] = dist_cache[0] + dist_cache[0] = int(sr.distance) + hasher.PrepareDistanceCache(dist_cache) + } + + *commands = append(*commands, makeCommand(¶ms.dist, insert_length, sr.len, sr.len_code_delta, distance_code)) + } + + *num_literals += insert_length + insert_length = 0 + /* Put the hash keys into the table, if there are enough bytes left. + Depending on the hasher implementation, it can push all positions + in the given range or only a subset of them. + Avoid hash poisoning with RLE data. */ + { + var range_start uint = position + 2 + var range_end uint = brotli_min_size_t(position+sr.len, store_end) + if sr.distance < sr.len>>2 { + range_start = brotli_min_size_t(range_end, brotli_max_size_t(range_start, position+sr.len-(sr.distance<<2))) + } + + hasher.StoreRange(ringbuffer, ringbuffer_mask, range_start, range_end) + } + + position += sr.len + } else { + insert_length++ + position++ + + /* If we have not seen matches for a long time, we can skip some + match lookups. Unsuccessful match lookups are very very expensive + and this kind of a heuristic speeds up compression quite + a lot. */ + if position > apply_random_heuristics { + /* Going through uncompressible data, jump. 
*/ + if position > apply_random_heuristics+4*random_heuristics_window_size { + var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 4) + /* It is quite a long time since we saw a copy, so we assume + that this data is not compressible, and store hashes less + often. Hashes of non compressible data are less likely to + turn out to be useful in the future, too, so we store less of + them to not to flood out the hash table of good compressible + data. */ + + var pos_jump uint = brotli_min_size_t(position+16, pos_end-kMargin) + for ; position < pos_jump; position += 4 { + hasher.Store(ringbuffer, ringbuffer_mask, position) + insert_length += 4 + } + } else { + var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 2) + var pos_jump uint = brotli_min_size_t(position+8, pos_end-kMargin) + for ; position < pos_jump; position += 2 { + hasher.Store(ringbuffer, ringbuffer_mask, position) + insert_length += 2 + } + } + } + } + } + + insert_length += pos_end - position + *last_insert_len = insert_length + + hasherSearchResultPool.Put(sr) + hasherSearchResultPool.Put(sr2) +} diff --git a/vendor/github.com/andybalholm/brotli/backward_references_hq.go b/vendor/github.com/andybalholm/brotli/backward_references_hq.go new file mode 100644 index 00000000000..21629c1cdb7 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/backward_references_hq.go @@ -0,0 +1,796 @@ +package brotli + +import "math" + +type zopfliNode struct { + length uint32 + distance uint32 + dcode_insert_length uint32 + u struct { + cost float32 + next uint32 + shortcut uint32 + } +} + +const maxEffectiveDistanceAlphabetSize = 544 + +const kInfinity float32 = 1.7e38 /* ~= 2 ^ 127 */ + +var kDistanceCacheIndex = []uint32{0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1} + +var kDistanceCacheOffset = []int{0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3} + +func initZopfliNodes(array []zopfliNode, length uint) { + var stub zopfliNode + var i uint + stub.length = 1 + stub.distance = 0 + 
stub.dcode_insert_length = 0 + stub.u.cost = kInfinity + for i = 0; i < length; i++ { + array[i] = stub + } +} + +func zopfliNodeCopyLength(self *zopfliNode) uint32 { + return self.length & 0x1FFFFFF +} + +func zopfliNodeLengthCode(self *zopfliNode) uint32 { + var modifier uint32 = self.length >> 25 + return zopfliNodeCopyLength(self) + 9 - modifier +} + +func zopfliNodeCopyDistance(self *zopfliNode) uint32 { + return self.distance +} + +func zopfliNodeDistanceCode(self *zopfliNode) uint32 { + var short_code uint32 = self.dcode_insert_length >> 27 + if short_code == 0 { + return zopfliNodeCopyDistance(self) + numDistanceShortCodes - 1 + } else { + return short_code - 1 + } +} + +func zopfliNodeCommandLength(self *zopfliNode) uint32 { + return zopfliNodeCopyLength(self) + (self.dcode_insert_length & 0x7FFFFFF) +} + +/* Histogram based cost model for zopflification. */ +type zopfliCostModel struct { + cost_cmd_ [numCommandSymbols]float32 + cost_dist_ []float32 + distance_histogram_size uint32 + literal_costs_ []float32 + min_cost_cmd_ float32 + num_bytes_ uint +} + +func initZopfliCostModel(self *zopfliCostModel, dist *distanceParams, num_bytes uint) { + var distance_histogram_size uint32 = dist.alphabet_size + if distance_histogram_size > maxEffectiveDistanceAlphabetSize { + distance_histogram_size = maxEffectiveDistanceAlphabetSize + } + + self.num_bytes_ = num_bytes + self.literal_costs_ = make([]float32, (num_bytes + 2)) + self.cost_dist_ = make([]float32, (dist.alphabet_size)) + self.distance_histogram_size = distance_histogram_size +} + +func cleanupZopfliCostModel(self *zopfliCostModel) { + self.literal_costs_ = nil + self.cost_dist_ = nil +} + +func setCost(histogram []uint32, histogram_size uint, literal_histogram bool, cost []float32) { + var sum uint = 0 + var missing_symbol_sum uint + var log2sum float32 + var missing_symbol_cost float32 + var i uint + for i = 0; i < histogram_size; i++ { + sum += uint(histogram[i]) + } + + log2sum = 
float32(fastLog2(sum)) + missing_symbol_sum = sum + if !literal_histogram { + for i = 0; i < histogram_size; i++ { + if histogram[i] == 0 { + missing_symbol_sum++ + } + } + } + + missing_symbol_cost = float32(fastLog2(missing_symbol_sum)) + 2 + for i = 0; i < histogram_size; i++ { + if histogram[i] == 0 { + cost[i] = missing_symbol_cost + continue + } + + /* Shannon bits for this symbol. */ + cost[i] = log2sum - float32(fastLog2(uint(histogram[i]))) + + /* Cannot be coded with less than 1 bit */ + if cost[i] < 1 { + cost[i] = 1 + } + } +} + +func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, last_insert_len uint) { + var histogram_literal [numLiteralSymbols]uint32 + var histogram_cmd [numCommandSymbols]uint32 + var histogram_dist [maxEffectiveDistanceAlphabetSize]uint32 + var cost_literal [numLiteralSymbols]float32 + var pos uint = position - last_insert_len + var min_cost_cmd float32 = kInfinity + var cost_cmd []float32 = self.cost_cmd_[:] + var literal_costs []float32 + + histogram_literal = [numLiteralSymbols]uint32{} + histogram_cmd = [numCommandSymbols]uint32{} + histogram_dist = [maxEffectiveDistanceAlphabetSize]uint32{} + + for i := range commands { + var inslength uint = uint(commands[i].insert_len_) + var copylength uint = uint(commandCopyLen(&commands[i])) + var distcode uint = uint(commands[i].dist_prefix_) & 0x3FF + var cmdcode uint = uint(commands[i].cmd_prefix_) + var j uint + + histogram_cmd[cmdcode]++ + if cmdcode >= 128 { + histogram_dist[distcode]++ + } + + for j = 0; j < inslength; j++ { + histogram_literal[ringbuffer[(pos+j)&ringbuffer_mask]]++ + } + + pos += inslength + copylength + } + + setCost(histogram_literal[:], numLiteralSymbols, true, cost_literal[:]) + setCost(histogram_cmd[:], numCommandSymbols, false, cost_cmd) + setCost(histogram_dist[:], uint(self.distance_histogram_size), false, self.cost_dist_) + + for i := 0; i < numCommandSymbols; i++ { + 
min_cost_cmd = brotli_min_float(min_cost_cmd, cost_cmd[i]) + } + + self.min_cost_cmd_ = min_cost_cmd + { + literal_costs = self.literal_costs_ + var literal_carry float32 = 0.0 + num_bytes := int(self.num_bytes_) + literal_costs[0] = 0.0 + for i := 0; i < num_bytes; i++ { + literal_carry += cost_literal[ringbuffer[(position+uint(i))&ringbuffer_mask]] + literal_costs[i+1] = literal_costs[i] + literal_carry + literal_carry -= literal_costs[i+1] - literal_costs[i] + } + } +} + +func zopfliCostModelSetFromLiteralCosts(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint) { + var literal_costs []float32 = self.literal_costs_ + var literal_carry float32 = 0.0 + var cost_dist []float32 = self.cost_dist_ + var cost_cmd []float32 = self.cost_cmd_[:] + var num_bytes uint = self.num_bytes_ + var i uint + estimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask, ringbuffer, literal_costs[1:]) + literal_costs[0] = 0.0 + for i = 0; i < num_bytes; i++ { + literal_carry += literal_costs[i+1] + literal_costs[i+1] = literal_costs[i] + literal_carry + literal_carry -= literal_costs[i+1] - literal_costs[i] + } + + for i = 0; i < numCommandSymbols; i++ { + cost_cmd[i] = float32(fastLog2(uint(11 + uint32(i)))) + } + + for i = 0; uint32(i) < self.distance_histogram_size; i++ { + cost_dist[i] = float32(fastLog2(uint(20 + uint32(i)))) + } + + self.min_cost_cmd_ = float32(fastLog2(11)) +} + +func zopfliCostModelGetCommandCost(self *zopfliCostModel, cmdcode uint16) float32 { + return self.cost_cmd_[cmdcode] +} + +func zopfliCostModelGetDistanceCost(self *zopfliCostModel, distcode uint) float32 { + return self.cost_dist_[distcode] +} + +func zopfliCostModelGetLiteralCosts(self *zopfliCostModel, from uint, to uint) float32 { + return self.literal_costs_[to] - self.literal_costs_[from] +} + +func zopfliCostModelGetMinCostCmd(self *zopfliCostModel) float32 { + return self.min_cost_cmd_ +} + +/* REQUIRES: len >= 2, start_pos <= pos */ +/* REQUIRES: cost < 
kInfinity, nodes[start_pos].cost < kInfinity */ +/* Maintains the "ZopfliNode array invariant". */ +func updateZopfliNode(nodes []zopfliNode, pos uint, start_pos uint, len uint, len_code uint, dist uint, short_code uint, cost float32) { + var next *zopfliNode = &nodes[pos+len] + next.length = uint32(len | (len+9-len_code)<<25) + next.distance = uint32(dist) + next.dcode_insert_length = uint32(short_code<<27 | (pos - start_pos)) + next.u.cost = cost +} + +type posData struct { + pos uint + distance_cache [4]int + costdiff float32 + cost float32 +} + +/* Maintains the smallest 8 cost difference together with their positions */ +type startPosQueue struct { + q_ [8]posData + idx_ uint +} + +func initStartPosQueue(self *startPosQueue) { + self.idx_ = 0 +} + +func startPosQueueSize(self *startPosQueue) uint { + return brotli_min_size_t(self.idx_, 8) +} + +func startPosQueuePush(self *startPosQueue, posdata *posData) { + var offset uint = ^(self.idx_) & 7 + self.idx_++ + var len uint = startPosQueueSize(self) + var i uint + var q []posData = self.q_[:] + q[offset] = *posdata + + /* Restore the sorted order. In the list of |len| items at most |len - 1| + adjacent element comparisons / swaps are required. */ + for i = 1; i < len; i++ { + if q[offset&7].costdiff > q[(offset+1)&7].costdiff { + var tmp posData = q[offset&7] + q[offset&7] = q[(offset+1)&7] + q[(offset+1)&7] = tmp + } + + offset++ + } +} + +func startPosQueueAt(self *startPosQueue, k uint) *posData { + return &self.q_[(k-self.idx_)&7] +} + +/* Returns the minimum possible copy length that can improve the cost of any */ +/* future position. */ +func computeMinimumCopyLength(start_cost float32, nodes []zopfliNode, num_bytes uint, pos uint) uint { + var min_cost float32 = start_cost + var len uint = 2 + var next_len_bucket uint = 4 + /* Compute the minimum possible cost of reaching any future position. 
*/ + + var next_len_offset uint = 10 + for pos+len <= num_bytes && nodes[pos+len].u.cost <= min_cost { + /* We already reached (pos + len) with no more cost than the minimum + possible cost of reaching anything from this pos, so there is no point in + looking for lengths <= len. */ + len++ + + if len == next_len_offset { + /* We reached the next copy length code bucket, so we add one more + extra bit to the minimum cost. */ + min_cost += 1.0 + + next_len_offset += next_len_bucket + next_len_bucket *= 2 + } + } + + return uint(len) +} + +/* REQUIRES: nodes[pos].cost < kInfinity + REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */ +func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint, gap uint, nodes []zopfliNode) uint32 { + var clen uint = uint(zopfliNodeCopyLength(&nodes[pos])) + var ilen uint = uint(nodes[pos].dcode_insert_length & 0x7FFFFFF) + var dist uint = uint(zopfliNodeCopyDistance(&nodes[pos])) + + /* Since |block_start + pos| is the end position of the command, the copy part + starts from |block_start + pos - clen|. Distances that are greater than + this or greater than |max_backward_limit| + |gap| are static dictionary + references, and do not update the last distances. + Also distance code 0 (last distance) does not update the last distances. */ + if pos == 0 { + return 0 + } else if dist+clen <= block_start+pos+gap && dist <= max_backward_limit+gap && zopfliNodeDistanceCode(&nodes[pos]) > 0 { + return uint32(pos) + } else { + return nodes[pos-clen-ilen].u.shortcut + } +} + +/* Fills in dist_cache[0..3] with the last four distances (as defined by + Section 4. of the Spec) that would be used at (block_start + pos) if we + used the shortest path of commands from block_start, computed from + nodes[0..pos]. The last four distances at block_start are in + starting_dist_cache[0..3]. + REQUIRES: nodes[pos].cost < kInfinity + REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". 
*/ +func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNode, dist_cache []int) { + var idx int = 0 + var p uint = uint(nodes[pos].u.shortcut) + for idx < 4 && p > 0 { + var ilen uint = uint(nodes[p].dcode_insert_length & 0x7FFFFFF) + var clen uint = uint(zopfliNodeCopyLength(&nodes[p])) + var dist uint = uint(zopfliNodeCopyDistance(&nodes[p])) + dist_cache[idx] = int(dist) + idx++ + + /* Because of prerequisite, p >= clen + ilen >= 2. */ + p = uint(nodes[p-clen-ilen].u.shortcut) + } + + for ; idx < 4; idx++ { + dist_cache[idx] = starting_dist_cache[0] + starting_dist_cache = starting_dist_cache[1:] + } +} + +/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it + is eligible. */ +func evaluateNode(block_start uint, pos uint, max_backward_limit uint, gap uint, starting_dist_cache []int, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) { + /* Save cost, because ComputeDistanceCache invalidates it. */ + var node_cost float32 = nodes[pos].u.cost + nodes[pos].u.shortcut = computeDistanceShortcut(block_start, pos, max_backward_limit, gap, nodes) + if node_cost <= zopfliCostModelGetLiteralCosts(model, 0, pos) { + var posdata posData + posdata.pos = pos + posdata.cost = node_cost + posdata.costdiff = node_cost - zopfliCostModelGetLiteralCosts(model, 0, pos) + computeDistanceCache(pos, starting_dist_cache, nodes, posdata.distance_cache[:]) + startPosQueuePush(queue, &posdata) + } +} + +/* Returns longest copy length. 
*/ +func updateNodes(num_bytes uint, block_start uint, pos uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, max_backward_limit uint, starting_dist_cache []int, num_matches uint, matches []backwardMatch, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) uint { + var cur_ix uint = block_start + pos + var cur_ix_masked uint = cur_ix & ringbuffer_mask + var max_distance uint = brotli_min_size_t(cur_ix, max_backward_limit) + var max_len uint = num_bytes - pos + var max_zopfli_len uint = maxZopfliLen(params) + var max_iters uint = maxZopfliCandidates(params) + var min_len uint + var result uint = 0 + var k uint + var gap uint = 0 + + evaluateNode(block_start, pos, max_backward_limit, gap, starting_dist_cache, model, queue, nodes) + { + var posdata *posData = startPosQueueAt(queue, 0) + var min_cost float32 = (posdata.cost + zopfliCostModelGetMinCostCmd(model) + zopfliCostModelGetLiteralCosts(model, posdata.pos, pos)) + min_len = computeMinimumCopyLength(min_cost, nodes, num_bytes, pos) + } + + /* Go over the command starting positions in order of increasing cost + difference. */ + for k = 0; k < max_iters && k < startPosQueueSize(queue); k++ { + var posdata *posData = startPosQueueAt(queue, k) + var start uint = posdata.pos + var inscode uint16 = getInsertLengthCode(pos - start) + var start_costdiff float32 = posdata.costdiff + var base_cost float32 = start_costdiff + float32(getInsertExtra(inscode)) + zopfliCostModelGetLiteralCosts(model, 0, pos) + var best_len uint = min_len - 1 + var j uint = 0 + /* Look for last distance matches using the distance cache from this + starting position. 
*/ + for ; j < numDistanceShortCodes && best_len < max_len; j++ { + var idx uint = uint(kDistanceCacheIndex[j]) + var backward uint = uint(posdata.distance_cache[idx] + kDistanceCacheOffset[j]) + var prev_ix uint = cur_ix - backward + var len uint = 0 + var continuation byte = ringbuffer[cur_ix_masked+best_len] + if cur_ix_masked+best_len > ringbuffer_mask { + break + } + + if backward > max_distance+gap { + /* Word dictionary -> ignore. */ + continue + } + + if backward <= max_distance { + /* Regular backward reference. */ + if prev_ix >= cur_ix { + continue + } + + prev_ix &= ringbuffer_mask + if prev_ix+best_len > ringbuffer_mask || continuation != ringbuffer[prev_ix+best_len] { + continue + } + + len = findMatchLengthWithLimit(ringbuffer[prev_ix:], ringbuffer[cur_ix_masked:], max_len) + } else { + continue + } + { + var dist_cost float32 = base_cost + zopfliCostModelGetDistanceCost(model, j) + var l uint + for l = best_len + 1; l <= len; l++ { + var copycode uint16 = getCopyLengthCode(l) + var cmdcode uint16 = combineLengthCodes(inscode, copycode, j == 0) + var tmp float32 + if cmdcode < 128 { + tmp = base_cost + } else { + tmp = dist_cost + } + var cost float32 = tmp + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode) + if cost < nodes[pos+l].u.cost { + updateZopfliNode(nodes, pos, start, l, l, backward, j+1, cost) + result = brotli_max_size_t(result, l) + } + + best_len = l + } + } + } + + /* At higher iterations look only for new last distance matches, since + looking only for new command start positions with the same distances + does not help much. */ + if k >= 2 { + continue + } + { + /* Loop through all possible copy lengths at this position. 
*/ + var len uint = min_len + for j = 0; j < num_matches; j++ { + var match backwardMatch = matches[j] + var dist uint = uint(match.distance) + var is_dictionary_match bool = (dist > max_distance+gap) + var dist_code uint = dist + numDistanceShortCodes - 1 + var dist_symbol uint16 + var distextra uint32 + var distnumextra uint32 + var dist_cost float32 + var max_match_len uint + /* We already tried all possible last distance matches, so we can use + normal distance code here. */ + prefixEncodeCopyDistance(dist_code, uint(params.dist.num_direct_distance_codes), uint(params.dist.distance_postfix_bits), &dist_symbol, &distextra) + + distnumextra = uint32(dist_symbol) >> 10 + dist_cost = base_cost + float32(distnumextra) + zopfliCostModelGetDistanceCost(model, uint(dist_symbol)&0x3FF) + + /* Try all copy lengths up until the maximum copy length corresponding + to this distance. If the distance refers to the static dictionary, or + the maximum length is long enough, try only one maximum length. 
*/ + max_match_len = backwardMatchLength(&match) + + if len < max_match_len && (is_dictionary_match || max_match_len > max_zopfli_len) { + len = max_match_len + } + + for ; len <= max_match_len; len++ { + var len_code uint + if is_dictionary_match { + len_code = backwardMatchLengthCode(&match) + } else { + len_code = len + } + var copycode uint16 = getCopyLengthCode(len_code) + var cmdcode uint16 = combineLengthCodes(inscode, copycode, false) + var cost float32 = dist_cost + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode) + if cost < nodes[pos+len].u.cost { + updateZopfliNode(nodes, pos, start, uint(len), len_code, dist, 0, cost) + if len > result { + result = len + } + } + } + } + } + } + + return result +} + +func computeShortestPathFromNodes(num_bytes uint, nodes []zopfliNode) uint { + var index uint = num_bytes + var num_commands uint = 0 + for nodes[index].dcode_insert_length&0x7FFFFFF == 0 && nodes[index].length == 1 { + index-- + } + nodes[index].u.next = math.MaxUint32 + for index != 0 { + var len uint = uint(zopfliNodeCommandLength(&nodes[index])) + index -= uint(len) + nodes[index].u.next = uint32(len) + num_commands++ + } + + return num_commands +} + +/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */ +func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands *[]command, num_literals *uint) { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var pos uint = 0 + var offset uint32 = nodes[0].u.next + var i uint + var gap uint = 0 + for i = 0; offset != math.MaxUint32; i++ { + var next *zopfliNode = &nodes[uint32(pos)+offset] + var copy_length uint = uint(zopfliNodeCopyLength(next)) + var insert_length uint = uint(next.dcode_insert_length & 0x7FFFFFF) + pos += insert_length + offset = next.u.next + if i == 0 { + insert_length += *last_insert_len + *last_insert_len = 0 + } + { + var distance uint = 
uint(zopfliNodeCopyDistance(next)) + var len_code uint = uint(zopfliNodeLengthCode(next)) + var max_distance uint = brotli_min_size_t(block_start+pos, max_backward_limit) + var is_dictionary bool = (distance > max_distance+gap) + var dist_code uint = uint(zopfliNodeDistanceCode(next)) + *commands = append(*commands, makeCommand(¶ms.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code)) + + if !is_dictionary && dist_code > 0 { + dist_cache[3] = dist_cache[2] + dist_cache[2] = dist_cache[1] + dist_cache[1] = dist_cache[0] + dist_cache[0] = int(distance) + } + } + + *num_literals += insert_length + pos += copy_length + } + + *last_insert_len += num_bytes - pos +} + +func zopfliIterate(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, gap uint, dist_cache []int, model *zopfliCostModel, num_matches []uint32, matches []backwardMatch, nodes []zopfliNode) uint { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var max_zopfli_len uint = maxZopfliLen(params) + var queue startPosQueue + var cur_match_pos uint = 0 + var i uint + nodes[0].length = 0 + nodes[0].u.cost = 0 + initStartPosQueue(&queue) + for i = 0; i+3 < num_bytes; i++ { + var skip uint = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, max_backward_limit, dist_cache, uint(num_matches[i]), matches[cur_match_pos:], model, &queue, nodes) + if skip < longCopyQuickStep { + skip = 0 + } + cur_match_pos += uint(num_matches[i]) + if num_matches[i] == 1 && backwardMatchLength(&matches[cur_match_pos-1]) > max_zopfli_len { + skip = brotli_max_size_t(backwardMatchLength(&matches[cur_match_pos-1]), skip) + } + + if skip > 1 { + skip-- + for skip != 0 { + i++ + if i+3 >= num_bytes { + break + } + evaluateNode(position, i, max_backward_limit, gap, dist_cache, model, &queue, nodes) + cur_match_pos += uint(num_matches[i]) + skip-- + } + } + } + + return computeShortestPathFromNodes(num_bytes, nodes) +} + +/* Computes the 
shortest path of commands from position to at most + position + num_bytes. + + On return, path->size() is the number of commands found and path[i] is the + length of the i-th command (copy length plus insert length). + Note that the sum of the lengths of all commands can be less than num_bytes. + + On return, the nodes[0..num_bytes] array will have the following + "ZopfliNode array invariant": + For each i in [1..num_bytes], if nodes[i].cost < kInfinity, then + (1) nodes[i].copy_length() >= 2 + (2) nodes[i].command_length() <= i and + (3) nodes[i - nodes[i].command_length()].cost < kInfinity + + REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1 */ +func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, dist_cache []int, hasher *h10, nodes []zopfliNode) uint { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var max_zopfli_len uint = maxZopfliLen(params) + var model zopfliCostModel + var queue startPosQueue + var matches [2 * (maxNumMatchesH10 + 64)]backwardMatch + var store_end uint + if num_bytes >= hasher.StoreLookahead() { + store_end = position + num_bytes - hasher.StoreLookahead() + 1 + } else { + store_end = position + } + var i uint + var gap uint = 0 + var lz_matches_offset uint = 0 + nodes[0].length = 0 + nodes[0].u.cost = 0 + initZopfliCostModel(&model, ¶ms.dist, num_bytes) + zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask) + initStartPosQueue(&queue) + for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ { + var pos uint = position + i + var max_distance uint = brotli_min_size_t(pos, max_backward_limit) + var skip uint + var num_matches uint + num_matches = findAllMatchesH10(hasher, ¶ms.dictionary, ringbuffer, ringbuffer_mask, pos, num_bytes-i, max_distance, gap, params, matches[lz_matches_offset:]) + if num_matches > 0 && backwardMatchLength(&matches[num_matches-1]) > max_zopfli_len { + matches[0] = matches[num_matches-1] + 
num_matches = 1 + } + + skip = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, max_backward_limit, dist_cache, num_matches, matches[:], &model, &queue, nodes) + if skip < longCopyQuickStep { + skip = 0 + } + if num_matches == 1 && backwardMatchLength(&matches[0]) > max_zopfli_len { + skip = brotli_max_size_t(backwardMatchLength(&matches[0]), skip) + } + + if skip > 1 { + /* Add the tail of the copy to the hasher. */ + hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+skip, store_end)) + + skip-- + for skip != 0 { + i++ + if i+hasher.HashTypeLength()-1 >= num_bytes { + break + } + evaluateNode(position, i, max_backward_limit, gap, dist_cache, &model, &queue, nodes) + skip-- + } + } + } + + cleanupZopfliCostModel(&model) + return computeShortestPathFromNodes(num_bytes, nodes) +} + +func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { + var nodes []zopfliNode + nodes = make([]zopfliNode, (num_bytes + 1)) + initZopfliNodes(nodes, num_bytes+1) + zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes) + zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals) + nodes = nil +} + +func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { + var max_backward_limit uint = maxBackwardLimit(params.lgwin) + var num_matches []uint32 = make([]uint32, num_bytes) + var matches_size uint = 4 * num_bytes + var store_end uint + if num_bytes >= hasher.StoreLookahead() { + store_end = position + num_bytes - hasher.StoreLookahead() + 1 + } else { + store_end = position + } + var 
cur_match_pos uint = 0 + var i uint + var orig_num_literals uint + var orig_last_insert_len uint + var orig_dist_cache [4]int + var orig_num_commands int + var model zopfliCostModel + var nodes []zopfliNode + var matches []backwardMatch = make([]backwardMatch, matches_size) + var gap uint = 0 + var shadow_matches uint = 0 + var new_array []backwardMatch + for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ { + var pos uint = position + i + var max_distance uint = brotli_min_size_t(pos, max_backward_limit) + var max_length uint = num_bytes - i + var num_found_matches uint + var cur_match_end uint + var j uint + + /* Ensure that we have enough free slots. */ + if matches_size < cur_match_pos+maxNumMatchesH10+shadow_matches { + var new_size uint = matches_size + if new_size == 0 { + new_size = cur_match_pos + maxNumMatchesH10 + shadow_matches + } + + for new_size < cur_match_pos+maxNumMatchesH10+shadow_matches { + new_size *= 2 + } + + new_array = make([]backwardMatch, new_size) + if matches_size != 0 { + copy(new_array, matches[:matches_size]) + } + + matches = new_array + matches_size = new_size + } + + num_found_matches = findAllMatchesH10(hasher.(*h10), ¶ms.dictionary, ringbuffer, ringbuffer_mask, pos, max_length, max_distance, gap, params, matches[cur_match_pos+shadow_matches:]) + cur_match_end = cur_match_pos + num_found_matches + for j = cur_match_pos; j+1 < cur_match_end; j++ { + assert(backwardMatchLength(&matches[j]) <= backwardMatchLength(&matches[j+1])) + } + + num_matches[i] = uint32(num_found_matches) + if num_found_matches > 0 { + var match_len uint = backwardMatchLength(&matches[cur_match_end-1]) + if match_len > maxZopfliLenQuality11 { + var skip uint = match_len - 1 + matches[cur_match_pos] = matches[cur_match_end-1] + cur_match_pos++ + num_matches[i] = 1 + + /* Add the tail of the copy to the hasher. 
*/ + hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+match_len, store_end)) + var pos uint = i + for i := 0; i < int(skip); i++ { + num_matches[pos+1:][i] = 0 + } + i += skip + } else { + cur_match_pos = cur_match_end + } + } + } + + orig_num_literals = *num_literals + orig_last_insert_len = *last_insert_len + copy(orig_dist_cache[:], dist_cache[:4]) + orig_num_commands = len(*commands) + nodes = make([]zopfliNode, (num_bytes + 1)) + initZopfliCostModel(&model, ¶ms.dist, num_bytes) + for i = 0; i < 2; i++ { + initZopfliNodes(nodes, num_bytes+1) + if i == 0 { + zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask) + } else { + zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, (*commands)[orig_num_commands:], orig_last_insert_len) + } + + *commands = (*commands)[:orig_num_commands] + *num_literals = orig_num_literals + *last_insert_len = orig_last_insert_len + copy(dist_cache, orig_dist_cache[:4]) + zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes) + zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals) + } + + cleanupZopfliCostModel(&model) + nodes = nil + matches = nil + num_matches = nil +} diff --git a/vendor/github.com/andybalholm/brotli/bit_cost.go b/vendor/github.com/andybalholm/brotli/bit_cost.go new file mode 100644 index 00000000000..0005fc15e63 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bit_cost.go @@ -0,0 +1,436 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Functions to estimate the bit cost of Huffman trees. 
*/ +func shannonEntropy(population []uint32, size uint, total *uint) float64 { + var sum uint = 0 + var retval float64 = 0 + var population_end []uint32 = population[size:] + var p uint + for -cap(population) < -cap(population_end) { + p = uint(population[0]) + population = population[1:] + sum += p + retval -= float64(p) * fastLog2(p) + } + + if sum != 0 { + retval += float64(sum) * fastLog2(sum) + } + *total = sum + return retval +} + +func bitsEntropy(population []uint32, size uint) float64 { + var sum uint + var retval float64 = shannonEntropy(population, size, &sum) + if retval < float64(sum) { + /* At least one bit per literal is needed. */ + retval = float64(sum) + } + + return retval +} + +const kOneSymbolHistogramCost float64 = 12 +const kTwoSymbolHistogramCost float64 = 20 +const kThreeSymbolHistogramCost float64 = 28 +const kFourSymbolHistogramCost float64 = 37 + +func populationCostLiteral(histogram *histogramLiteral) float64 { + var data_size uint = histogramDataSizeLiteral() + var count int = 0 + var s [5]uint + var bits float64 = 0.0 + var i uint + if histogram.total_count_ == 0 { + return kOneSymbolHistogramCost + } + + for i = 0; i < data_size; i++ { + if histogram.data_[i] > 0 { + s[count] = i + count++ + if count > 4 { + break + } + } + } + + if count == 1 { + return kOneSymbolHistogramCost + } + + if count == 2 { + return kTwoSymbolHistogramCost + float64(histogram.total_count_) + } + + if count == 3 { + var histo0 uint32 = histogram.data_[s[0]] + var histo1 uint32 = histogram.data_[s[1]] + var histo2 uint32 = histogram.data_[s[2]] + var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) + return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) + } + + if count == 4 { + var histo [4]uint32 + var h23 uint32 + var histomax uint32 + for i = 0; i < 4; i++ { + histo[i] = histogram.data_[s[i]] + } + + /* Sort */ + for i = 0; i < 4; i++ { + var j uint + for j = i + 1; j 
< 4; j++ { + if histo[j] > histo[i] { + var tmp uint32 = histo[j] + histo[j] = histo[i] + histo[i] = tmp + } + } + } + + h23 = histo[2] + histo[3] + histomax = brotli_max_uint32_t(h23, histo[0]) + return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) + } + { + var max_depth uint = 1 + var depth_histo = [codeLengthCodes]uint32{0} + /* In this loop we compute the entropy of the histogram and simultaneously + build a simplified histogram of the code length codes where we use the + zero repeat code 17, but we don't use the non-zero repeat code 16. */ + + var log2total float64 = fastLog2(histogram.total_count_) + for i = 0; i < data_size; { + if histogram.data_[i] > 0 { + var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) + /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = + = log2(total_count) - log2(count(symbol)) */ + + var depth uint = uint(log2p + 0.5) + /* Approximate the bit depth by round(-log2(P(symbol))) */ + bits += float64(histogram.data_[i]) * log2p + + if depth > 15 { + depth = 15 + } + + if depth > max_depth { + max_depth = depth + } + + depth_histo[depth]++ + i++ + } else { + var reps uint32 = 1 + /* Compute the run length of zeros and add the appropriate number of 0 + and 17 code length codes to the code length code histogram. */ + + var k uint + for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { + reps++ + } + + i += uint(reps) + if i == data_size { + /* Don't add any cost for the last zero run, since these are encoded + only implicitly. */ + break + } + + if reps < 3 { + depth_histo[0] += reps + } else { + reps -= 2 + for reps > 0 { + depth_histo[repeatZeroCodeLength]++ + + /* Add the 3 extra bits for the 17 code length code. */ + bits += 3 + + reps >>= 3 + } + } + } + } + + /* Add the estimated encoding cost of the code length code histogram. */ + bits += float64(18 + 2*max_depth) + + /* Add the entropy of the code length code histogram. 
*/ + bits += bitsEntropy(depth_histo[:], codeLengthCodes) + } + + return bits +} + +func populationCostCommand(histogram *histogramCommand) float64 { + var data_size uint = histogramDataSizeCommand() + var count int = 0 + var s [5]uint + var bits float64 = 0.0 + var i uint + if histogram.total_count_ == 0 { + return kOneSymbolHistogramCost + } + + for i = 0; i < data_size; i++ { + if histogram.data_[i] > 0 { + s[count] = i + count++ + if count > 4 { + break + } + } + } + + if count == 1 { + return kOneSymbolHistogramCost + } + + if count == 2 { + return kTwoSymbolHistogramCost + float64(histogram.total_count_) + } + + if count == 3 { + var histo0 uint32 = histogram.data_[s[0]] + var histo1 uint32 = histogram.data_[s[1]] + var histo2 uint32 = histogram.data_[s[2]] + var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) + return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) + } + + if count == 4 { + var histo [4]uint32 + var h23 uint32 + var histomax uint32 + for i = 0; i < 4; i++ { + histo[i] = histogram.data_[s[i]] + } + + /* Sort */ + for i = 0; i < 4; i++ { + var j uint + for j = i + 1; j < 4; j++ { + if histo[j] > histo[i] { + var tmp uint32 = histo[j] + histo[j] = histo[i] + histo[i] = tmp + } + } + } + + h23 = histo[2] + histo[3] + histomax = brotli_max_uint32_t(h23, histo[0]) + return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) + } + { + var max_depth uint = 1 + var depth_histo = [codeLengthCodes]uint32{0} + /* In this loop we compute the entropy of the histogram and simultaneously + build a simplified histogram of the code length codes where we use the + zero repeat code 17, but we don't use the non-zero repeat code 16. 
*/ + + var log2total float64 = fastLog2(histogram.total_count_) + for i = 0; i < data_size; { + if histogram.data_[i] > 0 { + var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) + /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = + = log2(total_count) - log2(count(symbol)) */ + + var depth uint = uint(log2p + 0.5) + /* Approximate the bit depth by round(-log2(P(symbol))) */ + bits += float64(histogram.data_[i]) * log2p + + if depth > 15 { + depth = 15 + } + + if depth > max_depth { + max_depth = depth + } + + depth_histo[depth]++ + i++ + } else { + var reps uint32 = 1 + /* Compute the run length of zeros and add the appropriate number of 0 + and 17 code length codes to the code length code histogram. */ + + var k uint + for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { + reps++ + } + + i += uint(reps) + if i == data_size { + /* Don't add any cost for the last zero run, since these are encoded + only implicitly. */ + break + } + + if reps < 3 { + depth_histo[0] += reps + } else { + reps -= 2 + for reps > 0 { + depth_histo[repeatZeroCodeLength]++ + + /* Add the 3 extra bits for the 17 code length code. */ + bits += 3 + + reps >>= 3 + } + } + } + } + + /* Add the estimated encoding cost of the code length code histogram. */ + bits += float64(18 + 2*max_depth) + + /* Add the entropy of the code length code histogram. 
*/ + bits += bitsEntropy(depth_histo[:], codeLengthCodes) + } + + return bits +} + +func populationCostDistance(histogram *histogramDistance) float64 { + var data_size uint = histogramDataSizeDistance() + var count int = 0 + var s [5]uint + var bits float64 = 0.0 + var i uint + if histogram.total_count_ == 0 { + return kOneSymbolHistogramCost + } + + for i = 0; i < data_size; i++ { + if histogram.data_[i] > 0 { + s[count] = i + count++ + if count > 4 { + break + } + } + } + + if count == 1 { + return kOneSymbolHistogramCost + } + + if count == 2 { + return kTwoSymbolHistogramCost + float64(histogram.total_count_) + } + + if count == 3 { + var histo0 uint32 = histogram.data_[s[0]] + var histo1 uint32 = histogram.data_[s[1]] + var histo2 uint32 = histogram.data_[s[2]] + var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) + return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) + } + + if count == 4 { + var histo [4]uint32 + var h23 uint32 + var histomax uint32 + for i = 0; i < 4; i++ { + histo[i] = histogram.data_[s[i]] + } + + /* Sort */ + for i = 0; i < 4; i++ { + var j uint + for j = i + 1; j < 4; j++ { + if histo[j] > histo[i] { + var tmp uint32 = histo[j] + histo[j] = histo[i] + histo[i] = tmp + } + } + } + + h23 = histo[2] + histo[3] + histomax = brotli_max_uint32_t(h23, histo[0]) + return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) + } + { + var max_depth uint = 1 + var depth_histo = [codeLengthCodes]uint32{0} + /* In this loop we compute the entropy of the histogram and simultaneously + build a simplified histogram of the code length codes where we use the + zero repeat code 17, but we don't use the non-zero repeat code 16. 
*/ + + var log2total float64 = fastLog2(histogram.total_count_) + for i = 0; i < data_size; { + if histogram.data_[i] > 0 { + var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) + /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = + = log2(total_count) - log2(count(symbol)) */ + + var depth uint = uint(log2p + 0.5) + /* Approximate the bit depth by round(-log2(P(symbol))) */ + bits += float64(histogram.data_[i]) * log2p + + if depth > 15 { + depth = 15 + } + + if depth > max_depth { + max_depth = depth + } + + depth_histo[depth]++ + i++ + } else { + var reps uint32 = 1 + /* Compute the run length of zeros and add the appropriate number of 0 + and 17 code length codes to the code length code histogram. */ + + var k uint + for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { + reps++ + } + + i += uint(reps) + if i == data_size { + /* Don't add any cost for the last zero run, since these are encoded + only implicitly. */ + break + } + + if reps < 3 { + depth_histo[0] += reps + } else { + reps -= 2 + for reps > 0 { + depth_histo[repeatZeroCodeLength]++ + + /* Add the 3 extra bits for the 17 code length code. */ + bits += 3 + + reps >>= 3 + } + } + } + } + + /* Add the estimated encoding cost of the code length code histogram. */ + bits += float64(18 + 2*max_depth) + + /* Add the entropy of the code length code histogram. */ + bits += bitsEntropy(depth_histo[:], codeLengthCodes) + } + + return bits +} diff --git a/vendor/github.com/andybalholm/brotli/bit_reader.go b/vendor/github.com/andybalholm/brotli/bit_reader.go new file mode 100644 index 00000000000..fba8687c69f --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bit_reader.go @@ -0,0 +1,266 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Bit reading helpers */ + +const shortFillBitWindowRead = (8 >> 1) + +var kBitMask = [33]uint32{ + 0x00000000, + 0x00000001, + 0x00000003, + 0x00000007, + 0x0000000F, + 0x0000001F, + 0x0000003F, + 0x0000007F, + 0x000000FF, + 0x000001FF, + 0x000003FF, + 0x000007FF, + 0x00000FFF, + 0x00001FFF, + 0x00003FFF, + 0x00007FFF, + 0x0000FFFF, + 0x0001FFFF, + 0x0003FFFF, + 0x0007FFFF, + 0x000FFFFF, + 0x001FFFFF, + 0x003FFFFF, + 0x007FFFFF, + 0x00FFFFFF, + 0x01FFFFFF, + 0x03FFFFFF, + 0x07FFFFFF, + 0x0FFFFFFF, + 0x1FFFFFFF, + 0x3FFFFFFF, + 0x7FFFFFFF, + 0xFFFFFFFF, +} + +func bitMask(n uint32) uint32 { + return kBitMask[n] +} + +type bitReader struct { + val_ uint64 + bit_pos_ uint32 + input []byte + input_len uint + byte_pos uint +} + +type bitReaderState struct { + val_ uint64 + bit_pos_ uint32 + input []byte + input_len uint + byte_pos uint +} + +/* Initializes the BrotliBitReader fields. */ + +/* Ensures that accumulator is not empty. + May consume up to sizeof(brotli_reg_t) - 1 bytes of input. + Returns false if data is required but there is no input available. + For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned + reading. */ +func bitReaderSaveState(from *bitReader, to *bitReaderState) { + to.val_ = from.val_ + to.bit_pos_ = from.bit_pos_ + to.input = from.input + to.input_len = from.input_len + to.byte_pos = from.byte_pos +} + +func bitReaderRestoreState(to *bitReader, from *bitReaderState) { + to.val_ = from.val_ + to.bit_pos_ = from.bit_pos_ + to.input = from.input + to.input_len = from.input_len + to.byte_pos = from.byte_pos +} + +func getAvailableBits(br *bitReader) uint32 { + return 64 - br.bit_pos_ +} + +/* Returns amount of unread bytes the bit reader still has buffered from the + BrotliInput, including whole bytes in br->val_. 
*/ +func getRemainingBytes(br *bitReader) uint { + return uint(uint32(br.input_len-br.byte_pos) + (getAvailableBits(br) >> 3)) +} + +/* Checks if there is at least |num| bytes left in the input ring-buffer + (excluding the bits remaining in br->val_). */ +func checkInputAmount(br *bitReader, num uint) bool { + return br.input_len-br.byte_pos >= num +} + +/* Guarantees that there are at least |n_bits| + 1 bits in accumulator. + Precondition: accumulator contains at least 1 bit. + |n_bits| should be in the range [1..24] for regular build. For portable + non-64-bit little-endian build only 16 bits are safe to request. */ +func fillBitWindow(br *bitReader, n_bits uint32) { + if br.bit_pos_ >= 32 { + br.val_ >>= 32 + br.bit_pos_ ^= 32 /* here same as -= 32 because of the if condition */ + br.val_ |= (uint64(binary.LittleEndian.Uint32(br.input[br.byte_pos:]))) << 32 + br.byte_pos += 4 + } +} + +/* Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no + more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input. */ +func fillBitWindow16(br *bitReader) { + fillBitWindow(br, 17) +} + +/* Tries to pull one byte of input to accumulator. + Returns false if there is no input available. */ +func pullByte(br *bitReader) bool { + if br.byte_pos == br.input_len { + return false + } + + br.val_ >>= 8 + br.val_ |= (uint64(br.input[br.byte_pos])) << 56 + br.bit_pos_ -= 8 + br.byte_pos++ + return true +} + +/* Returns currently available bits. + The number of valid bits could be calculated by BrotliGetAvailableBits. */ +func getBitsUnmasked(br *bitReader) uint64 { + return br.val_ >> br.bit_pos_ +} + +/* Like BrotliGetBits, but does not mask the result. + The result contains at least 16 valid bits. */ +func get16BitsUnmasked(br *bitReader) uint32 { + fillBitWindow(br, 16) + return uint32(getBitsUnmasked(br)) +} + +/* Returns the specified number of bits from |br| without advancing bit + position. 
*/ +func getBits(br *bitReader, n_bits uint32) uint32 { + fillBitWindow(br, n_bits) + return uint32(getBitsUnmasked(br)) & bitMask(n_bits) +} + +/* Tries to peek the specified amount of bits. Returns false, if there + is not enough input. */ +func safeGetBits(br *bitReader, n_bits uint32, val *uint32) bool { + for getAvailableBits(br) < n_bits { + if !pullByte(br) { + return false + } + } + + *val = uint32(getBitsUnmasked(br)) & bitMask(n_bits) + return true +} + +/* Advances the bit pos by |n_bits|. */ +func dropBits(br *bitReader, n_bits uint32) { + br.bit_pos_ += n_bits +} + +func bitReaderUnload(br *bitReader) { + var unused_bytes uint32 = getAvailableBits(br) >> 3 + var unused_bits uint32 = unused_bytes << 3 + br.byte_pos -= uint(unused_bytes) + if unused_bits == 64 { + br.val_ = 0 + } else { + br.val_ <<= unused_bits + } + + br.bit_pos_ += unused_bits +} + +/* Reads the specified number of bits from |br| and advances the bit pos. + Precondition: accumulator MUST contain at least |n_bits|. */ +func takeBits(br *bitReader, n_bits uint32, val *uint32) { + *val = uint32(getBitsUnmasked(br)) & bitMask(n_bits) + dropBits(br, n_bits) +} + +/* Reads the specified number of bits from |br| and advances the bit pos. + Assumes that there is enough input to perform BrotliFillBitWindow. */ +func readBits(br *bitReader, n_bits uint32) uint32 { + var val uint32 + fillBitWindow(br, n_bits) + takeBits(br, n_bits, &val) + return val +} + +/* Tries to read the specified amount of bits. Returns false, if there + is not enough input. |n_bits| MUST be positive. */ +func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool { + for getAvailableBits(br) < n_bits { + if !pullByte(br) { + return false + } + } + + takeBits(br, n_bits, val) + return true +} + +/* Advances the bit reader position to the next byte boundary and verifies + that any skipped bits are set to zero. 
*/ +func bitReaderJumpToByteBoundary(br *bitReader) bool { + var pad_bits_count uint32 = getAvailableBits(br) & 0x7 + var pad_bits uint32 = 0 + if pad_bits_count != 0 { + takeBits(br, pad_bits_count, &pad_bits) + } + + return pad_bits == 0 +} + +/* Copies remaining input bytes stored in the bit reader to the output. Value + |num| may not be larger than BrotliGetRemainingBytes. The bit reader must be + warmed up again after this. */ +func copyBytes(dest []byte, br *bitReader, num uint) { + for getAvailableBits(br) >= 8 && num > 0 { + dest[0] = byte(getBitsUnmasked(br)) + dropBits(br, 8) + dest = dest[1:] + num-- + } + + copy(dest, br.input[br.byte_pos:][:num]) + br.byte_pos += num +} + +func initBitReader(br *bitReader) { + br.val_ = 0 + br.bit_pos_ = 64 +} + +func warmupBitReader(br *bitReader) bool { + /* Fixing alignment after unaligned BrotliFillWindow would result accumulator + overflow. If unalignment is caused by BrotliSafeReadBits, then there is + enough space in accumulator to fix alignment. */ + if getAvailableBits(br) == 0 { + if !pullByte(br) { + return false + } + } + + return true +} diff --git a/vendor/github.com/andybalholm/brotli/bitwriter.go b/vendor/github.com/andybalholm/brotli/bitwriter.go new file mode 100644 index 00000000000..dfc60360f36 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bitwriter.go @@ -0,0 +1,56 @@ +package brotli + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Write bits into a byte array. */ + +type bitWriter struct { + dst []byte + + // Data waiting to be written is the low nbits of bits. 
+ bits uint64 + nbits uint +} + +func (w *bitWriter) writeBits(nb uint, b uint64) { + w.bits |= b << w.nbits + w.nbits += nb + if w.nbits >= 32 { + bits := w.bits + w.bits >>= 32 + w.nbits -= 32 + w.dst = append(w.dst, + byte(bits), + byte(bits>>8), + byte(bits>>16), + byte(bits>>24), + ) + } +} + +func (w *bitWriter) writeSingleBit(bit bool) { + if bit { + w.writeBits(1, 1) + } else { + w.writeBits(1, 0) + } +} + +func (w *bitWriter) jumpToByteBoundary() { + dst := w.dst + for w.nbits != 0 { + dst = append(dst, byte(w.bits)) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + } + w.bits = 0 + w.dst = dst +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter.go b/vendor/github.com/andybalholm/brotli/block_splitter.go new file mode 100644 index 00000000000..978a1314748 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter.go @@ -0,0 +1,144 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Block split point selection utilities. */ + +type blockSplit struct { + num_types uint + num_blocks uint + types []byte + lengths []uint32 + types_alloc_size uint + lengths_alloc_size uint +} + +const ( + kMaxLiteralHistograms uint = 100 + kMaxCommandHistograms uint = 50 + kLiteralBlockSwitchCost float64 = 28.1 + kCommandBlockSwitchCost float64 = 13.5 + kDistanceBlockSwitchCost float64 = 14.6 + kLiteralStrideLength uint = 70 + kCommandStrideLength uint = 40 + kSymbolsPerLiteralHistogram uint = 544 + kSymbolsPerCommandHistogram uint = 530 + kSymbolsPerDistanceHistogram uint = 544 + kMinLengthForBlockSplitting uint = 128 + kIterMulForRefining uint = 2 + kMinItersForRefining uint = 100 +) + +func countLiterals(cmds []command) uint { + var total_length uint = 0 + /* Count how many we have. 
*/ + + for i := range cmds { + total_length += uint(cmds[i].insert_len_) + } + + return total_length +} + +func copyLiteralsToByteArray(cmds []command, data []byte, offset uint, mask uint, literals []byte) { + var pos uint = 0 + var from_pos uint = offset & mask + for i := range cmds { + var insert_len uint = uint(cmds[i].insert_len_) + if from_pos+insert_len > mask { + var head_size uint = mask + 1 - from_pos + copy(literals[pos:], data[from_pos:][:head_size]) + from_pos = 0 + pos += head_size + insert_len -= head_size + } + + if insert_len > 0 { + copy(literals[pos:], data[from_pos:][:insert_len]) + pos += insert_len + } + + from_pos = uint((uint32(from_pos+insert_len) + commandCopyLen(&cmds[i])) & uint32(mask)) + } +} + +func myRand(seed *uint32) uint32 { + /* Initial seed should be 7. In this case, loop length is (1 << 29). */ + *seed *= 16807 + + return *seed +} + +func bitCost(count uint) float64 { + if count == 0 { + return -2.0 + } else { + return fastLog2(count) + } +} + +const histogramsPerBatch = 64 + +const clustersPerBatch = 16 + +func initBlockSplit(self *blockSplit) { + self.num_types = 0 + self.num_blocks = 0 + self.types = self.types[:0] + self.lengths = self.lengths[:0] + self.types_alloc_size = 0 + self.lengths_alloc_size = 0 +} + +func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) { + { + var literals_count uint = countLiterals(cmds) + var literals []byte = make([]byte, literals_count) + + /* Create a continuous array of literals. */ + copyLiteralsToByteArray(cmds, data, pos, mask, literals) + + /* Create the block split on the array of literals. + Literal histograms have alphabet size 256. 
*/ + splitByteVectorLiteral(literals, literals_count, kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, kLiteralStrideLength, kLiteralBlockSwitchCost, params, literal_split) + + literals = nil + } + { + var insert_and_copy_codes []uint16 = make([]uint16, len(cmds)) + /* Compute prefix codes for commands. */ + + for i := range cmds { + insert_and_copy_codes[i] = cmds[i].cmd_prefix_ + } + + /* Create the block split on the array of command prefixes. */ + splitByteVectorCommand(insert_and_copy_codes, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split) + + /* TODO: reuse for distances? */ + + insert_and_copy_codes = nil + } + { + var distance_prefixes []uint16 = make([]uint16, len(cmds)) + var j uint = 0 + /* Create a continuous array of distance prefixes. */ + + for i := range cmds { + var cmd *command = &cmds[i] + if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { + distance_prefixes[j] = cmd.dist_prefix_ & 0x3FF + j++ + } + } + + /* Create the block split on the array of distance prefixes. */ + splitByteVectorDistance(distance_prefixes, j, kSymbolsPerDistanceHistogram, kMaxCommandHistograms, kCommandStrideLength, kDistanceBlockSwitchCost, params, dist_split) + + distance_prefixes = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_command.go b/vendor/github.com/andybalholm/brotli/block_splitter_command.go new file mode 100644 index 00000000000..9dec13e4d90 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter_command.go @@ -0,0 +1,434 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func initialEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { + var seed uint32 = 7 + var block_length uint = length / num_histograms + var i uint + clearHistogramsCommand(histograms, num_histograms) + for i = 0; i < num_histograms; i++ { + var pos uint = length * i / num_histograms + if i != 0 { + pos += uint(myRand(&seed) % uint32(block_length)) + } + + if pos+stride >= length { + pos = length - stride - 1 + } + + histogramAddVectorCommand(&histograms[i], data[pos:], stride) + } +} + +func randomSampleCommand(seed *uint32, data []uint16, length uint, stride uint, sample *histogramCommand) { + var pos uint = 0 + if stride >= length { + stride = length + } else { + pos = uint(myRand(seed) % uint32(length-stride+1)) + } + + histogramAddVectorCommand(sample, data[pos:], stride) +} + +func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { + var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining + var seed uint32 = 7 + var iter uint + iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms + for iter = 0; iter < iters; iter++ { + var sample histogramCommand + histogramClearCommand(&sample) + randomSampleCommand(&seed, data, length, stride, &sample) + histogramAddHistogramCommand(&histograms[iter%num_histograms], &sample) + } +} + +/* Assigns a block id from the range [0, num_histograms) to each data element + in data[0..length) and fills in block_id[0..length) with the assigned values. + Returns the number of blocks, i.e. one plus the number of block switches. 
*/ +func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { + var data_size uint = histogramDataSizeCommand() + var bitmaplen uint = (num_histograms + 7) >> 3 + var num_blocks uint = 1 + var i uint + var j uint + assert(num_histograms <= 256) + if num_histograms <= 1 { + for i = 0; i < length; i++ { + block_id[i] = 0 + } + + return 1 + } + + for i := 0; i < int(data_size*num_histograms); i++ { + insert_cost[i] = 0 + } + for i = 0; i < num_histograms; i++ { + insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) + } + + for i = data_size; i != 0; { + i-- + for j = 0; j < num_histograms; j++ { + insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) + } + } + + for i := 0; i < int(num_histograms); i++ { + cost[i] = 0 + } + for i := 0; i < int(length*bitmaplen); i++ { + switch_signal[i] = 0 + } + + /* After each iteration of this loop, cost[k] will contain the difference + between the minimum cost of arriving at the current byte position using + entropy code k, and the minimum cost of arriving at the current byte + position. This difference is capped at the block switch cost, and if it + reaches block switch cost, it means that when we trace back from the last + position, we need to switch here. */ + for i = 0; i < length; i++ { + var byte_ix uint = i + var ix uint = byte_ix * bitmaplen + var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms + var min_cost float64 = 1e99 + var block_switch_cost float64 = block_switch_bitcost + var k uint + for k = 0; k < num_histograms; k++ { + /* We are coding the symbol in data[byte_ix] with entropy code k. */ + cost[k] += insert_cost[insert_cost_ix+k] + + if cost[k] < min_cost { + min_cost = cost[k] + block_id[byte_ix] = byte(k) + } + } + + /* More blocks for the beginning. 
*/ + if byte_ix < 2000 { + block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 + } + + for k = 0; k < num_histograms; k++ { + cost[k] -= min_cost + if cost[k] >= block_switch_cost { + var mask byte = byte(1 << (k & 7)) + cost[k] = block_switch_cost + assert(k>>3 < bitmaplen) + switch_signal[ix+(k>>3)] |= mask + /* Trace back from the last position and switch at the marked places. */ + } + } + } + { + var byte_ix uint = length - 1 + var ix uint = byte_ix * bitmaplen + var cur_id byte = block_id[byte_ix] + for byte_ix > 0 { + var mask byte = byte(1 << (cur_id & 7)) + assert(uint(cur_id)>>3 < bitmaplen) + byte_ix-- + ix -= bitmaplen + if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { + if cur_id != block_id[byte_ix] { + cur_id = block_id[byte_ix] + num_blocks++ + } + } + + block_id[byte_ix] = cur_id + } + } + + return num_blocks +} + +var remapBlockIdsCommand_kInvalidId uint16 = 256 + +func remapBlockIdsCommand(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { + var next_id uint16 = 0 + var i uint + for i = 0; i < num_histograms; i++ { + new_id[i] = remapBlockIdsCommand_kInvalidId + } + + for i = 0; i < length; i++ { + assert(uint(block_ids[i]) < num_histograms) + if new_id[block_ids[i]] == remapBlockIdsCommand_kInvalidId { + new_id[block_ids[i]] = next_id + next_id++ + } + } + + for i = 0; i < length; i++ { + block_ids[i] = byte(new_id[block_ids[i]]) + assert(uint(block_ids[i]) < num_histograms) + } + + assert(uint(next_id) <= num_histograms) + return uint(next_id) +} + +func buildBlockHistogramsCommand(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramCommand) { + var i uint + clearHistogramsCommand(histograms, num_histograms) + for i = 0; i < length; i++ { + histogramAddCommand(&histograms[block_ids[i]], uint(data[i])) + } +} + +var clusterBlocksCommand_kInvalidIndex uint32 = math.MaxUint32 + +func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) 
{ + var histogram_symbols []uint32 = make([]uint32, num_blocks) + var block_lengths []uint32 = make([]uint32, num_blocks) + var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch + var all_histograms_size uint = 0 + var all_histograms_capacity uint = expected_num_clusters + var all_histograms []histogramCommand = make([]histogramCommand, all_histograms_capacity) + var cluster_size_size uint = 0 + var cluster_size_capacity uint = expected_num_clusters + var cluster_size []uint32 = make([]uint32, cluster_size_capacity) + var num_clusters uint = 0 + var histograms []histogramCommand = make([]histogramCommand, brotli_min_size_t(num_blocks, histogramsPerBatch)) + var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 + var pairs_capacity uint = max_num_pairs + 1 + var pairs []histogramPair = make([]histogramPair, pairs_capacity) + var pos uint = 0 + var clusters []uint32 + var num_final_clusters uint + var new_index []uint32 + var i uint + var sizes = [histogramsPerBatch]uint32{0} + var new_clusters = [histogramsPerBatch]uint32{0} + var symbols = [histogramsPerBatch]uint32{0} + var remap = [histogramsPerBatch]uint32{0} + + for i := 0; i < int(num_blocks); i++ { + block_lengths[i] = 0 + } + { + var block_idx uint = 0 + for i = 0; i < length; i++ { + assert(block_idx < num_blocks) + block_lengths[block_idx]++ + if i+1 == length || block_ids[i] != block_ids[i+1] { + block_idx++ + } + } + + assert(block_idx == num_blocks) + } + + for i = 0; i < num_blocks; i += histogramsPerBatch { + var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + var k uint + histogramClearCommand(&histograms[j]) + for k = 0; uint32(k) < block_lengths[i+j]; k++ { + histogramAddCommand(&histograms[j], uint(data[pos])) + pos++ + } + + histograms[j].bit_cost_ = populationCostCommand(&histograms[j]) + new_clusters[j] = uint32(j) + 
symbols[j] = uint32(j) + sizes[j] = 1 + } + + num_new_clusters = histogramCombineCommand(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) + if all_histograms_capacity < (all_histograms_size + num_new_clusters) { + var _new_size uint + if all_histograms_capacity == 0 { + _new_size = all_histograms_size + num_new_clusters + } else { + _new_size = all_histograms_capacity + } + var new_array []histogramCommand + for _new_size < (all_histograms_size + num_new_clusters) { + _new_size *= 2 + } + new_array = make([]histogramCommand, _new_size) + if all_histograms_capacity != 0 { + copy(new_array, all_histograms[:all_histograms_capacity]) + } + + all_histograms = new_array + all_histograms_capacity = _new_size + } + + brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) + for j = 0; j < num_new_clusters; j++ { + all_histograms[all_histograms_size] = histograms[new_clusters[j]] + all_histograms_size++ + cluster_size[cluster_size_size] = sizes[new_clusters[j]] + cluster_size_size++ + remap[new_clusters[j]] = uint32(j) + } + + for j = 0; j < num_to_combine; j++ { + histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] + } + + num_clusters += num_new_clusters + assert(num_clusters == cluster_size_size) + assert(num_clusters == all_histograms_size) + } + + histograms = nil + + max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < max_num_pairs+1 { + pairs = nil + pairs = make([]histogramPair, (max_num_pairs + 1)) + } + + clusters = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + clusters[i] = uint32(i) + } + + num_final_clusters = histogramCombineCommand(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) + pairs = nil + cluster_size = nil + + new_index = make([]uint32, 
num_clusters) + for i = 0; i < num_clusters; i++ { + new_index[i] = clusterBlocksCommand_kInvalidIndex + } + pos = 0 + { + var next_index uint32 = 0 + for i = 0; i < num_blocks; i++ { + var histo histogramCommand + var j uint + var best_out uint32 + var best_bits float64 + histogramClearCommand(&histo) + for j = 0; uint32(j) < block_lengths[i]; j++ { + histogramAddCommand(&histo, uint(data[pos])) + pos++ + } + + if i == 0 { + best_out = histogram_symbols[0] + } else { + best_out = histogram_symbols[i-1] + } + best_bits = histogramBitCostDistanceCommand(&histo, &all_histograms[best_out]) + for j = 0; j < num_final_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceCommand(&histo, &all_histograms[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + histogram_symbols[i] = best_out + if new_index[best_out] == clusterBlocksCommand_kInvalidIndex { + new_index[best_out] = next_index + next_index++ + } + } + } + + clusters = nil + all_histograms = nil + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) + { + var cur_length uint32 = 0 + var block_idx uint = 0 + var max_type byte = 0 + for i = 0; i < num_blocks; i++ { + cur_length += block_lengths[i] + if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { + var id byte = byte(new_index[histogram_symbols[i]]) + split.types[block_idx] = id + split.lengths[block_idx] = cur_length + max_type = brotli_max_uint8_t(max_type, id) + cur_length = 0 + block_idx++ + } + } + + split.num_blocks = block_idx + split.num_types = uint(max_type) + 1 + } + + new_index = nil + block_lengths = nil + histogram_symbols = nil +} + +func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { + length := uint(len(data)) + 
var data_size uint = histogramDataSizeCommand() + var num_histograms uint = length/literals_per_histogram + 1 + var histograms []histogramCommand + if num_histograms > max_histograms { + num_histograms = max_histograms + } + + if length == 0 { + split.num_types = 1 + return + } else if length < kMinLengthForBlockSplitting { + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) + split.num_types = 1 + split.types[split.num_blocks] = 0 + split.lengths[split.num_blocks] = uint32(length) + split.num_blocks++ + return + } + + histograms = make([]histogramCommand, num_histograms) + + /* Find good entropy codes. */ + initialEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) + + refineEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) + { + var block_ids []byte = make([]byte, length) + var num_blocks uint = 0 + var bitmaplen uint = (num_histograms + 7) >> 3 + var insert_cost []float64 = make([]float64, (data_size * num_histograms)) + var cost []float64 = make([]float64, num_histograms) + var switch_signal []byte = make([]byte, (length * bitmaplen)) + var new_id []uint16 = make([]uint16, num_histograms) + var iters uint + if params.quality < hqZopflificationQuality { + iters = 3 + } else { + iters = 10 + } + /* Find a good path through literals with the good entropy codes. 
*/ + + var i uint + for i = 0; i < iters; i++ { + num_blocks = findBlocksCommand(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) + num_histograms = remapBlockIdsCommand(block_ids, length, new_id, num_histograms) + buildBlockHistogramsCommand(data, length, block_ids, num_histograms, histograms) + } + + insert_cost = nil + cost = nil + switch_signal = nil + new_id = nil + histograms = nil + clusterBlocksCommand(data, length, num_blocks, block_ids, split) + block_ids = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_distance.go b/vendor/github.com/andybalholm/brotli/block_splitter_distance.go new file mode 100644 index 00000000000..953530d518e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter_distance.go @@ -0,0 +1,433 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func initialEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) { + var seed uint32 = 7 + var block_length uint = length / num_histograms + var i uint + clearHistogramsDistance(histograms, num_histograms) + for i = 0; i < num_histograms; i++ { + var pos uint = length * i / num_histograms + if i != 0 { + pos += uint(myRand(&seed) % uint32(block_length)) + } + + if pos+stride >= length { + pos = length - stride - 1 + } + + histogramAddVectorDistance(&histograms[i], data[pos:], stride) + } +} + +func randomSampleDistance(seed *uint32, data []uint16, length uint, stride uint, sample *histogramDistance) { + var pos uint = 0 + if stride >= length { + stride = length + } else { + pos = uint(myRand(seed) % uint32(length-stride+1)) + } + + histogramAddVectorDistance(sample, data[pos:], stride) +} + +func refineEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms 
uint, histograms []histogramDistance) { + var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining + var seed uint32 = 7 + var iter uint + iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms + for iter = 0; iter < iters; iter++ { + var sample histogramDistance + histogramClearDistance(&sample) + randomSampleDistance(&seed, data, length, stride, &sample) + histogramAddHistogramDistance(&histograms[iter%num_histograms], &sample) + } +} + +/* Assigns a block id from the range [0, num_histograms) to each data element + in data[0..length) and fills in block_id[0..length) with the assigned values. + Returns the number of blocks, i.e. one plus the number of block switches. */ +func findBlocksDistance(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramDistance, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { + var data_size uint = histogramDataSizeDistance() + var bitmaplen uint = (num_histograms + 7) >> 3 + var num_blocks uint = 1 + var i uint + var j uint + assert(num_histograms <= 256) + if num_histograms <= 1 { + for i = 0; i < length; i++ { + block_id[i] = 0 + } + + return 1 + } + + for i := 0; i < int(data_size*num_histograms); i++ { + insert_cost[i] = 0 + } + for i = 0; i < num_histograms; i++ { + insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) + } + + for i = data_size; i != 0; { + i-- + for j = 0; j < num_histograms; j++ { + insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) + } + } + + for i := 0; i < int(num_histograms); i++ { + cost[i] = 0 + } + for i := 0; i < int(length*bitmaplen); i++ { + switch_signal[i] = 0 + } + + /* After each iteration of this loop, cost[k] will contain the difference + between the minimum cost of arriving at the current byte position using + entropy code k, and the minimum cost of arriving at the current byte + position. 
This difference is capped at the block switch cost, and if it + reaches block switch cost, it means that when we trace back from the last + position, we need to switch here. */ + for i = 0; i < length; i++ { + var byte_ix uint = i + var ix uint = byte_ix * bitmaplen + var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms + var min_cost float64 = 1e99 + var block_switch_cost float64 = block_switch_bitcost + var k uint + for k = 0; k < num_histograms; k++ { + /* We are coding the symbol in data[byte_ix] with entropy code k. */ + cost[k] += insert_cost[insert_cost_ix+k] + + if cost[k] < min_cost { + min_cost = cost[k] + block_id[byte_ix] = byte(k) + } + } + + /* More blocks for the beginning. */ + if byte_ix < 2000 { + block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 + } + + for k = 0; k < num_histograms; k++ { + cost[k] -= min_cost + if cost[k] >= block_switch_cost { + var mask byte = byte(1 << (k & 7)) + cost[k] = block_switch_cost + assert(k>>3 < bitmaplen) + switch_signal[ix+(k>>3)] |= mask + /* Trace back from the last position and switch at the marked places. 
*/ + } + } + } + { + var byte_ix uint = length - 1 + var ix uint = byte_ix * bitmaplen + var cur_id byte = block_id[byte_ix] + for byte_ix > 0 { + var mask byte = byte(1 << (cur_id & 7)) + assert(uint(cur_id)>>3 < bitmaplen) + byte_ix-- + ix -= bitmaplen + if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { + if cur_id != block_id[byte_ix] { + cur_id = block_id[byte_ix] + num_blocks++ + } + } + + block_id[byte_ix] = cur_id + } + } + + return num_blocks +} + +var remapBlockIdsDistance_kInvalidId uint16 = 256 + +func remapBlockIdsDistance(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { + var next_id uint16 = 0 + var i uint + for i = 0; i < num_histograms; i++ { + new_id[i] = remapBlockIdsDistance_kInvalidId + } + + for i = 0; i < length; i++ { + assert(uint(block_ids[i]) < num_histograms) + if new_id[block_ids[i]] == remapBlockIdsDistance_kInvalidId { + new_id[block_ids[i]] = next_id + next_id++ + } + } + + for i = 0; i < length; i++ { + block_ids[i] = byte(new_id[block_ids[i]]) + assert(uint(block_ids[i]) < num_histograms) + } + + assert(uint(next_id) <= num_histograms) + return uint(next_id) +} + +func buildBlockHistogramsDistance(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramDistance) { + var i uint + clearHistogramsDistance(histograms, num_histograms) + for i = 0; i < length; i++ { + histogramAddDistance(&histograms[block_ids[i]], uint(data[i])) + } +} + +var clusterBlocksDistance_kInvalidIndex uint32 = math.MaxUint32 + +func clusterBlocksDistance(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { + var histogram_symbols []uint32 = make([]uint32, num_blocks) + var block_lengths []uint32 = make([]uint32, num_blocks) + var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch + var all_histograms_size uint = 0 + var all_histograms_capacity uint = expected_num_clusters + var all_histograms []histogramDistance = 
make([]histogramDistance, all_histograms_capacity) + var cluster_size_size uint = 0 + var cluster_size_capacity uint = expected_num_clusters + var cluster_size []uint32 = make([]uint32, cluster_size_capacity) + var num_clusters uint = 0 + var histograms []histogramDistance = make([]histogramDistance, brotli_min_size_t(num_blocks, histogramsPerBatch)) + var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 + var pairs_capacity uint = max_num_pairs + 1 + var pairs []histogramPair = make([]histogramPair, pairs_capacity) + var pos uint = 0 + var clusters []uint32 + var num_final_clusters uint + var new_index []uint32 + var i uint + var sizes = [histogramsPerBatch]uint32{0} + var new_clusters = [histogramsPerBatch]uint32{0} + var symbols = [histogramsPerBatch]uint32{0} + var remap = [histogramsPerBatch]uint32{0} + + for i := 0; i < int(num_blocks); i++ { + block_lengths[i] = 0 + } + { + var block_idx uint = 0 + for i = 0; i < length; i++ { + assert(block_idx < num_blocks) + block_lengths[block_idx]++ + if i+1 == length || block_ids[i] != block_ids[i+1] { + block_idx++ + } + } + + assert(block_idx == num_blocks) + } + + for i = 0; i < num_blocks; i += histogramsPerBatch { + var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + var k uint + histogramClearDistance(&histograms[j]) + for k = 0; uint32(k) < block_lengths[i+j]; k++ { + histogramAddDistance(&histograms[j], uint(data[pos])) + pos++ + } + + histograms[j].bit_cost_ = populationCostDistance(&histograms[j]) + new_clusters[j] = uint32(j) + symbols[j] = uint32(j) + sizes[j] = 1 + } + + num_new_clusters = histogramCombineDistance(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) + if all_histograms_capacity < (all_histograms_size + num_new_clusters) { + var _new_size uint + if all_histograms_capacity == 0 { + 
_new_size = all_histograms_size + num_new_clusters + } else { + _new_size = all_histograms_capacity + } + var new_array []histogramDistance + for _new_size < (all_histograms_size + num_new_clusters) { + _new_size *= 2 + } + new_array = make([]histogramDistance, _new_size) + if all_histograms_capacity != 0 { + copy(new_array, all_histograms[:all_histograms_capacity]) + } + + all_histograms = new_array + all_histograms_capacity = _new_size + } + + brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) + for j = 0; j < num_new_clusters; j++ { + all_histograms[all_histograms_size] = histograms[new_clusters[j]] + all_histograms_size++ + cluster_size[cluster_size_size] = sizes[new_clusters[j]] + cluster_size_size++ + remap[new_clusters[j]] = uint32(j) + } + + for j = 0; j < num_to_combine; j++ { + histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] + } + + num_clusters += num_new_clusters + assert(num_clusters == cluster_size_size) + assert(num_clusters == all_histograms_size) + } + + histograms = nil + + max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < max_num_pairs+1 { + pairs = nil + pairs = make([]histogramPair, (max_num_pairs + 1)) + } + + clusters = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + clusters[i] = uint32(i) + } + + num_final_clusters = histogramCombineDistance(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) + pairs = nil + cluster_size = nil + + new_index = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + new_index[i] = clusterBlocksDistance_kInvalidIndex + } + pos = 0 + { + var next_index uint32 = 0 + for i = 0; i < num_blocks; i++ { + var histo histogramDistance + var j uint + var best_out uint32 + var best_bits float64 + histogramClearDistance(&histo) + for j = 0; uint32(j) < block_lengths[i]; j++ { + 
histogramAddDistance(&histo, uint(data[pos])) + pos++ + } + + if i == 0 { + best_out = histogram_symbols[0] + } else { + best_out = histogram_symbols[i-1] + } + best_bits = histogramBitCostDistanceDistance(&histo, &all_histograms[best_out]) + for j = 0; j < num_final_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceDistance(&histo, &all_histograms[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + histogram_symbols[i] = best_out + if new_index[best_out] == clusterBlocksDistance_kInvalidIndex { + new_index[best_out] = next_index + next_index++ + } + } + } + + clusters = nil + all_histograms = nil + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) + { + var cur_length uint32 = 0 + var block_idx uint = 0 + var max_type byte = 0 + for i = 0; i < num_blocks; i++ { + cur_length += block_lengths[i] + if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { + var id byte = byte(new_index[histogram_symbols[i]]) + split.types[block_idx] = id + split.lengths[block_idx] = cur_length + max_type = brotli_max_uint8_t(max_type, id) + cur_length = 0 + block_idx++ + } + } + + split.num_blocks = block_idx + split.num_types = uint(max_type) + 1 + } + + new_index = nil + block_lengths = nil + histogram_symbols = nil +} + +func splitByteVectorDistance(data []uint16, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { + var data_size uint = histogramDataSizeDistance() + var num_histograms uint = length/literals_per_histogram + 1 + var histograms []histogramDistance + if num_histograms > max_histograms { + num_histograms = max_histograms + } + + if length == 0 { + split.num_types = 1 + return + } else if length < kMinLengthForBlockSplitting { + 
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) + split.num_types = 1 + split.types[split.num_blocks] = 0 + split.lengths[split.num_blocks] = uint32(length) + split.num_blocks++ + return + } + + histograms = make([]histogramDistance, num_histograms) + + /* Find good entropy codes. */ + initialEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms) + + refineEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms) + { + var block_ids []byte = make([]byte, length) + var num_blocks uint = 0 + var bitmaplen uint = (num_histograms + 7) >> 3 + var insert_cost []float64 = make([]float64, (data_size * num_histograms)) + var cost []float64 = make([]float64, num_histograms) + var switch_signal []byte = make([]byte, (length * bitmaplen)) + var new_id []uint16 = make([]uint16, num_histograms) + var iters uint + if params.quality < hqZopflificationQuality { + iters = 3 + } else { + iters = 10 + } + /* Find a good path through literals with the good entropy codes. 
*/ + + var i uint + for i = 0; i < iters; i++ { + num_blocks = findBlocksDistance(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) + num_histograms = remapBlockIdsDistance(block_ids, length, new_id, num_histograms) + buildBlockHistogramsDistance(data, length, block_ids, num_histograms, histograms) + } + + insert_cost = nil + cost = nil + switch_signal = nil + new_id = nil + histograms = nil + clusterBlocksDistance(data, length, num_blocks, block_ids, split) + block_ids = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_literal.go b/vendor/github.com/andybalholm/brotli/block_splitter_literal.go new file mode 100644 index 00000000000..1c895cf3889 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/block_splitter_literal.go @@ -0,0 +1,433 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func initialEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) { + var seed uint32 = 7 + var block_length uint = length / num_histograms + var i uint + clearHistogramsLiteral(histograms, num_histograms) + for i = 0; i < num_histograms; i++ { + var pos uint = length * i / num_histograms + if i != 0 { + pos += uint(myRand(&seed) % uint32(block_length)) + } + + if pos+stride >= length { + pos = length - stride - 1 + } + + histogramAddVectorLiteral(&histograms[i], data[pos:], stride) + } +} + +func randomSampleLiteral(seed *uint32, data []byte, length uint, stride uint, sample *histogramLiteral) { + var pos uint = 0 + if stride >= length { + stride = length + } else { + pos = uint(myRand(seed) % uint32(length-stride+1)) + } + + histogramAddVectorLiteral(sample, data[pos:], stride) +} + +func refineEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms 
[]histogramLiteral) { + var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining + var seed uint32 = 7 + var iter uint + iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms + for iter = 0; iter < iters; iter++ { + var sample histogramLiteral + histogramClearLiteral(&sample) + randomSampleLiteral(&seed, data, length, stride, &sample) + histogramAddHistogramLiteral(&histograms[iter%num_histograms], &sample) + } +} + +/* Assigns a block id from the range [0, num_histograms) to each data element + in data[0..length) and fills in block_id[0..length) with the assigned values. + Returns the number of blocks, i.e. one plus the number of block switches. */ +func findBlocksLiteral(data []byte, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramLiteral, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { + var data_size uint = histogramDataSizeLiteral() + var bitmaplen uint = (num_histograms + 7) >> 3 + var num_blocks uint = 1 + var i uint + var j uint + assert(num_histograms <= 256) + if num_histograms <= 1 { + for i = 0; i < length; i++ { + block_id[i] = 0 + } + + return 1 + } + + for i := 0; i < int(data_size*num_histograms); i++ { + insert_cost[i] = 0 + } + for i = 0; i < num_histograms; i++ { + insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) + } + + for i = data_size; i != 0; { + i-- + for j = 0; j < num_histograms; j++ { + insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) + } + } + + for i := 0; i < int(num_histograms); i++ { + cost[i] = 0 + } + for i := 0; i < int(length*bitmaplen); i++ { + switch_signal[i] = 0 + } + + /* After each iteration of this loop, cost[k] will contain the difference + between the minimum cost of arriving at the current byte position using + entropy code k, and the minimum cost of arriving at the current byte + position. 
This difference is capped at the block switch cost, and if it + reaches block switch cost, it means that when we trace back from the last + position, we need to switch here. */ + for i = 0; i < length; i++ { + var byte_ix uint = i + var ix uint = byte_ix * bitmaplen + var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms + var min_cost float64 = 1e99 + var block_switch_cost float64 = block_switch_bitcost + var k uint + for k = 0; k < num_histograms; k++ { + /* We are coding the symbol in data[byte_ix] with entropy code k. */ + cost[k] += insert_cost[insert_cost_ix+k] + + if cost[k] < min_cost { + min_cost = cost[k] + block_id[byte_ix] = byte(k) + } + } + + /* More blocks for the beginning. */ + if byte_ix < 2000 { + block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 + } + + for k = 0; k < num_histograms; k++ { + cost[k] -= min_cost + if cost[k] >= block_switch_cost { + var mask byte = byte(1 << (k & 7)) + cost[k] = block_switch_cost + assert(k>>3 < bitmaplen) + switch_signal[ix+(k>>3)] |= mask + /* Trace back from the last position and switch at the marked places. 
*/ + } + } + } + { + var byte_ix uint = length - 1 + var ix uint = byte_ix * bitmaplen + var cur_id byte = block_id[byte_ix] + for byte_ix > 0 { + var mask byte = byte(1 << (cur_id & 7)) + assert(uint(cur_id)>>3 < bitmaplen) + byte_ix-- + ix -= bitmaplen + if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { + if cur_id != block_id[byte_ix] { + cur_id = block_id[byte_ix] + num_blocks++ + } + } + + block_id[byte_ix] = cur_id + } + } + + return num_blocks +} + +var remapBlockIdsLiteral_kInvalidId uint16 = 256 + +func remapBlockIdsLiteral(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { + var next_id uint16 = 0 + var i uint + for i = 0; i < num_histograms; i++ { + new_id[i] = remapBlockIdsLiteral_kInvalidId + } + + for i = 0; i < length; i++ { + assert(uint(block_ids[i]) < num_histograms) + if new_id[block_ids[i]] == remapBlockIdsLiteral_kInvalidId { + new_id[block_ids[i]] = next_id + next_id++ + } + } + + for i = 0; i < length; i++ { + block_ids[i] = byte(new_id[block_ids[i]]) + assert(uint(block_ids[i]) < num_histograms) + } + + assert(uint(next_id) <= num_histograms) + return uint(next_id) +} + +func buildBlockHistogramsLiteral(data []byte, length uint, block_ids []byte, num_histograms uint, histograms []histogramLiteral) { + var i uint + clearHistogramsLiteral(histograms, num_histograms) + for i = 0; i < length; i++ { + histogramAddLiteral(&histograms[block_ids[i]], uint(data[i])) + } +} + +var clusterBlocksLiteral_kInvalidIndex uint32 = math.MaxUint32 + +func clusterBlocksLiteral(data []byte, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { + var histogram_symbols []uint32 = make([]uint32, num_blocks) + var block_lengths []uint32 = make([]uint32, num_blocks) + var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch + var all_histograms_size uint = 0 + var all_histograms_capacity uint = expected_num_clusters + var all_histograms []histogramLiteral = 
make([]histogramLiteral, all_histograms_capacity) + var cluster_size_size uint = 0 + var cluster_size_capacity uint = expected_num_clusters + var cluster_size []uint32 = make([]uint32, cluster_size_capacity) + var num_clusters uint = 0 + var histograms []histogramLiteral = make([]histogramLiteral, brotli_min_size_t(num_blocks, histogramsPerBatch)) + var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 + var pairs_capacity uint = max_num_pairs + 1 + var pairs []histogramPair = make([]histogramPair, pairs_capacity) + var pos uint = 0 + var clusters []uint32 + var num_final_clusters uint + var new_index []uint32 + var i uint + var sizes = [histogramsPerBatch]uint32{0} + var new_clusters = [histogramsPerBatch]uint32{0} + var symbols = [histogramsPerBatch]uint32{0} + var remap = [histogramsPerBatch]uint32{0} + + for i := 0; i < int(num_blocks); i++ { + block_lengths[i] = 0 + } + { + var block_idx uint = 0 + for i = 0; i < length; i++ { + assert(block_idx < num_blocks) + block_lengths[block_idx]++ + if i+1 == length || block_ids[i] != block_ids[i+1] { + block_idx++ + } + } + + assert(block_idx == num_blocks) + } + + for i = 0; i < num_blocks; i += histogramsPerBatch { + var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + var k uint + histogramClearLiteral(&histograms[j]) + for k = 0; uint32(k) < block_lengths[i+j]; k++ { + histogramAddLiteral(&histograms[j], uint(data[pos])) + pos++ + } + + histograms[j].bit_cost_ = populationCostLiteral(&histograms[j]) + new_clusters[j] = uint32(j) + symbols[j] = uint32(j) + sizes[j] = 1 + } + + num_new_clusters = histogramCombineLiteral(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) + if all_histograms_capacity < (all_histograms_size + num_new_clusters) { + var _new_size uint + if all_histograms_capacity == 0 { + 
_new_size = all_histograms_size + num_new_clusters + } else { + _new_size = all_histograms_capacity + } + var new_array []histogramLiteral + for _new_size < (all_histograms_size + num_new_clusters) { + _new_size *= 2 + } + new_array = make([]histogramLiteral, _new_size) + if all_histograms_capacity != 0 { + copy(new_array, all_histograms[:all_histograms_capacity]) + } + + all_histograms = new_array + all_histograms_capacity = _new_size + } + + brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) + for j = 0; j < num_new_clusters; j++ { + all_histograms[all_histograms_size] = histograms[new_clusters[j]] + all_histograms_size++ + cluster_size[cluster_size_size] = sizes[new_clusters[j]] + cluster_size_size++ + remap[new_clusters[j]] = uint32(j) + } + + for j = 0; j < num_to_combine; j++ { + histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] + } + + num_clusters += num_new_clusters + assert(num_clusters == cluster_size_size) + assert(num_clusters == all_histograms_size) + } + + histograms = nil + + max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < max_num_pairs+1 { + pairs = nil + pairs = make([]histogramPair, (max_num_pairs + 1)) + } + + clusters = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + clusters[i] = uint32(i) + } + + num_final_clusters = histogramCombineLiteral(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) + pairs = nil + cluster_size = nil + + new_index = make([]uint32, num_clusters) + for i = 0; i < num_clusters; i++ { + new_index[i] = clusterBlocksLiteral_kInvalidIndex + } + pos = 0 + { + var next_index uint32 = 0 + for i = 0; i < num_blocks; i++ { + var histo histogramLiteral + var j uint + var best_out uint32 + var best_bits float64 + histogramClearLiteral(&histo) + for j = 0; uint32(j) < block_lengths[i]; j++ { + 
histogramAddLiteral(&histo, uint(data[pos])) + pos++ + } + + if i == 0 { + best_out = histogram_symbols[0] + } else { + best_out = histogram_symbols[i-1] + } + best_bits = histogramBitCostDistanceLiteral(&histo, &all_histograms[best_out]) + for j = 0; j < num_final_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceLiteral(&histo, &all_histograms[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + histogram_symbols[i] = best_out + if new_index[best_out] == clusterBlocksLiteral_kInvalidIndex { + new_index[best_out] = next_index + next_index++ + } + } + } + + clusters = nil + all_histograms = nil + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) + { + var cur_length uint32 = 0 + var block_idx uint = 0 + var max_type byte = 0 + for i = 0; i < num_blocks; i++ { + cur_length += block_lengths[i] + if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { + var id byte = byte(new_index[histogram_symbols[i]]) + split.types[block_idx] = id + split.lengths[block_idx] = cur_length + max_type = brotli_max_uint8_t(max_type, id) + cur_length = 0 + block_idx++ + } + } + + split.num_blocks = block_idx + split.num_types = uint(max_type) + 1 + } + + new_index = nil + block_lengths = nil + histogram_symbols = nil +} + +func splitByteVectorLiteral(data []byte, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { + var data_size uint = histogramDataSizeLiteral() + var num_histograms uint = length/literals_per_histogram + 1 + var histograms []histogramLiteral + if num_histograms > max_histograms { + num_histograms = max_histograms + } + + if length == 0 { + split.num_types = 1 + return + } else if length < kMinLengthForBlockSplitting { + 
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) + split.num_types = 1 + split.types[split.num_blocks] = 0 + split.lengths[split.num_blocks] = uint32(length) + split.num_blocks++ + return + } + + histograms = make([]histogramLiteral, num_histograms) + + /* Find good entropy codes. */ + initialEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms) + + refineEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms) + { + var block_ids []byte = make([]byte, length) + var num_blocks uint = 0 + var bitmaplen uint = (num_histograms + 7) >> 3 + var insert_cost []float64 = make([]float64, (data_size * num_histograms)) + var cost []float64 = make([]float64, num_histograms) + var switch_signal []byte = make([]byte, (length * bitmaplen)) + var new_id []uint16 = make([]uint16, num_histograms) + var iters uint + if params.quality < hqZopflificationQuality { + iters = 3 + } else { + iters = 10 + } + /* Find a good path through literals with the good entropy codes. 
*/ + + var i uint + for i = 0; i < iters; i++ { + num_blocks = findBlocksLiteral(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) + num_histograms = remapBlockIdsLiteral(block_ids, length, new_id, num_histograms) + buildBlockHistogramsLiteral(data, length, block_ids, num_histograms, histograms) + } + + insert_cost = nil + cost = nil + switch_signal = nil + new_id = nil + histograms = nil + clusterBlocksLiteral(data, length, num_blocks, block_ids, split) + block_ids = nil + } +} diff --git a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go new file mode 100644 index 00000000000..ee6552982bf --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go @@ -0,0 +1,1539 @@ +package brotli + +import ( + "math" + "sync" +) + +const maxHuffmanTreeSize = (2*numCommandSymbols + 1) + +/* +The maximum size of Huffman dictionary for distances assuming that + + NPOSTFIX = 0 and NDIRECT = 0. 
+*/ +const maxSimpleDistanceAlphabetSize = 140 + +/* +Represents the range of values belonging to a prefix code: + + [offset, offset + 2^nbits) +*/ +type prefixCodeRange struct { + offset uint32 + nbits uint32 +} + +var kBlockLengthPrefixCode = [numBlockLenSymbols]prefixCodeRange{ + prefixCodeRange{1, 2}, + prefixCodeRange{5, 2}, + prefixCodeRange{9, 2}, + prefixCodeRange{13, 2}, + prefixCodeRange{17, 3}, + prefixCodeRange{25, 3}, + prefixCodeRange{33, 3}, + prefixCodeRange{41, 3}, + prefixCodeRange{49, 4}, + prefixCodeRange{65, 4}, + prefixCodeRange{81, 4}, + prefixCodeRange{97, 4}, + prefixCodeRange{113, 5}, + prefixCodeRange{145, 5}, + prefixCodeRange{177, 5}, + prefixCodeRange{209, 5}, + prefixCodeRange{241, 6}, + prefixCodeRange{305, 6}, + prefixCodeRange{369, 7}, + prefixCodeRange{497, 8}, + prefixCodeRange{753, 9}, + prefixCodeRange{1265, 10}, + prefixCodeRange{2289, 11}, + prefixCodeRange{4337, 12}, + prefixCodeRange{8433, 13}, + prefixCodeRange{16625, 24}, +} + +func blockLengthPrefixCode(len uint32) uint32 { + var code uint32 + if len >= 177 { + if len >= 753 { + code = 20 + } else { + code = 14 + } + } else if len >= 41 { + code = 7 + } else { + code = 0 + } + for code < (numBlockLenSymbols-1) && len >= kBlockLengthPrefixCode[code+1].offset { + code++ + } + return code +} + +func getBlockLengthPrefixCode(len uint32, code *uint, n_extra *uint32, extra *uint32) { + *code = uint(blockLengthPrefixCode(uint32(len))) + *n_extra = kBlockLengthPrefixCode[*code].nbits + *extra = len - kBlockLengthPrefixCode[*code].offset +} + +type blockTypeCodeCalculator struct { + last_type uint + second_last_type uint +} + +func initBlockTypeCodeCalculator(self *blockTypeCodeCalculator) { + self.last_type = 1 + self.second_last_type = 0 +} + +func nextBlockTypeCode(calculator *blockTypeCodeCalculator, type_ byte) uint { + var type_code uint + if uint(type_) == calculator.last_type+1 { + type_code = 1 + } else if uint(type_) == calculator.second_last_type { + type_code = 0 + } 
else { + type_code = uint(type_) + 2 + } + calculator.second_last_type = calculator.last_type + calculator.last_type = uint(type_) + return type_code +} + +/* +|nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) + + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ +func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) { + var lg uint + if length == 1 { + lg = 1 + } else { + lg = uint(log2FloorNonZero(uint(uint32(length-1)))) + 1 + } + var tmp uint + if lg < 16 { + tmp = 16 + } else { + tmp = (lg + 3) + } + var mnibbles uint = tmp / 4 + assert(length > 0) + assert(length <= 1<<24) + assert(lg <= 24) + *nibblesbits = uint64(mnibbles) - 4 + *numbits = mnibbles * 4 + *bits = uint64(length) - 1 +} + +func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) { + var copylen_code uint32 = commandCopyLenCode(cmd) + var inscode uint16 = getInsertLengthCode(uint(cmd.insert_len_)) + var copycode uint16 = getCopyLengthCode(uint(copylen_code)) + var insnumextra uint32 = getInsertExtra(inscode) + var insextraval uint64 = uint64(cmd.insert_len_) - uint64(getInsertBase(inscode)) + var copyextraval uint64 = uint64(copylen_code) - uint64(getCopyBase(copycode)) + var bits uint64 = copyextraval< 0 + REQUIRES: length <= (1 << 24) +*/ +func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix *uint, storage []byte) { + var lenbits uint64 + var nlenbits uint + var nibblesbits uint64 + var is_final uint64 + if is_final_block { + is_final = 1 + } else { + is_final = 0 + } + + /* Write ISLAST bit. */ + writeBits(1, is_final, storage_ix, storage) + + /* Write ISEMPTY bit. */ + if is_final_block { + writeBits(1, 0, storage_ix, storage) + } + + encodeMlen(length, &lenbits, &nlenbits, &nibblesbits) + writeBits(2, nibblesbits, storage_ix, storage) + writeBits(nlenbits, lenbits, storage_ix, storage) + + if !is_final_block { + /* Write ISUNCOMPRESSED bit. 
*/ + writeBits(1, 0, storage_ix, storage) + } +} + +/* +Stores the uncompressed meta-block header. + + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ +func storeUncompressedMetaBlockHeader(length uint, storage_ix *uint, storage []byte) { + var lenbits uint64 + var nlenbits uint + var nibblesbits uint64 + + /* Write ISLAST bit. + Uncompressed block cannot be the last one, so set to 0. */ + writeBits(1, 0, storage_ix, storage) + + encodeMlen(length, &lenbits, &nlenbits, &nibblesbits) + writeBits(2, nibblesbits, storage_ix, storage) + writeBits(nlenbits, lenbits, storage_ix, storage) + + /* Write ISUNCOMPRESSED bit. */ + writeBits(1, 1, storage_ix, storage) +} + +var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15} + +var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols = [6]byte{0, 7, 3, 2, 1, 15} +var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths = [6]byte{2, 4, 3, 2, 2, 4} + +func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, storage_ix *uint, storage []byte) { + var skip_some uint = 0 + var codes_to_store uint = codeLengthCodes + /* The bit lengths of the Huffman code over the code length alphabet + are compressed with the following static Huffman code: + Symbol Code + ------ ---- + 0 00 + 1 1110 + 2 110 + 3 01 + 4 10 + 5 1111 */ + + /* Throw away trailing zeros: */ + if num_codes > 1 { + for ; codes_to_store > 0; codes_to_store-- { + if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[codes_to_store-1]] != 0 { + break + } + } + } + + if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[0]] == 0 && code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[1]] == 0 { + skip_some = 2 /* skips two. 
*/ + if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[2]] == 0 { + skip_some = 3 /* skips three. */ + } + } + + writeBits(2, uint64(skip_some), storage_ix, storage) + { + var i uint + for i = skip_some; i < codes_to_store; i++ { + var l uint = uint(code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[i]]) + writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]), storage_ix, storage) + } + } +} + +func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, storage_ix *uint, storage []byte) { + var i uint + for i = 0; i < huffman_tree_size; i++ { + var ix uint = uint(huffman_tree[i]) + writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]), storage_ix, storage) + + /* Extra bits */ + switch ix { + case repeatPreviousCodeLength: + writeBits(2, uint64(huffman_tree_extra_bits[i]), storage_ix, storage) + + case repeatZeroCodeLength: + writeBits(3, uint64(huffman_tree_extra_bits[i]), storage_ix, storage) + } + } +} + +func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, storage_ix *uint, storage []byte) { + /* value of 1 indicates a simple Huffman code */ + writeBits(2, 1, storage_ix, storage) + + writeBits(2, uint64(num_symbols)-1, storage_ix, storage) /* NSYM - 1 */ + { + /* Sort */ + var i uint + for i = 0; i < num_symbols; i++ { + var j uint + for j = i + 1; j < num_symbols; j++ { + if depths[symbols[j]] < depths[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + } + + if num_symbols == 2 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + } else if num_symbols == 3 { + 
writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + } else { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[3]), storage_ix, storage) + + /* tree-select */ + var tmp int + if depths[symbols[0]] == 1 { + tmp = 1 + } else { + tmp = 0 + } + writeBits(1, uint64(tmp), storage_ix, storage) + } +} + +/* +num = alphabet size + + depths = symbol depths +*/ +func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var huffman_tree [numCommandSymbols]byte + var huffman_tree_extra_bits [numCommandSymbols]byte + var huffman_tree_size uint = 0 + var code_length_bitdepth = [codeLengthCodes]byte{0} + var code_length_bitdepth_symbols [codeLengthCodes]uint16 + var huffman_tree_histogram = [codeLengthCodes]uint32{0} + var i uint + var num_codes int = 0 + /* Write the Huffman tree into the brotli-representation. + The command alphabet is the largest, so this allocation will fit all + alphabets. */ + + var code uint = 0 + + assert(num <= numCommandSymbols) + + writeHuffmanTree(depths, num, &huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:]) + + /* Calculate the statistics of the Huffman tree in brotli-representation. */ + for i = 0; i < huffman_tree_size; i++ { + huffman_tree_histogram[huffman_tree[i]]++ + } + + for i = 0; i < codeLengthCodes; i++ { + if huffman_tree_histogram[i] != 0 { + if num_codes == 0 { + code = i + num_codes = 1 + } else if num_codes == 1 { + num_codes = 2 + break + } + } + } + + /* Calculate another Huffman tree to use for compressing both the + earlier Huffman tree with. 
*/ + createHuffmanTree(huffman_tree_histogram[:], codeLengthCodes, 5, tree, code_length_bitdepth[:]) + + convertBitDepthsToSymbols(code_length_bitdepth[:], codeLengthCodes, code_length_bitdepth_symbols[:]) + + /* Now, we have all the data, let's start storing it */ + storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], storage_ix, storage) + + if num_codes == 1 { + code_length_bitdepth[code] = 0 + } + + /* Store the real Huffman tree now. */ + storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], storage_ix, storage) +} + +/* +Builds a Huffman tree from histogram[0:length] into depth[0:length] and + + bits[0:length] and stores the encoded tree to the bit stream. +*/ +func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var count uint = 0 + var s4 = [4]uint{0} + var i uint + var max_bits uint = 0 + for i = 0; i < histogram_length; i++ { + if histogram[i] != 0 { + if count < 4 { + s4[count] = i + } else if count > 4 { + break + } + + count++ + } + } + { + var max_bits_counter uint = alphabet_size - 1 + for max_bits_counter != 0 { + max_bits_counter >>= 1 + max_bits++ + } + } + + if count <= 1 { + writeBits(4, 1, storage_ix, storage) + writeBits(max_bits, uint64(s4[0]), storage_ix, storage) + depth[s4[0]] = 0 + bits[s4[0]] = 0 + return + } + + for i := 0; i < int(histogram_length); i++ { + depth[i] = 0 + } + createHuffmanTree(histogram, histogram_length, 15, tree, depth) + convertBitDepthsToSymbols(depth, histogram_length, bits) + + if count <= 4 { + storeSimpleHuffmanTree(depth, s4[:], count, max_bits, storage_ix, storage) + } else { + storeHuffmanTree(depth, histogram_length, tree, storage_ix, storage) + } +} + +func sortHuffmanTree1(v0 huffmanTree, v1 huffmanTree) bool { + return v0.total_count_ < v1.total_count_ +} + +var 
huffmanTreePool sync.Pool + +func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var count uint = 0 + var symbols = [4]uint{0} + var length uint = 0 + var total uint = histogram_total + for total != 0 { + if histogram[length] != 0 { + if count < 4 { + symbols[count] = length + } + + count++ + total -= uint(histogram[length]) + } + + length++ + } + + if count <= 1 { + writeBits(4, 1, storage_ix, storage) + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + depth[symbols[0]] = 0 + bits[symbols[0]] = 0 + return + } + + for i := 0; i < int(length); i++ { + depth[i] = 0 + } + { + var max_tree_size uint = 2*length + 1 + tree, _ := huffmanTreePool.Get().(*[]huffmanTree) + if tree == nil || cap(*tree) < int(max_tree_size) { + tmp := make([]huffmanTree, max_tree_size) + tree = &tmp + } else { + *tree = (*tree)[:max_tree_size] + } + var count_limit uint32 + for count_limit = 1; ; count_limit *= 2 { + var node int = 0 + var l uint + for l = length; l != 0; { + l-- + if histogram[l] != 0 { + if histogram[l] >= count_limit { + initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l)) + } else { + initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l)) + } + + node++ + } + } + { + var n int = node + /* Points to the next leaf node. */ /* Points to the next non-leaf node. */ + var sentinel huffmanTree + var i int = 0 + var j int = n + 1 + var k int + + sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1)) + + /* The nodes are: + [0, n): the sorted leaf nodes that we start with. + [n]: we add a sentinel here. + [n + 1, 2n): new parent nodes are added here, starting from + (n+1). These are naturally in ascending order. + [2n]: we add a sentinel at the end as well. + There will be (2n+1) elements at the end. 
*/ + initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) + + (*tree)[node] = sentinel + node++ + (*tree)[node] = sentinel + node++ + + for k = n - 1; k > 0; k-- { + var left int + var right int + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + left = i + i++ + } else { + left = j + j++ + } + + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + right = i + i++ + } else { + right = j + j++ + } + + /* The sentinel node becomes the parent node. */ + (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_ + + (*tree)[node-1].index_left_ = int16(left) + (*tree)[node-1].index_right_or_value_ = int16(right) + + /* Add back the last sentinel node. */ + (*tree)[node] = sentinel + node++ + } + + if setDepth(2*n-1, *tree, depth, 14) { + /* We need to pack the Huffman tree in 14 bits. If this was not + successful, add fake entities to the lowest values and retry. */ + break + } + } + } + + huffmanTreePool.Put(tree) + } + + convertBitDepthsToSymbols(depth, length, bits) + if count <= 4 { + var i uint + + /* value of 1 indicates a simple Huffman code */ + writeBits(2, 1, storage_ix, storage) + + writeBits(2, uint64(count)-1, storage_ix, storage) /* NSYM - 1 */ + + /* Sort */ + for i = 0; i < count; i++ { + var j uint + for j = i + 1; j < count; j++ { + if depth[symbols[j]] < depth[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + + if count == 2 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + } else if count == 3 { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + } else { + writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) + writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) + 
writeBits(max_bits, uint64(symbols[3]), storage_ix, storage) + + /* tree-select */ + var tmp int + if depth[symbols[0]] == 1 { + tmp = 1 + } else { + tmp = 0 + } + writeBits(1, uint64(tmp), storage_ix, storage) + } + } else { + var previous_value byte = 8 + var i uint + + /* Complex Huffman Tree */ + storeStaticCodeLengthCode(storage_ix, storage) + + /* Actual RLE coding. */ + for i = 0; i < length; { + var value byte = depth[i] + var reps uint = 1 + var k uint + for k = i + 1; k < length && depth[k] == value; k++ { + reps++ + } + + i += reps + if value == 0 { + writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps], storage_ix, storage) + } else { + if previous_value != value { + writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage) + reps-- + } + + if reps < 3 { + for reps != 0 { + reps-- + writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage) + } + } else { + reps -= 3 + writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps], storage_ix, storage) + } + + previous_value = value + } + } + } +} + +func buildAndStoreHuffmanTreeFastBW(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, bw *bitWriter) { + var count uint = 0 + var symbols = [4]uint{0} + var length uint = 0 + var total uint = histogram_total + for total != 0 { + if histogram[length] != 0 { + if count < 4 { + symbols[count] = length + } + + count++ + total -= uint(histogram[length]) + } + + length++ + } + + if count <= 1 { + bw.writeBits(4, 1) + bw.writeBits(max_bits, uint64(symbols[0])) + depth[symbols[0]] = 0 + bits[symbols[0]] = 0 + return + } + + for i := 0; i < int(length); i++ { + depth[i] = 0 + } + { + var max_tree_size uint = 2*length + 1 + tree, _ := huffmanTreePool.Get().(*[]huffmanTree) + if tree == nil || cap(*tree) < int(max_tree_size) { + tmp := make([]huffmanTree, max_tree_size) + tree = &tmp + } else { + *tree = (*tree)[:max_tree_size] + } + var count_limit uint32 
+ for count_limit = 1; ; count_limit *= 2 { + var node int = 0 + var l uint + for l = length; l != 0; { + l-- + if histogram[l] != 0 { + if histogram[l] >= count_limit { + initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l)) + } else { + initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l)) + } + + node++ + } + } + { + var n int = node + /* Points to the next leaf node. */ /* Points to the next non-leaf node. */ + var sentinel huffmanTree + var i int = 0 + var j int = n + 1 + var k int + + sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1)) + + /* The nodes are: + [0, n): the sorted leaf nodes that we start with. + [n]: we add a sentinel here. + [n + 1, 2n): new parent nodes are added here, starting from + (n+1). These are naturally in ascending order. + [2n]: we add a sentinel at the end as well. + There will be (2n+1) elements at the end. */ + initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) + + (*tree)[node] = sentinel + node++ + (*tree)[node] = sentinel + node++ + + for k = n - 1; k > 0; k-- { + var left int + var right int + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + left = i + i++ + } else { + left = j + j++ + } + + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + right = i + i++ + } else { + right = j + j++ + } + + /* The sentinel node becomes the parent node. */ + (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_ + + (*tree)[node-1].index_left_ = int16(left) + (*tree)[node-1].index_right_or_value_ = int16(right) + + /* Add back the last sentinel node. */ + (*tree)[node] = sentinel + node++ + } + + if setDepth(2*n-1, *tree, depth, 14) { + /* We need to pack the Huffman tree in 14 bits. If this was not + successful, add fake entities to the lowest values and retry. 
*/ + break + } + } + } + + huffmanTreePool.Put(tree) + } + + convertBitDepthsToSymbols(depth, length, bits) + if count <= 4 { + var i uint + + /* value of 1 indicates a simple Huffman code */ + bw.writeBits(2, 1) + + bw.writeBits(2, uint64(count)-1) /* NSYM - 1 */ + + /* Sort */ + for i = 0; i < count; i++ { + var j uint + for j = i + 1; j < count; j++ { + if depth[symbols[j]] < depth[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + + if count == 2 { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + } else if count == 3 { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + bw.writeBits(max_bits, uint64(symbols[2])) + } else { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + bw.writeBits(max_bits, uint64(symbols[2])) + bw.writeBits(max_bits, uint64(symbols[3])) + + /* tree-select */ + bw.writeSingleBit(depth[symbols[0]] == 1) + } + } else { + var previous_value byte = 8 + var i uint + + /* Complex Huffman Tree */ + storeStaticCodeLengthCodeBW(bw) + + /* Actual RLE coding. 
*/ + for i = 0; i < length; { + var value byte = depth[i] + var reps uint = 1 + var k uint + for k = i + 1; k < length && depth[k] == value; k++ { + reps++ + } + + i += reps + if value == 0 { + bw.writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps]) + } else { + if previous_value != value { + bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value])) + reps-- + } + + if reps < 3 { + for reps != 0 { + reps-- + bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value])) + } + } else { + reps -= 3 + bw.writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps]) + } + + previous_value = value + } + } + } +} + +func indexOf(v []byte, v_size uint, value byte) uint { + var i uint = 0 + for ; i < v_size; i++ { + if v[i] == value { + return i + } + } + + return i +} + +func moveToFront(v []byte, index uint) { + var value byte = v[index] + var i uint + for i = index; i != 0; i-- { + v[i] = v[i-1] + } + + v[0] = value +} + +func moveToFrontTransform(v_in []uint32, v_size uint, v_out []uint32) { + var i uint + var mtf [256]byte + var max_value uint32 + if v_size == 0 { + return + } + + max_value = v_in[0] + for i = 1; i < v_size; i++ { + if v_in[i] > max_value { + max_value = v_in[i] + } + } + + assert(max_value < 256) + for i = 0; uint32(i) <= max_value; i++ { + mtf[i] = byte(i) + } + { + var mtf_size uint = uint(max_value + 1) + for i = 0; i < v_size; i++ { + var index uint = indexOf(mtf[:], mtf_size, byte(v_in[i])) + assert(index < mtf_size) + v_out[i] = uint32(index) + moveToFront(mtf[:], index) + } + } +} + +/* +Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of + + the run length plus extra bits (lower 9 bits is the prefix code and the rest + are the extra bits). Non-zero values in v[] are shifted by + *max_length_prefix. Will not create prefix codes bigger than the initial + value of *max_run_length_prefix. 
The prefix code of run length L is simply + Log2Floor(L) and the number of extra bits is the same as the prefix code. +*/ +func runLengthCodeZeros(in_size uint, v []uint32, out_size *uint, max_run_length_prefix *uint32) { + var max_reps uint32 = 0 + var i uint + var max_prefix uint32 + for i = 0; i < in_size; { + var reps uint32 = 0 + for ; i < in_size && v[i] != 0; i++ { + } + for ; i < in_size && v[i] == 0; i++ { + reps++ + } + + max_reps = brotli_max_uint32_t(reps, max_reps) + } + + if max_reps > 0 { + max_prefix = log2FloorNonZero(uint(max_reps)) + } else { + max_prefix = 0 + } + max_prefix = brotli_min_uint32_t(max_prefix, *max_run_length_prefix) + *max_run_length_prefix = max_prefix + *out_size = 0 + for i = 0; i < in_size; { + assert(*out_size <= i) + if v[i] != 0 { + v[*out_size] = v[i] + *max_run_length_prefix + i++ + (*out_size)++ + } else { + var reps uint32 = 1 + var k uint + for k = i + 1; k < in_size && v[k] == 0; k++ { + reps++ + } + + i += uint(reps) + for reps != 0 { + if reps < 2< 0) + writeSingleBit(use_rle, storage_ix, storage) + if use_rle { + writeBits(4, uint64(max_run_length_prefix)-1, storage_ix, storage) + } + } + + buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], storage_ix, storage) + for i = 0; i < num_rle_symbols; i++ { + var rle_symbol uint32 = rle_symbols[i] & encodeContextMap_kSymbolMask + var extra_bits_val uint32 = rle_symbols[i] >> symbolBits + writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]), storage_ix, storage) + if rle_symbol > 0 && rle_symbol <= max_run_length_prefix { + writeBits(uint(rle_symbol), uint64(extra_bits_val), storage_ix, storage) + } + } + + writeBits(1, 1, storage_ix, storage) /* use move-to-front */ + rle_symbols = nil +} + +/* Stores the block switch command with index block_ix to the bit stream. 
*/ +func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, storage_ix *uint, storage []byte) { + var typecode uint = nextBlockTypeCode(&code.type_code_calculator, block_type) + var lencode uint + var len_nextra uint32 + var len_extra uint32 + if !is_first_block { + writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]), storage_ix, storage) + } + + getBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra) + + writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]), storage_ix, storage) + writeBits(uint(len_nextra), uint64(len_extra), storage_ix, storage) +} + +/* +Builds a BlockSplitCode data structure from the block split given by the + + vector of block types and block lengths and stores it to the bit stream. +*/ +func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, storage_ix *uint, storage []byte) { + var type_histo [maxBlockTypeSymbols]uint32 + var length_histo [numBlockLenSymbols]uint32 + var i uint + var type_code_calculator blockTypeCodeCalculator + for i := 0; i < int(num_types+2); i++ { + type_histo[i] = 0 + } + length_histo = [numBlockLenSymbols]uint32{} + initBlockTypeCodeCalculator(&type_code_calculator) + for i = 0; i < num_blocks; i++ { + var type_code uint = nextBlockTypeCode(&type_code_calculator, types[i]) + if i != 0 { + type_histo[type_code]++ + } + length_histo[blockLengthPrefixCode(lengths[i])]++ + } + + storeVarLenUint8(num_types-1, storage_ix, storage) + if num_types > 1 { /* TODO: else? could StoreBlockSwitch occur? 
*/ + buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], storage_ix, storage) + buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], storage_ix, storage) + storeBlockSwitch(code, lengths[0], types[0], true, storage_ix, storage) + } +} + +/* Stores a context map where the histogram type is always the block type. */ +func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + storeVarLenUint8(num_types-1, storage_ix, storage) + if num_types > 1 { + var repeat_code uint = context_bits - 1 + var repeat_bits uint = (1 << repeat_code) - 1 + var alphabet_size uint = num_types + repeat_code + var histogram [maxContextMapSymbols]uint32 + var depths [maxContextMapSymbols]byte + var bits [maxContextMapSymbols]uint16 + var i uint + for i := 0; i < int(alphabet_size); i++ { + histogram[i] = 0 + } + + /* Write RLEMAX. */ + writeBits(1, 1, storage_ix, storage) + + writeBits(4, uint64(repeat_code)-1, storage_ix, storage) + histogram[repeat_code] = uint32(num_types) + histogram[0] = 1 + for i = context_bits; i < alphabet_size; i++ { + histogram[i] = 1 + } + + buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], storage_ix, storage) + for i = 0; i < num_types; i++ { + var tmp uint + if i == 0 { + tmp = 0 + } else { + tmp = i + context_bits - 1 + } + var code uint = tmp + writeBits(uint(depths[code]), uint64(bits[code]), storage_ix, storage) + writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]), storage_ix, storage) + writeBits(repeat_code, uint64(repeat_bits), storage_ix, storage) + } + + /* Write IMTF (inverse-move-to-front) bit. */ + writeBits(1, 1, storage_ix, storage) + } +} + +/* Manages the encoding of one block category (literal, command or distance). 
*/ +type blockEncoder struct { + histogram_length_ uint + num_block_types_ uint + block_types_ []byte + block_lengths_ []uint32 + num_blocks_ uint + block_split_code_ blockSplitCode + block_ix_ uint + block_len_ uint + entropy_ix_ uint + depths_ []byte + bits_ []uint16 +} + +var blockEncoderPool sync.Pool + +func getBlockEncoder(histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) *blockEncoder { + self, _ := blockEncoderPool.Get().(*blockEncoder) + + if self != nil { + self.block_ix_ = 0 + self.entropy_ix_ = 0 + self.depths_ = self.depths_[:0] + self.bits_ = self.bits_[:0] + } else { + self = &blockEncoder{} + } + + self.histogram_length_ = histogram_length + self.num_block_types_ = num_block_types + self.block_types_ = block_types + self.block_lengths_ = block_lengths + self.num_blocks_ = num_blocks + initBlockTypeCodeCalculator(&self.block_split_code_.type_code_calculator) + if num_blocks == 0 { + self.block_len_ = 0 + } else { + self.block_len_ = uint(block_lengths[0]) + } + + return self +} + +func cleanupBlockEncoder(self *blockEncoder) { + blockEncoderPool.Put(self) +} + +/* +Creates entropy codes of block lengths and block types and stores them + + to the bit stream. +*/ +func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, storage_ix *uint, storage []byte) { + buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, storage_ix, storage) +} + +/* +Stores the next symbol with the entropy code of the current block type. + + Updates the block type and block length at block boundaries. 
+*/ +func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []byte) { + if self.block_len_ == 0 { + self.block_ix_++ + var block_ix uint = self.block_ix_ + var block_len uint32 = self.block_lengths_[block_ix] + var block_type byte = self.block_types_[block_ix] + self.block_len_ = uint(block_len) + self.entropy_ix_ = uint(block_type) * self.histogram_length_ + storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage) + } + + self.block_len_-- + { + var ix uint = self.entropy_ix_ + symbol + writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage) + } +} + +/* +Stores the next symbol with the entropy code of the current block type and + + context value. + Updates the block type and block length at block boundaries. +*/ +func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, storage_ix *uint, storage []byte, context_bits uint) { + if self.block_len_ == 0 { + self.block_ix_++ + var block_ix uint = self.block_ix_ + var block_len uint32 = self.block_lengths_[block_ix] + var block_type byte = self.block_types_[block_ix] + self.block_len_ = uint(block_len) + self.entropy_ix_ = uint(block_type) << context_bits + storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage) + } + + self.block_len_-- + { + var histo_ix uint = uint(context_map[self.entropy_ix_+context]) + var ix uint = histo_ix*self.histogram_length_ + symbol + writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage) + } +} + +func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var table_size uint = histograms_size * self.histogram_length_ + if cap(self.depths_) < int(table_size) { + self.depths_ = make([]byte, table_size) + } else { + self.depths_ = self.depths_[:table_size] + } + if cap(self.bits_) < 
int(table_size) { + self.bits_ = make([]uint16, table_size) + } else { + self.bits_ = self.bits_[:table_size] + } + { + var i uint + for i = 0; i < histograms_size; i++ { + var ix uint = i * self.histogram_length_ + buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) + } + } +} + +func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var table_size uint = histograms_size * self.histogram_length_ + if cap(self.depths_) < int(table_size) { + self.depths_ = make([]byte, table_size) + } else { + self.depths_ = self.depths_[:table_size] + } + if cap(self.bits_) < int(table_size) { + self.bits_ = make([]uint16, table_size) + } else { + self.bits_ = self.bits_[:table_size] + } + { + var i uint + for i = 0; i < histograms_size; i++ { + var ix uint = i * self.histogram_length_ + buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) + } + } +} + +func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { + var table_size uint = histograms_size * self.histogram_length_ + if cap(self.depths_) < int(table_size) { + self.depths_ = make([]byte, table_size) + } else { + self.depths_ = self.depths_[:table_size] + } + if cap(self.bits_) < int(table_size) { + self.bits_ = make([]uint16, table_size) + } else { + self.bits_ = self.bits_[:table_size] + } + { + var i uint + for i = 0; i < histograms_size; i++ { + var ix uint = i * self.histogram_length_ + buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) + } + } +} + +func 
jumpToByteBoundary(storage_ix *uint, storage []byte) { + *storage_ix = (*storage_ix + 7) &^ 7 + storage[*storage_ix>>3] = 0 +} + +func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, mb *metaBlockSplit, storage_ix *uint, storage []byte) { + var pos uint = start_pos + var i uint + var num_distance_symbols uint32 = params.dist.alphabet_size + var num_effective_distance_symbols uint32 = num_distance_symbols + var tree []huffmanTree + var literal_context_lut contextLUT = getContextLUT(literal_context_mode) + var dist *distanceParams = ¶ms.dist + if params.large_window && num_effective_distance_symbols > numHistogramDistanceSymbols { + num_effective_distance_symbols = numHistogramDistanceSymbols + } + + storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) + + tree = make([]huffmanTree, maxHuffmanTreeSize) + literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks) + command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks) + distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks) + + buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, storage_ix, storage) + buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, storage_ix, storage) + buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, storage_ix, storage) + + writeBits(2, uint64(dist.distance_postfix_bits), storage_ix, storage) + writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits, storage_ix, storage) + for i = 0; i < mb.literal_split.num_types; i++ { + writeBits(2, uint64(literal_context_mode), storage_ix, 
storage) + } + + if mb.literal_context_map_size == 0 { + storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, storage_ix, storage) + } else { + encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, storage_ix, storage) + } + + if mb.distance_context_map_size == 0 { + storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, storage_ix, storage) + } else { + encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, storage_ix, storage) + } + + buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage) + buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage) + buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage) + tree = nil + + for _, cmd := range commands { + var cmd_code uint = uint(cmd.cmd_prefix_) + storeSymbol(command_enc, cmd_code, storage_ix, storage) + storeCommandExtra(&cmd, storage_ix, storage) + if mb.literal_context_map_size == 0 { + var j uint + for j = uint(cmd.insert_len_); j != 0; j-- { + storeSymbol(literal_enc, uint(input[pos&mask]), storage_ix, storage) + pos++ + } + } else { + var j uint + for j = uint(cmd.insert_len_); j != 0; j-- { + var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut)) + var literal byte = input[pos&mask] + storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits) + prev_byte2 = prev_byte + prev_byte = literal + pos++ + } + } + + pos += uint(commandCopyLen(&cmd)) + if commandCopyLen(&cmd) != 0 { + prev_byte2 = input[(pos-2)&mask] + prev_byte = input[(pos-1)&mask] + if cmd.cmd_prefix_ >= 128 { + var dist_code uint = 
uint(cmd.dist_prefix_) & 0x3FF + var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10 + var distextra uint64 = uint64(cmd.dist_extra_) + if mb.distance_context_map_size == 0 { + storeSymbol(distance_enc, dist_code, storage_ix, storage) + } else { + var context uint = uint(commandDistanceContext(&cmd)) + storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits) + } + + writeBits(uint(distnumextra), distextra, storage_ix, storage) + } + } + } + + cleanupBlockEncoder(distance_enc) + cleanupBlockEncoder(command_enc) + cleanupBlockEncoder(literal_enc) + if is_last { + jumpToByteBoundary(storage_ix, storage) + } +} + +func buildHistograms(input []byte, start_pos uint, mask uint, commands []command, lit_histo *histogramLiteral, cmd_histo *histogramCommand, dist_histo *histogramDistance) { + var pos uint = start_pos + for _, cmd := range commands { + var j uint + histogramAddCommand(cmd_histo, uint(cmd.cmd_prefix_)) + for j = uint(cmd.insert_len_); j != 0; j-- { + histogramAddLiteral(lit_histo, uint(input[pos&mask])) + pos++ + } + + pos += uint(commandCopyLen(&cmd)) + if commandCopyLen(&cmd) != 0 && cmd.cmd_prefix_ >= 128 { + histogramAddDistance(dist_histo, uint(cmd.dist_prefix_)&0x3FF) + } + } +} + +func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, storage_ix *uint, storage []byte) { + var pos uint = start_pos + for _, cmd := range commands { + var cmd_code uint = uint(cmd.cmd_prefix_) + var j uint + writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]), storage_ix, storage) + storeCommandExtra(&cmd, storage_ix, storage) + for j = uint(cmd.insert_len_); j != 0; j-- { + var literal byte = input[pos&mask] + writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]), storage_ix, storage) + pos++ + } + + pos += uint(commandCopyLen(&cmd)) + 
if commandCopyLen(&cmd) != 0 && cmd.cmd_prefix_ >= 128 { + var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF + var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10 + var distextra uint32 = cmd.dist_extra_ + writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]), storage_ix, storage) + writeBits(uint(distnumextra), uint64(distextra), storage_ix, storage) + } + } +} + +func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) { + var lit_histo histogramLiteral + var cmd_histo histogramCommand + var dist_histo histogramDistance + var lit_depth [numLiteralSymbols]byte + var lit_bits [numLiteralSymbols]uint16 + var cmd_depth [numCommandSymbols]byte + var cmd_bits [numCommandSymbols]uint16 + var dist_depth [maxSimpleDistanceAlphabetSize]byte + var dist_bits [maxSimpleDistanceAlphabetSize]uint16 + var tree []huffmanTree + var num_distance_symbols uint32 = params.dist.alphabet_size + + storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) + + histogramClearLiteral(&lit_histo) + histogramClearCommand(&cmd_histo) + histogramClearDistance(&dist_histo) + + buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo) + + writeBits(13, 0, storage_ix, storage) + + tree = make([]huffmanTree, maxHuffmanTreeSize) + buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], storage_ix, storage) + buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], storage_ix, storage) + buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], storage_ix, storage) + tree = nil + storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, 
storage) + if is_last { + jumpToByteBoundary(storage_ix, storage) + } +} + +func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) { + var num_distance_symbols uint32 = params.dist.alphabet_size + var distance_alphabet_bits uint32 = log2FloorNonZero(uint(num_distance_symbols-1)) + 1 + + storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) + + writeBits(13, 0, storage_ix, storage) + + if len(commands) <= 128 { + var histogram = [numLiteralSymbols]uint32{0} + var pos uint = start_pos + var num_literals uint = 0 + var lit_depth [numLiteralSymbols]byte + var lit_bits [numLiteralSymbols]uint16 + for _, cmd := range commands { + var j uint + for j = uint(cmd.insert_len_); j != 0; j-- { + histogram[input[pos&mask]]++ + pos++ + } + + num_literals += uint(cmd.insert_len_) + pos += uint(commandCopyLen(&cmd)) + } + + buildAndStoreHuffmanTreeFast(histogram[:], num_literals, /* max_bits = */ + 8, lit_depth[:], lit_bits[:], storage_ix, storage) + + storeStaticCommandHuffmanTree(storage_ix, storage) + storeStaticDistanceHuffmanTree(storage_ix, storage) + storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], storage_ix, storage) + } else { + var lit_histo histogramLiteral + var cmd_histo histogramCommand + var dist_histo histogramDistance + var lit_depth [numLiteralSymbols]byte + var lit_bits [numLiteralSymbols]uint16 + var cmd_depth [numCommandSymbols]byte + var cmd_bits [numCommandSymbols]uint16 + var dist_depth [maxSimpleDistanceAlphabetSize]byte + var dist_bits [maxSimpleDistanceAlphabetSize]uint16 + histogramClearLiteral(&lit_histo) + histogramClearCommand(&cmd_histo) + histogramClearDistance(&dist_histo) + buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo) + 
buildAndStoreHuffmanTreeFast(lit_histo.data_[:], lit_histo.total_count_, /* max_bits = */ + 8, lit_depth[:], lit_bits[:], storage_ix, storage) + + buildAndStoreHuffmanTreeFast(cmd_histo.data_[:], cmd_histo.total_count_, /* max_bits = */ + 10, cmd_depth[:], cmd_bits[:], storage_ix, storage) + + buildAndStoreHuffmanTreeFast(dist_histo.data_[:], dist_histo.total_count_, /* max_bits = */ + uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], storage_ix, storage) + + storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage) + } + + if is_last { + jumpToByteBoundary(storage_ix, storage) + } +} + +/* +This is for storing uncompressed blocks (simple raw storage of + + bytes-as-bytes). +*/ +func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, storage_ix *uint, storage []byte) { + var masked_pos uint = position & mask + storeUncompressedMetaBlockHeader(uint(len), storage_ix, storage) + jumpToByteBoundary(storage_ix, storage) + + if masked_pos+len > mask+1 { + var len1 uint = mask + 1 - masked_pos + copy(storage[*storage_ix>>3:], input[masked_pos:][:len1]) + *storage_ix += len1 << 3 + len -= len1 + masked_pos = 0 + } + + copy(storage[*storage_ix>>3:], input[masked_pos:][:len]) + *storage_ix += uint(len << 3) + + /* We need to clear the next 4 bytes to continue to be + compatible with BrotliWriteBits. */ + writeBitsPrepareStorage(*storage_ix, storage) + + /* Since the uncompressed block itself may not be the final block, add an + empty one after this. 
*/ + if is_final_block { + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + jumpToByteBoundary(storage_ix, storage) + } +} diff --git a/vendor/github.com/andybalholm/brotli/cluster.go b/vendor/github.com/andybalholm/brotli/cluster.go new file mode 100644 index 00000000000..df8a3282245 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster.go @@ -0,0 +1,30 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Functions for clustering similar histograms together. */ + +type histogramPair struct { + idx1 uint32 + idx2 uint32 + cost_combo float64 + cost_diff float64 +} + +func histogramPairIsLess(p1 *histogramPair, p2 *histogramPair) bool { + if p1.cost_diff != p2.cost_diff { + return p1.cost_diff > p2.cost_diff + } + + return (p1.idx2 - p1.idx1) > (p2.idx2 - p2.idx1) +} + +/* Returns entropy reduction of the context map when we combine two clusters. */ +func clusterCostDiff(size_a uint, size_b uint) float64 { + var size_c uint = size_a + size_b + return float64(size_a)*fastLog2(size_a) + float64(size_b)*fastLog2(size_b) - float64(size_c)*fastLog2(size_c) +} diff --git a/vendor/github.com/andybalholm/brotli/cluster_command.go b/vendor/github.com/andybalholm/brotli/cluster_command.go new file mode 100644 index 00000000000..45b569bb2a5 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster_command.go @@ -0,0 +1,164 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if + it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. 
*/ +func compareAndPushToQueueCommand(out []histogramCommand, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { + var is_good_pair bool = false + var p histogramPair + p.idx2 = 0 + p.idx1 = p.idx2 + p.cost_combo = 0 + p.cost_diff = p.cost_combo + if idx1 == idx2 { + return + } + + if idx2 < idx1 { + var t uint32 = idx2 + idx2 = idx1 + idx1 = t + } + + p.idx1 = idx1 + p.idx2 = idx2 + p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) + p.cost_diff -= out[idx1].bit_cost_ + p.cost_diff -= out[idx2].bit_cost_ + + if out[idx1].total_count_ == 0 { + p.cost_combo = out[idx2].bit_cost_ + is_good_pair = true + } else if out[idx2].total_count_ == 0 { + p.cost_combo = out[idx1].bit_cost_ + is_good_pair = true + } else { + var threshold float64 + if *num_pairs == 0 { + threshold = 1e99 + } else { + threshold = brotli_max_double(0.0, pairs[0].cost_diff) + } + var combo histogramCommand = out[idx1] + var cost_combo float64 + histogramAddHistogramCommand(&combo, &out[idx2]) + cost_combo = populationCostCommand(&combo) + if cost_combo < threshold-p.cost_diff { + p.cost_combo = cost_combo + is_good_pair = true + } + } + + if is_good_pair { + p.cost_diff += p.cost_combo + if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { + /* Replace the top of the queue if needed. 
*/ + if *num_pairs < max_num_pairs { + pairs[*num_pairs] = pairs[0] + (*num_pairs)++ + } + + pairs[0] = p + } else if *num_pairs < max_num_pairs { + pairs[*num_pairs] = p + (*num_pairs)++ + } + } +} + +func histogramCombineCommand(out []histogramCommand, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { + var cost_diff_threshold float64 = 0.0 + var min_cluster_size uint = 1 + var num_pairs uint = 0 + { + /* We maintain a vector of histogram pairs, with the property that the pair + with the maximum bit cost reduction is the first. */ + var idx1 uint + for idx1 = 0; idx1 < num_clusters; idx1++ { + var idx2 uint + for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { + compareAndPushToQueueCommand(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) + } + } + } + + for num_clusters > min_cluster_size { + var best_idx1 uint32 + var best_idx2 uint32 + var i uint + if pairs[0].cost_diff >= cost_diff_threshold { + cost_diff_threshold = 1e99 + min_cluster_size = max_clusters + continue + } + + /* Take the best pair from the top of heap. */ + best_idx1 = pairs[0].idx1 + + best_idx2 = pairs[0].idx2 + histogramAddHistogramCommand(&out[best_idx1], &out[best_idx2]) + out[best_idx1].bit_cost_ = pairs[0].cost_combo + cluster_size[best_idx1] += cluster_size[best_idx2] + for i = 0; i < symbols_size; i++ { + if symbols[i] == best_idx2 { + symbols[i] = best_idx1 + } + } + + for i = 0; i < num_clusters; i++ { + if clusters[i] == best_idx2 { + copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) + break + } + } + + num_clusters-- + { + /* Remove pairs intersecting the just combined best pair. */ + var copy_to_idx uint = 0 + for i = 0; i < num_pairs; i++ { + var p *histogramPair = &pairs[i] + if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { + /* Remove invalid pair from the queue. 
*/ + continue + } + + if histogramPairIsLess(&pairs[0], p) { + /* Replace the top of the queue if needed. */ + var front histogramPair = pairs[0] + pairs[0] = *p + pairs[copy_to_idx] = front + } else { + pairs[copy_to_idx] = *p + } + + copy_to_idx++ + } + + num_pairs = copy_to_idx + } + + /* Push new pairs formed with the combined histogram to the heap. */ + for i = 0; i < num_clusters; i++ { + compareAndPushToQueueCommand(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) + } + } + + return num_clusters +} + +/* What is the bit cost of moving histogram from cur_symbol to candidate. */ +func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *histogramCommand) float64 { + if histogram.total_count_ == 0 { + return 0.0 + } else { + var tmp histogramCommand = *histogram + histogramAddHistogramCommand(&tmp, candidate) + return populationCostCommand(&tmp) - candidate.bit_cost_ + } +} diff --git a/vendor/github.com/andybalholm/brotli/cluster_distance.go b/vendor/github.com/andybalholm/brotli/cluster_distance.go new file mode 100644 index 00000000000..1aaa86e6ed8 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster_distance.go @@ -0,0 +1,326 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if + it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. 
*/ +func compareAndPushToQueueDistance(out []histogramDistance, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { + var is_good_pair bool = false + var p histogramPair + p.idx2 = 0 + p.idx1 = p.idx2 + p.cost_combo = 0 + p.cost_diff = p.cost_combo + if idx1 == idx2 { + return + } + + if idx2 < idx1 { + var t uint32 = idx2 + idx2 = idx1 + idx1 = t + } + + p.idx1 = idx1 + p.idx2 = idx2 + p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) + p.cost_diff -= out[idx1].bit_cost_ + p.cost_diff -= out[idx2].bit_cost_ + + if out[idx1].total_count_ == 0 { + p.cost_combo = out[idx2].bit_cost_ + is_good_pair = true + } else if out[idx2].total_count_ == 0 { + p.cost_combo = out[idx1].bit_cost_ + is_good_pair = true + } else { + var threshold float64 + if *num_pairs == 0 { + threshold = 1e99 + } else { + threshold = brotli_max_double(0.0, pairs[0].cost_diff) + } + var combo histogramDistance = out[idx1] + var cost_combo float64 + histogramAddHistogramDistance(&combo, &out[idx2]) + cost_combo = populationCostDistance(&combo) + if cost_combo < threshold-p.cost_diff { + p.cost_combo = cost_combo + is_good_pair = true + } + } + + if is_good_pair { + p.cost_diff += p.cost_combo + if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { + /* Replace the top of the queue if needed. 
*/ + if *num_pairs < max_num_pairs { + pairs[*num_pairs] = pairs[0] + (*num_pairs)++ + } + + pairs[0] = p + } else if *num_pairs < max_num_pairs { + pairs[*num_pairs] = p + (*num_pairs)++ + } + } +} + +func histogramCombineDistance(out []histogramDistance, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { + var cost_diff_threshold float64 = 0.0 + var min_cluster_size uint = 1 + var num_pairs uint = 0 + { + /* We maintain a vector of histogram pairs, with the property that the pair + with the maximum bit cost reduction is the first. */ + var idx1 uint + for idx1 = 0; idx1 < num_clusters; idx1++ { + var idx2 uint + for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { + compareAndPushToQueueDistance(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) + } + } + } + + for num_clusters > min_cluster_size { + var best_idx1 uint32 + var best_idx2 uint32 + var i uint + if pairs[0].cost_diff >= cost_diff_threshold { + cost_diff_threshold = 1e99 + min_cluster_size = max_clusters + continue + } + + /* Take the best pair from the top of heap. */ + best_idx1 = pairs[0].idx1 + + best_idx2 = pairs[0].idx2 + histogramAddHistogramDistance(&out[best_idx1], &out[best_idx2]) + out[best_idx1].bit_cost_ = pairs[0].cost_combo + cluster_size[best_idx1] += cluster_size[best_idx2] + for i = 0; i < symbols_size; i++ { + if symbols[i] == best_idx2 { + symbols[i] = best_idx1 + } + } + + for i = 0; i < num_clusters; i++ { + if clusters[i] == best_idx2 { + copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) + break + } + } + + num_clusters-- + { + /* Remove pairs intersecting the just combined best pair. */ + var copy_to_idx uint = 0 + for i = 0; i < num_pairs; i++ { + var p *histogramPair = &pairs[i] + if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { + /* Remove invalid pair from the queue. 
*/ + continue + } + + if histogramPairIsLess(&pairs[0], p) { + /* Replace the top of the queue if needed. */ + var front histogramPair = pairs[0] + pairs[0] = *p + pairs[copy_to_idx] = front + } else { + pairs[copy_to_idx] = *p + } + + copy_to_idx++ + } + + num_pairs = copy_to_idx + } + + /* Push new pairs formed with the combined histogram to the heap. */ + for i = 0; i < num_clusters; i++ { + compareAndPushToQueueDistance(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) + } + } + + return num_clusters +} + +/* What is the bit cost of moving histogram from cur_symbol to candidate. */ +func histogramBitCostDistanceDistance(histogram *histogramDistance, candidate *histogramDistance) float64 { + if histogram.total_count_ == 0 { + return 0.0 + } else { + var tmp histogramDistance = *histogram + histogramAddHistogramDistance(&tmp, candidate) + return populationCostDistance(&tmp) - candidate.bit_cost_ + } +} + +/* Find the best 'out' histogram for each of the 'in' histograms. + When called, clusters[0..num_clusters) contains the unique values from + symbols[0..in_size), but this property is not preserved in this function. + Note: we assume that out[]->bit_cost_ is already up-to-date. */ +func histogramRemapDistance(in []histogramDistance, in_size uint, clusters []uint32, num_clusters uint, out []histogramDistance, symbols []uint32) { + var i uint + for i = 0; i < in_size; i++ { + var best_out uint32 + if i == 0 { + best_out = symbols[0] + } else { + best_out = symbols[i-1] + } + var best_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[best_out]) + var j uint + for j = 0; j < num_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + symbols[i] = best_out + } + + /* Recompute each out based on raw and symbols. 
*/ + for i = 0; i < num_clusters; i++ { + histogramClearDistance(&out[clusters[i]]) + } + + for i = 0; i < in_size; i++ { + histogramAddHistogramDistance(&out[symbols[i]], &in[i]) + } +} + +/* Reorders elements of the out[0..length) array and changes values in + symbols[0..length) array in the following way: + * when called, symbols[] contains indexes into out[], and has N unique + values (possibly N < length) + * on return, symbols'[i] = f(symbols[i]) and + out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length, + where f is a bijection between the range of symbols[] and [0..N), and + the first occurrences of values in symbols'[i] come in consecutive + increasing order. + Returns N, the number of unique values in symbols[]. */ + +var histogramReindexDistance_kInvalidIndex uint32 = math.MaxUint32 + +func histogramReindexDistance(out []histogramDistance, symbols []uint32, length uint) uint { + var new_index []uint32 = make([]uint32, length) + var next_index uint32 + var tmp []histogramDistance + var i uint + for i = 0; i < length; i++ { + new_index[i] = histogramReindexDistance_kInvalidIndex + } + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == histogramReindexDistance_kInvalidIndex { + new_index[symbols[i]] = next_index + next_index++ + } + } + + /* TODO: by using idea of "cycle-sort" we can avoid allocation of + tmp and reduce the number of copying by the factor of 2. 
*/ + tmp = make([]histogramDistance, next_index) + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == next_index { + tmp[next_index] = out[symbols[i]] + next_index++ + } + + symbols[i] = new_index[symbols[i]] + } + + new_index = nil + for i = 0; uint32(i) < next_index; i++ { + out[i] = tmp[i] + } + + tmp = nil + return uint(next_index) +} + +func clusterHistogramsDistance(in []histogramDistance, in_size uint, max_histograms uint, out []histogramDistance, out_size *uint, histogram_symbols []uint32) { + var cluster_size []uint32 = make([]uint32, in_size) + var clusters []uint32 = make([]uint32, in_size) + var num_clusters uint = 0 + var max_input_histograms uint = 64 + var pairs_capacity uint = max_input_histograms * max_input_histograms / 2 + var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1)) + var i uint + + /* For the first pass of clustering, we allow all pairs. */ + for i = 0; i < in_size; i++ { + cluster_size[i] = 1 + } + + for i = 0; i < in_size; i++ { + out[i] = in[i] + out[i].bit_cost_ = populationCostDistance(&in[i]) + histogram_symbols[i] = uint32(i) + } + + for i = 0; i < in_size; i += max_input_histograms { + var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + clusters[num_clusters+j] = uint32(i + j) + } + + num_new_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity) + num_clusters += num_new_clusters + } + { + /* For the second pass, we limit the total number of histogram pairs. + After this limit is reached, we only keep searching for the best pair. 
*/ + var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < (max_num_pairs + 1) { + var _new_size uint + if pairs_capacity == 0 { + _new_size = max_num_pairs + 1 + } else { + _new_size = pairs_capacity + } + var new_array []histogramPair + for _new_size < (max_num_pairs + 1) { + _new_size *= 2 + } + new_array = make([]histogramPair, _new_size) + if pairs_capacity != 0 { + copy(new_array, pairs[:pairs_capacity]) + } + + pairs = new_array + pairs_capacity = _new_size + } + + /* Collapse similar histograms. */ + num_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs) + } + + pairs = nil + cluster_size = nil + + /* Find the optimal map from original histograms to the final ones. */ + histogramRemapDistance(in, in_size, clusters, num_clusters, out, histogram_symbols) + + clusters = nil + + /* Convert the context map to a canonical form. */ + *out_size = histogramReindexDistance(out, histogram_symbols, in_size) +} diff --git a/vendor/github.com/andybalholm/brotli/cluster_literal.go b/vendor/github.com/andybalholm/brotli/cluster_literal.go new file mode 100644 index 00000000000..6ba66f31b2c --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/cluster_literal.go @@ -0,0 +1,326 @@ +package brotli + +import "math" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if + it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. 
*/ +func compareAndPushToQueueLiteral(out []histogramLiteral, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { + var is_good_pair bool = false + var p histogramPair + p.idx2 = 0 + p.idx1 = p.idx2 + p.cost_combo = 0 + p.cost_diff = p.cost_combo + if idx1 == idx2 { + return + } + + if idx2 < idx1 { + var t uint32 = idx2 + idx2 = idx1 + idx1 = t + } + + p.idx1 = idx1 + p.idx2 = idx2 + p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) + p.cost_diff -= out[idx1].bit_cost_ + p.cost_diff -= out[idx2].bit_cost_ + + if out[idx1].total_count_ == 0 { + p.cost_combo = out[idx2].bit_cost_ + is_good_pair = true + } else if out[idx2].total_count_ == 0 { + p.cost_combo = out[idx1].bit_cost_ + is_good_pair = true + } else { + var threshold float64 + if *num_pairs == 0 { + threshold = 1e99 + } else { + threshold = brotli_max_double(0.0, pairs[0].cost_diff) + } + var combo histogramLiteral = out[idx1] + var cost_combo float64 + histogramAddHistogramLiteral(&combo, &out[idx2]) + cost_combo = populationCostLiteral(&combo) + if cost_combo < threshold-p.cost_diff { + p.cost_combo = cost_combo + is_good_pair = true + } + } + + if is_good_pair { + p.cost_diff += p.cost_combo + if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { + /* Replace the top of the queue if needed. 
*/ + if *num_pairs < max_num_pairs { + pairs[*num_pairs] = pairs[0] + (*num_pairs)++ + } + + pairs[0] = p + } else if *num_pairs < max_num_pairs { + pairs[*num_pairs] = p + (*num_pairs)++ + } + } +} + +func histogramCombineLiteral(out []histogramLiteral, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { + var cost_diff_threshold float64 = 0.0 + var min_cluster_size uint = 1 + var num_pairs uint = 0 + { + /* We maintain a vector of histogram pairs, with the property that the pair + with the maximum bit cost reduction is the first. */ + var idx1 uint + for idx1 = 0; idx1 < num_clusters; idx1++ { + var idx2 uint + for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { + compareAndPushToQueueLiteral(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) + } + } + } + + for num_clusters > min_cluster_size { + var best_idx1 uint32 + var best_idx2 uint32 + var i uint + if pairs[0].cost_diff >= cost_diff_threshold { + cost_diff_threshold = 1e99 + min_cluster_size = max_clusters + continue + } + + /* Take the best pair from the top of heap. */ + best_idx1 = pairs[0].idx1 + + best_idx2 = pairs[0].idx2 + histogramAddHistogramLiteral(&out[best_idx1], &out[best_idx2]) + out[best_idx1].bit_cost_ = pairs[0].cost_combo + cluster_size[best_idx1] += cluster_size[best_idx2] + for i = 0; i < symbols_size; i++ { + if symbols[i] == best_idx2 { + symbols[i] = best_idx1 + } + } + + for i = 0; i < num_clusters; i++ { + if clusters[i] == best_idx2 { + copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) + break + } + } + + num_clusters-- + { + /* Remove pairs intersecting the just combined best pair. */ + var copy_to_idx uint = 0 + for i = 0; i < num_pairs; i++ { + var p *histogramPair = &pairs[i] + if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { + /* Remove invalid pair from the queue. 
*/ + continue + } + + if histogramPairIsLess(&pairs[0], p) { + /* Replace the top of the queue if needed. */ + var front histogramPair = pairs[0] + pairs[0] = *p + pairs[copy_to_idx] = front + } else { + pairs[copy_to_idx] = *p + } + + copy_to_idx++ + } + + num_pairs = copy_to_idx + } + + /* Push new pairs formed with the combined histogram to the heap. */ + for i = 0; i < num_clusters; i++ { + compareAndPushToQueueLiteral(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) + } + } + + return num_clusters +} + +/* What is the bit cost of moving histogram from cur_symbol to candidate. */ +func histogramBitCostDistanceLiteral(histogram *histogramLiteral, candidate *histogramLiteral) float64 { + if histogram.total_count_ == 0 { + return 0.0 + } else { + var tmp histogramLiteral = *histogram + histogramAddHistogramLiteral(&tmp, candidate) + return populationCostLiteral(&tmp) - candidate.bit_cost_ + } +} + +/* Find the best 'out' histogram for each of the 'in' histograms. + When called, clusters[0..num_clusters) contains the unique values from + symbols[0..in_size), but this property is not preserved in this function. + Note: we assume that out[]->bit_cost_ is already up-to-date. */ +func histogramRemapLiteral(in []histogramLiteral, in_size uint, clusters []uint32, num_clusters uint, out []histogramLiteral, symbols []uint32) { + var i uint + for i = 0; i < in_size; i++ { + var best_out uint32 + if i == 0 { + best_out = symbols[0] + } else { + best_out = symbols[i-1] + } + var best_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[best_out]) + var j uint + for j = 0; j < num_clusters; j++ { + var cur_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[clusters[j]]) + if cur_bits < best_bits { + best_bits = cur_bits + best_out = clusters[j] + } + } + + symbols[i] = best_out + } + + /* Recompute each out based on raw and symbols. 
*/ + for i = 0; i < num_clusters; i++ { + histogramClearLiteral(&out[clusters[i]]) + } + + for i = 0; i < in_size; i++ { + histogramAddHistogramLiteral(&out[symbols[i]], &in[i]) + } +} + +/* Reorders elements of the out[0..length) array and changes values in + symbols[0..length) array in the following way: + * when called, symbols[] contains indexes into out[], and has N unique + values (possibly N < length) + * on return, symbols'[i] = f(symbols[i]) and + out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length, + where f is a bijection between the range of symbols[] and [0..N), and + the first occurrences of values in symbols'[i] come in consecutive + increasing order. + Returns N, the number of unique values in symbols[]. */ + +var histogramReindexLiteral_kInvalidIndex uint32 = math.MaxUint32 + +func histogramReindexLiteral(out []histogramLiteral, symbols []uint32, length uint) uint { + var new_index []uint32 = make([]uint32, length) + var next_index uint32 + var tmp []histogramLiteral + var i uint + for i = 0; i < length; i++ { + new_index[i] = histogramReindexLiteral_kInvalidIndex + } + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == histogramReindexLiteral_kInvalidIndex { + new_index[symbols[i]] = next_index + next_index++ + } + } + + /* TODO: by using idea of "cycle-sort" we can avoid allocation of + tmp and reduce the number of copying by the factor of 2. 
*/ + tmp = make([]histogramLiteral, next_index) + + next_index = 0 + for i = 0; i < length; i++ { + if new_index[symbols[i]] == next_index { + tmp[next_index] = out[symbols[i]] + next_index++ + } + + symbols[i] = new_index[symbols[i]] + } + + new_index = nil + for i = 0; uint32(i) < next_index; i++ { + out[i] = tmp[i] + } + + tmp = nil + return uint(next_index) +} + +func clusterHistogramsLiteral(in []histogramLiteral, in_size uint, max_histograms uint, out []histogramLiteral, out_size *uint, histogram_symbols []uint32) { + var cluster_size []uint32 = make([]uint32, in_size) + var clusters []uint32 = make([]uint32, in_size) + var num_clusters uint = 0 + var max_input_histograms uint = 64 + var pairs_capacity uint = max_input_histograms * max_input_histograms / 2 + var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1)) + var i uint + + /* For the first pass of clustering, we allow all pairs. */ + for i = 0; i < in_size; i++ { + cluster_size[i] = 1 + } + + for i = 0; i < in_size; i++ { + out[i] = in[i] + out[i].bit_cost_ = populationCostLiteral(&in[i]) + histogram_symbols[i] = uint32(i) + } + + for i = 0; i < in_size; i += max_input_histograms { + var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms) + var num_new_clusters uint + var j uint + for j = 0; j < num_to_combine; j++ { + clusters[num_clusters+j] = uint32(i + j) + } + + num_new_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity) + num_clusters += num_new_clusters + } + { + /* For the second pass, we limit the total number of histogram pairs. + After this limit is reached, we only keep searching for the best pair. 
*/ + var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) + if pairs_capacity < (max_num_pairs + 1) { + var _new_size uint + if pairs_capacity == 0 { + _new_size = max_num_pairs + 1 + } else { + _new_size = pairs_capacity + } + var new_array []histogramPair + for _new_size < (max_num_pairs + 1) { + _new_size *= 2 + } + new_array = make([]histogramPair, _new_size) + if pairs_capacity != 0 { + copy(new_array, pairs[:pairs_capacity]) + } + + pairs = new_array + pairs_capacity = _new_size + } + + /* Collapse similar histograms. */ + num_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs) + } + + pairs = nil + cluster_size = nil + + /* Find the optimal map from original histograms to the final ones. */ + histogramRemapLiteral(in, in_size, clusters, num_clusters, out, histogram_symbols) + + clusters = nil + + /* Convert the context map to a canonical form. */ + *out_size = histogramReindexLiteral(out, histogram_symbols, in_size) +} diff --git a/vendor/github.com/andybalholm/brotli/command.go b/vendor/github.com/andybalholm/brotli/command.go new file mode 100644 index 00000000000..b1662a55552 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/command.go @@ -0,0 +1,254 @@ +package brotli + +var kInsBase = []uint32{ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 14, + 18, + 26, + 34, + 50, + 66, + 98, + 130, + 194, + 322, + 578, + 1090, + 2114, + 6210, + 22594, +} + +var kInsExtra = []uint32{ + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 24, +} + +var kCopyBase = []uint32{ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 18, + 22, + 30, + 38, + 54, + 70, + 102, + 134, + 198, + 326, + 582, + 1094, + 2118, +} + +var kCopyExtra = []uint32{ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 24, 
+} + +func getInsertLengthCode(insertlen uint) uint16 { + if insertlen < 6 { + return uint16(insertlen) + } else if insertlen < 130 { + var nbits uint32 = log2FloorNonZero(insertlen-2) - 1 + return uint16((nbits << 1) + uint32((insertlen-2)>>nbits) + 2) + } else if insertlen < 2114 { + return uint16(log2FloorNonZero(insertlen-66) + 10) + } else if insertlen < 6210 { + return 21 + } else if insertlen < 22594 { + return 22 + } else { + return 23 + } +} + +func getCopyLengthCode(copylen uint) uint16 { + if copylen < 10 { + return uint16(copylen - 2) + } else if copylen < 134 { + var nbits uint32 = log2FloorNonZero(copylen-6) - 1 + return uint16((nbits << 1) + uint32((copylen-6)>>nbits) + 4) + } else if copylen < 2118 { + return uint16(log2FloorNonZero(copylen-70) + 12) + } else { + return 23 + } +} + +func combineLengthCodes(inscode uint16, copycode uint16, use_last_distance bool) uint16 { + var bits64 uint16 = uint16(copycode&0x7 | (inscode&0x7)<<3) + if use_last_distance && inscode < 8 && copycode < 16 { + if copycode < 8 { + return bits64 + } else { + return bits64 | 64 + } + } else { + /* Specification: 5 Encoding of ... (last table) */ + /* offset = 2 * index, where index is in range [0..8] */ + var offset uint32 = 2 * ((uint32(copycode) >> 3) + 3*(uint32(inscode)>>3)) + + /* All values in specification are K * 64, + where K = [2, 3, 6, 4, 5, 8, 7, 9, 10], + i + 1 = [1, 2, 3, 4, 5, 6, 7, 8, 9], + K - i - 1 = [1, 1, 3, 0, 0, 2, 0, 1, 2] = D. + All values in D require only 2 bits to encode. + Magic constant is shifted 6 bits left, to avoid final multiplication. 
*/ + offset = (offset << 5) + 0x40 + ((0x520D40 >> offset) & 0xC0) + + return uint16(offset | uint32(bits64)) + } +} + +func getLengthCode(insertlen uint, copylen uint, use_last_distance bool, code *uint16) { + var inscode uint16 = getInsertLengthCode(insertlen) + var copycode uint16 = getCopyLengthCode(copylen) + *code = combineLengthCodes(inscode, copycode, use_last_distance) +} + +func getInsertBase(inscode uint16) uint32 { + return kInsBase[inscode] +} + +func getInsertExtra(inscode uint16) uint32 { + return kInsExtra[inscode] +} + +func getCopyBase(copycode uint16) uint32 { + return kCopyBase[copycode] +} + +func getCopyExtra(copycode uint16) uint32 { + return kCopyExtra[copycode] +} + +type command struct { + insert_len_ uint32 + copy_len_ uint32 + dist_extra_ uint32 + cmd_prefix_ uint16 + dist_prefix_ uint16 +} + +/* distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. */ +func makeCommand(dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) (cmd command) { + /* Don't rely on signed int representation, use honest casts. */ + var delta uint32 = uint32(byte(int8(copylen_code_delta))) + cmd.insert_len_ = uint32(insertlen) + cmd.copy_len_ = uint32(uint32(copylen) | delta<<25) + + /* The distance prefix and extra bits are stored in this Command as if + npostfix and ndirect were 0, they are only recomputed later after the + clustering if needed. 
*/ + prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_) + getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (cmd.dist_prefix_&0x3FF == 0), &cmd.cmd_prefix_) + + return cmd +} + +func makeInsertCommand(insertlen uint) (cmd command) { + cmd.insert_len_ = uint32(insertlen) + cmd.copy_len_ = 4 << 25 + cmd.dist_extra_ = 0 + cmd.dist_prefix_ = numDistanceShortCodes + getLengthCode(insertlen, 4, false, &cmd.cmd_prefix_) + return cmd +} + +func commandRestoreDistanceCode(self *command, dist *distanceParams) uint32 { + if uint32(self.dist_prefix_&0x3FF) < numDistanceShortCodes+dist.num_direct_distance_codes { + return uint32(self.dist_prefix_) & 0x3FF + } else { + var dcode uint32 = uint32(self.dist_prefix_) & 0x3FF + var nbits uint32 = uint32(self.dist_prefix_) >> 10 + var extra uint32 = self.dist_extra_ + var postfix_mask uint32 = (1 << dist.distance_postfix_bits) - 1 + var hcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) >> dist.distance_postfix_bits + var lcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) & postfix_mask + var offset uint32 = ((2 + (hcode & 1)) << nbits) - 4 + return ((offset + extra) << dist.distance_postfix_bits) + lcode + dist.num_direct_distance_codes + numDistanceShortCodes + } +} + +func commandDistanceContext(self *command) uint32 { + var r uint32 = uint32(self.cmd_prefix_) >> 6 + var c uint32 = uint32(self.cmd_prefix_) & 7 + if (r == 0 || r == 2 || r == 4 || r == 7) && (c <= 2) { + return c + } + + return 3 +} + +func commandCopyLen(self *command) uint32 { + return self.copy_len_ & 0x1FFFFFF +} + +func commandCopyLenCode(self *command) uint32 { + var modifier uint32 = self.copy_len_ >> 25 + var delta int32 = int32(int8(byte(modifier | (modifier&0x40)<<1))) + return uint32(int32(self.copy_len_&0x1FFFFFF) + delta) +} diff --git 
a/vendor/github.com/andybalholm/brotli/compress_fragment.go b/vendor/github.com/andybalholm/brotli/compress_fragment.go new file mode 100644 index 00000000000..c9bd0577056 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/compress_fragment.go @@ -0,0 +1,834 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function for fast encoding of an input fragment, independently from the input + history. This function uses one-pass processing: when we find a backward + match, we immediately emit the corresponding command and literal codes to + the bit stream. + + Adapted from the CompressFragment() function in + https://github.com/google/snappy/blob/master/snappy.cc */ + +const maxDistance_compress_fragment = 262128 + +func hash5(p []byte, shift uint) uint32 { + var h uint64 = (binary.LittleEndian.Uint64(p) << 24) * uint64(kHashMul32) + return uint32(h >> shift) +} + +func hashBytesAtOffset5(v uint64, offset int, shift uint) uint32 { + assert(offset >= 0) + assert(offset <= 3) + { + var h uint64 = ((v >> uint(8*offset)) << 24) * uint64(kHashMul32) + return uint32(h >> shift) + } +} + +func isMatch5(p1 []byte, p2 []byte) bool { + return binary.LittleEndian.Uint32(p1) == binary.LittleEndian.Uint32(p2) && + p1[4] == p2[4] +} + +/* Builds a literal prefix code into "depths" and "bits" based on the statistics + of the "input" string and stores it into the bit stream. + Note that the prefix code here is built from the pre-LZ77 input, therefore + we can only approximate the statistics of the actual literal stream. + Moreover, for long inputs we build a histogram from a sample of the input + and thus have to assign a non-zero depth for each literal. + Returns estimated compression ratio millibytes/char for encoding given input + with generated code. 
*/ +func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint { + var histogram = [256]uint32{0} + var histogram_total uint + var i uint + if input_size < 1<<15 { + for i = 0; i < input_size; i++ { + histogram[input[i]]++ + } + + histogram_total = input_size + for i = 0; i < 256; i++ { + /* We weigh the first 11 samples with weight 3 to account for the + balancing effect of the LZ77 phase on the histogram. */ + var adjust uint32 = 2 * brotli_min_uint32_t(histogram[i], 11) + histogram[i] += adjust + histogram_total += uint(adjust) + } + } else { + const kSampleRate uint = 29 + for i = 0; i < input_size; i += kSampleRate { + histogram[input[i]]++ + } + + histogram_total = (input_size + kSampleRate - 1) / kSampleRate + for i = 0; i < 256; i++ { + /* We add 1 to each population count to avoid 0 bit depths (since this is + only a sample and we don't know if the symbol appears or not), and we + weigh the first 11 samples with weight 3 to account for the balancing + effect of the LZ77 phase on the histogram (more frequent symbols are + more likely to be in backward references instead as literals). */ + var adjust uint32 = 1 + 2*brotli_min_uint32_t(histogram[i], 11) + histogram[i] += adjust + histogram_total += uint(adjust) + } + } + + buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */ + 8, depths, bits, storage_ix, storage) + { + var literal_ratio uint = 0 + for i = 0; i < 256; i++ { + if histogram[i] != 0 { + literal_ratio += uint(histogram[i] * uint32(depths[i])) + } + } + + /* Estimated encoding ratio, millibytes per symbol. */ + return (literal_ratio * 125) / histogram_total + } +} + +/* Builds a command and distance prefix code (each 64 symbols) into "depth" and + "bits" based on "histogram" and stores it into the bit stream. 
*/ +func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var tree [129]huffmanTree + var cmd_depth = [numCommandSymbols]byte{0} + /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */ + + var cmd_bits [64]uint16 + + createHuffmanTree(histogram, 64, 15, tree[:], depth) + createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:]) + + /* We have to jump through a few hoops here in order to compute + the command bits because the symbols are in a different order than in + the full alphabet. This looks complicated, but having the symbols + in this order in the command bits saves a few branches in the Emit* + functions. */ + copy(cmd_depth[:], depth[:24]) + + copy(cmd_depth[24:][:], depth[40:][:8]) + copy(cmd_depth[32:][:], depth[24:][:8]) + copy(cmd_depth[40:][:], depth[48:][:8]) + copy(cmd_depth[48:][:], depth[32:][:8]) + copy(cmd_depth[56:][:], depth[56:][:8]) + convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:]) + copy(bits, cmd_bits[:24]) + copy(bits[24:], cmd_bits[32:][:8]) + copy(bits[32:], cmd_bits[48:][:8]) + copy(bits[40:], cmd_bits[24:][:8]) + copy(bits[48:], cmd_bits[40:][:8]) + copy(bits[56:], cmd_bits[56:][:8]) + convertBitDepthsToSymbols(depth[64:], 64, bits[64:]) + { + /* Create the bit length array for the full command alphabet. 
*/ + var i uint + for i := 0; i < int(64); i++ { + cmd_depth[i] = 0 + } /* only 64 first values were used */ + copy(cmd_depth[:], depth[:8]) + copy(cmd_depth[64:][:], depth[8:][:8]) + copy(cmd_depth[128:][:], depth[16:][:8]) + copy(cmd_depth[192:][:], depth[24:][:8]) + copy(cmd_depth[384:][:], depth[32:][:8]) + for i = 0; i < 8; i++ { + cmd_depth[128+8*i] = depth[40+i] + cmd_depth[256+8*i] = depth[48+i] + cmd_depth[448+8*i] = depth[56+i] + } + + storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage) + } + + storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage) +} + +/* REQUIRES: insertlen < 6210 */ +func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) { + if insertlen < 6 { + var code uint = insertlen + 40 + writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + histo[code]++ + } else if insertlen < 130 { + var tail uint = insertlen - 2 + var nbits uint32 = log2FloorNonZero(tail) - 1 + var prefix uint = tail >> nbits + var inscode uint = uint((nbits << 1) + uint32(prefix) + 42) + writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage) + writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<> nbits + var code uint = uint((nbits << 1) + uint32(prefix) + 20) + writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<> nbits + var code uint = uint((nbits << 1) + uint32(prefix) + 4) + writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<> 5) + 30 + writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + writeBits(5, uint64(tail)&31, storage_ix, storage) + writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage) + histo[code]++ + histo[64]++ + } else if copylen < 2120 { + var tail uint = copylen - 72 + var nbits uint32 = log2FloorNonZero(tail) + var code uint = uint(nbits + 28) + 
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) + writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<> nbits) & 1 + var offset uint = (2 + prefix) << nbits + var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80) + writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage) + writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage) + histo[distcode]++ +} + +func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var j uint + for j = 0; j < len; j++ { + var lit byte = input[j] + writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage) + } +} + +/* REQUIRES: len <= 1 << 24. */ +func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) { + var nibbles uint = 6 + + /* ISLAST */ + writeBits(1, 0, storage_ix, storage) + + if len <= 1<<16 { + nibbles = 4 + } else if len <= 1<<20 { + nibbles = 5 + } + + writeBits(2, uint64(nibbles)-4, storage_ix, storage) + writeBits(nibbles*4, uint64(len)-1, storage_ix, storage) + + /* ISUNCOMPRESSED */ + writeSingleBit(is_uncompressed, storage_ix, storage) +} + +func updateBits(n_bits uint, bits uint32, pos uint, array []byte) { + for n_bits > 0 { + var byte_pos uint = pos >> 3 + var n_unchanged_bits uint = pos & 7 + var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits) + var total_bits uint = n_unchanged_bits + n_changed_bits + var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1) + var unchanged_bits uint32 = uint32(array[byte_pos]) & mask + var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1) + array[byte_pos] = byte(changed_bits<>= n_changed_bits + pos += n_changed_bits + } +} + +func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) { + var bitpos uint = new_storage_ix & 7 + var mask uint = (1 << bitpos) - 1 + storage[new_storage_ix>>3] &= byte(mask) + *storage_ix = new_storage_ix +} + +var 
shouldMergeBlock_kSampleRate uint = 43 + +func shouldMergeBlock(data []byte, len uint, depths []byte) bool { + var histo = [256]uint{0} + var i uint + for i = 0; i < len; i += shouldMergeBlock_kSampleRate { + histo[data[i]]++ + } + { + var total uint = (len + shouldMergeBlock_kSampleRate - 1) / shouldMergeBlock_kSampleRate + var r float64 = (fastLog2(total)+0.5)*float64(total) + 200 + for i = 0; i < 256; i++ { + r -= float64(histo[i]) * (float64(depths[i]) + fastLog2(histo[i])) + } + + return r >= 0.0 + } +} + +func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertlen uint, literal_ratio uint) bool { + var compressed uint = uint(-cap(next_emit) + cap(metablock_start)) + if compressed*50 > insertlen { + return false + } else { + return literal_ratio > 980 + } +} + +func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) { + var len uint = uint(-cap(end) + cap(begin)) + rewindBitPosition1(storage_ix_start, storage_ix, storage) + storeMetaBlockHeader1(uint(len), true, storage_ix, storage) + *storage_ix = (*storage_ix + 7) &^ 7 + copy(storage[*storage_ix>>3:], begin[:len]) + *storage_ix += uint(len << 3) + storage[*storage_ix>>3] = 0 +} + +var kCmdHistoSeed = [128]uint32{ + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 0, + 0, +} + +var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15 +var 
compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16 + +func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) { + var cmd_histo [128]uint32 + var ip_end int + var next_emit int = 0 + var base_ip int = 0 + var input int = 0 + const kInputMarginBytes uint = windowGap + const kMinMatchLen uint = 5 + var metablock_start int = input + var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize) + var total_block_size uint = block_size + var mlen_storage_ix uint = *storage_ix + 3 + var lit_depth [256]byte + var lit_bits [256]uint16 + var literal_ratio uint + var ip int + var last_distance int + var shift uint = 64 - table_bits + + /* "next_emit" is a pointer to the first byte that is not covered by a + previous copy. Bytes between "next_emit" and the start of the next copy or + the end of the input will be emitted as literal bytes. */ + + /* Save the start of the first block for position and distance computations. + */ + + /* Save the bit position of the MLEN field of the meta-block header, so that + we can update it later if we decide to extend this meta-block. */ + storeMetaBlockHeader1(block_size, false, storage_ix, storage) + + /* No block splits, no contexts. */ + writeBits(13, 0, storage_ix, storage) + + literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage) + { + /* Store the pre-compressed command and distance prefix codes. */ + var i uint + for i = 0; i+7 < *cmd_code_numbits; i += 8 { + writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage) + } + } + + writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage) + + /* Initialize the command and distance histograms. 
We will gather + statistics of command and distance codes during the processing + of this block and use it to update the command and distance + prefix codes for the next block. */ +emit_commands: + copy(cmd_histo[:], kCmdHistoSeed[:]) + + /* "ip" is the input pointer. */ + ip = input + + last_distance = -1 + ip_end = int(uint(input) + block_size) + + if block_size >= kInputMarginBytes { + var len_limit uint = brotli_min_size_t(block_size-kMinMatchLen, input_size-kInputMarginBytes) + var ip_limit int = int(uint(input) + len_limit) + /* For the last block, we need to keep a 16 bytes margin so that we can be + sure that all distances are at most window size - 16. + For all other blocks, we only need to keep a margin of 5 bytes so that + we don't go over the block size with a copy. */ + + var next_hash uint32 + ip++ + for next_hash = hash5(in[ip:], shift); ; { + var skip uint32 = 32 + var next_ip int = ip + /* Step 1: Scan forward in the input looking for a 5-byte-long match. + If we get close to exhausting the input then goto emit_remainder. + + Heuristic match skipping: If 32 bytes are scanned with no matches + found, start looking only at every other byte. If 32 more bytes are + scanned, look at every third byte, etc.. When a match is found, + immediately go back to looking at every byte. This is a small loss + (~5% performance, ~0.1% density) for compressible data due to more + bookkeeping, but for non-compressible data (such as JPEG) it's a huge + win since the compressor quickly "realizes" the data is incompressible + and doesn't bother looking for matches everywhere. + + The "skip" variable keeps track of how many bytes there are since the + last match; dividing it by 32 (i.e. right-shifting by five) gives the + number of bytes to move ahead for each iteration. 
*/ + + var candidate int + assert(next_emit < ip) + + trawl: + for { + var hash uint32 = next_hash + var bytes_between_hash_lookups uint32 = skip >> 5 + skip++ + assert(hash == hash5(in[next_ip:], shift)) + ip = next_ip + next_ip = int(uint32(ip) + bytes_between_hash_lookups) + if next_ip > ip_limit { + goto emit_remainder + } + + next_hash = hash5(in[next_ip:], shift) + candidate = ip - last_distance + if isMatch5(in[ip:], in[candidate:]) { + if candidate < ip { + table[hash] = int(ip - base_ip) + break + } + } + + candidate = base_ip + table[hash] + assert(candidate >= base_ip) + assert(candidate < ip) + + table[hash] = int(ip - base_ip) + if isMatch5(in[ip:], in[candidate:]) { + break + } + } + + /* Check copy distance. If candidate is not feasible, continue search. + Checking is done outside of hot loop to reduce overhead. */ + if ip-candidate > maxDistance_compress_fragment { + goto trawl + } + + /* Step 2: Emit the found match together with the literal bytes from + "next_emit" to the bit stream, and then see if we can find a next match + immediately afterwards. Repeat until we find no match for the input + without emitting some literal bytes. */ + { + var base int = ip + /* > 0 */ + var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5) + var distance int = int(base - candidate) + /* We have a 5-byte match at ip, and we need to emit bytes in + [next_emit, ip). 
*/ + + var insert uint = uint(base - next_emit) + ip += int(matched) + if insert < 6210 { + emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + } else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) { + emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage) + input_size -= uint(base - input) + input = base + next_emit = input + goto next_block + } else { + emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + } + + emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) + if distance == last_distance { + writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage) + cmd_histo[64]++ + } else { + emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + last_distance = distance + } + + emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some positions + within the last copy. */ + { + var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:]) + var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift) + var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset5(input_bytes, 1, shift) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset5(input_bytes, 2, shift) + table[prev_hash] = int(ip - base_ip - 1) + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + for isMatch5(in[ip:], in[candidate:]) { + var base int = ip + /* We have a 5-byte match at ip, and no need to emit any literal bytes + prior to ip. 
*/ + + var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5) + if ip-candidate > maxDistance_compress_fragment { + break + } + ip += int(matched) + last_distance = int(base - candidate) /* > 0 */ + emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some positions + within the last copy. */ + { + var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:]) + var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift) + var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset5(input_bytes, 1, shift) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset5(input_bytes, 2, shift) + table[prev_hash] = int(ip - base_ip - 1) + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + ip++ + next_hash = hash5(in[ip:], shift) + } + } + +emit_remainder: + assert(next_emit <= ip_end) + input += int(block_size) + input_size -= block_size + block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kMergeBlockSize) + + /* Decide if we want to continue this meta-block instead of emitting the + last insert-only command. */ + if input_size > 0 && total_block_size+block_size <= 1<<20 && shouldMergeBlock(in[input:], block_size, lit_depth[:]) { + assert(total_block_size > 1<<16) + + /* Update the size of the current meta-block and continue emitting commands. + We can do this because the current size and the new size both have 5 + nibbles. 
*/ + total_block_size += block_size + + updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage) + goto emit_commands + } + + /* Emit the remaining bytes as literals. */ + if next_emit < ip_end { + var insert uint = uint(ip_end - next_emit) + if insert < 6210 { + emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) + } else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) { + emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage) + } else { + emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) + emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) + } + } + + next_emit = ip_end + + /* If we have more data, write a new meta-block header and prefix codes and + then continue emitting commands. */ +next_block: + if input_size > 0 { + metablock_start = input + block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize) + total_block_size = block_size + + /* Save the bit position of the MLEN field of the meta-block header, so that + we can update it later if we decide to extend this meta-block. */ + mlen_storage_ix = *storage_ix + 3 + + storeMetaBlockHeader1(block_size, false, storage_ix, storage) + + /* No block splits, no contexts. */ + writeBits(13, 0, storage_ix, storage) + + literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage) + buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage) + goto emit_commands + } + + if !is_last { + /* If this is not the last block, update the command and distance prefix + codes for the next block and store the compressed forms. 
*/ + cmd_code[0] = 0 + + *cmd_code_numbits = 0 + buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code) + } +} + +/* Compresses "input" string to the "*storage" buffer as one or more complete + meta-blocks, and updates the "*storage_ix" bit position. + + If "is_last" is 1, emits an additional empty last meta-block. + + "cmd_depth" and "cmd_bits" contain the command and distance prefix codes + (see comment in encode.h) used for the encoding of this input fragment. + If "is_last" is 0, they are updated to reflect the statistics + of this input fragment, to be used for the encoding of the next fragment. + + "*cmd_code_numbits" is the number of bits of the compressed representation + of the command and distance prefix codes, and "cmd_code" is an array of + at least "(*cmd_code_numbits + 7) >> 3" size that contains the compressed + command and distance prefix codes. If "is_last" is 0, these are also + updated to represent the updated "cmd_depth" and "cmd_bits". + + REQUIRES: "input_size" is greater than zero, or "is_last" is 1. + REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). + REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. 
+ REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two + OUTPUT: maximal copy distance <= |input_size| + OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ +func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) { + var initial_storage_ix uint = *storage_ix + var table_bits uint = uint(log2FloorNonZero(table_size)) + + if input_size == 0 { + assert(is_last) + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + *storage_ix = (*storage_ix + 7) &^ 7 + return + } + + compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage) + + /* If output is larger than single uncompressed block, rewrite it. */ + if *storage_ix-initial_storage_ix > 31+(input_size<<3) { + emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage) + } + + if is_last { + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + *storage_ix = (*storage_ix + 7) &^ 7 + } +} diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go new file mode 100644 index 00000000000..79f9c7fdfc8 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go @@ -0,0 +1,773 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function for fast encoding of an input fragment, independently from the input + history. 
This function uses two-pass processing: in the first pass we save + the found backward matches and literal bytes into a buffer, and in the + second pass we emit them into the bit stream using prefix codes built based + on the actual command and literal byte histograms. */ + +const kCompressFragmentTwoPassBlockSize uint = 1 << 17 + +func hash1(p []byte, shift uint, length uint) uint32 { + var h uint64 = (binary.LittleEndian.Uint64(p) << ((8 - length) * 8)) * uint64(kHashMul32) + return uint32(h >> shift) +} + +func hashBytesAtOffset(v uint64, offset uint, shift uint, length uint) uint32 { + assert(offset <= 8-length) + { + var h uint64 = ((v >> (8 * offset)) << ((8 - length) * 8)) * uint64(kHashMul32) + return uint32(h >> shift) + } +} + +func isMatch1(p1 []byte, p2 []byte, length uint) bool { + if binary.LittleEndian.Uint32(p1) != binary.LittleEndian.Uint32(p2) { + return false + } + if length == 4 { + return true + } + return p1[4] == p2[4] && p1[5] == p2[5] +} + +/* +Builds a command and distance prefix code (each 64 symbols) into "depth" and + + "bits" based on "histogram" and stores it into the bit stream. +*/ +func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { + var tree [129]huffmanTree + var cmd_depth = [numCommandSymbols]byte{0} + /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */ + + var cmd_bits [64]uint16 + createHuffmanTree(histogram, 64, 15, tree[:], depth) + createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:]) + + /* We have to jump through a few hoops here in order to compute + the command bits because the symbols are in a different order than in + the full alphabet. This looks complicated, but having the symbols + in this order in the command bits saves a few branches in the Emit* + functions. 
*/ + copy(cmd_depth[:], depth[24:][:24]) + + copy(cmd_depth[24:][:], depth[:8]) + copy(cmd_depth[32:][:], depth[48:][:8]) + copy(cmd_depth[40:][:], depth[8:][:8]) + copy(cmd_depth[48:][:], depth[56:][:8]) + copy(cmd_depth[56:][:], depth[16:][:8]) + convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:]) + copy(bits, cmd_bits[24:][:8]) + copy(bits[8:], cmd_bits[40:][:8]) + copy(bits[16:], cmd_bits[56:][:8]) + copy(bits[24:], cmd_bits[:24]) + copy(bits[48:], cmd_bits[32:][:8]) + copy(bits[56:], cmd_bits[48:][:8]) + convertBitDepthsToSymbols(depth[64:], 64, bits[64:]) + { + /* Create the bit length array for the full command alphabet. */ + var i uint + for i := 0; i < int(64); i++ { + cmd_depth[i] = 0 + } /* only 64 first values were used */ + copy(cmd_depth[:], depth[24:][:8]) + copy(cmd_depth[64:][:], depth[32:][:8]) + copy(cmd_depth[128:][:], depth[40:][:8]) + copy(cmd_depth[192:][:], depth[48:][:8]) + copy(cmd_depth[384:][:], depth[56:][:8]) + for i = 0; i < 8; i++ { + cmd_depth[128+8*i] = depth[i] + cmd_depth[256+8*i] = depth[8+i] + cmd_depth[448+8*i] = depth[16+i] + } + + storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage) + } + + storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage) +} + +func emitInsertLen(insertlen uint32, commands *[]uint32) { + if insertlen < 6 { + (*commands)[0] = insertlen + } else if insertlen < 130 { + var tail uint32 = insertlen - 2 + var nbits uint32 = log2FloorNonZero(uint(tail)) - 1 + var prefix uint32 = tail >> nbits + var inscode uint32 = (nbits << 1) + prefix + 2 + var extra uint32 = tail - (prefix << nbits) + (*commands)[0] = inscode | extra<<8 + } else if insertlen < 2114 { + var tail uint32 = insertlen - 66 + var nbits uint32 = log2FloorNonZero(uint(tail)) + var code uint32 = nbits + 10 + var extra uint32 = tail - (1 << nbits) + (*commands)[0] = code | extra<<8 + } else if insertlen < 6210 { + var extra uint32 = insertlen - 2114 + (*commands)[0] = 21 | extra<<8 + } else if insertlen < 
22594 { + var extra uint32 = insertlen - 6210 + (*commands)[0] = 22 | extra<<8 + } else { + var extra uint32 = insertlen - 22594 + (*commands)[0] = 23 | extra<<8 + } + + *commands = (*commands)[1:] +} + +func emitCopyLen(copylen uint, commands *[]uint32) { + if copylen < 10 { + (*commands)[0] = uint32(copylen + 38) + } else if copylen < 134 { + var tail uint = copylen - 6 + var nbits uint = uint(log2FloorNonZero(tail) - 1) + var prefix uint = tail >> nbits + var code uint = (nbits << 1) + prefix + 44 + var extra uint = tail - (prefix << nbits) + (*commands)[0] = uint32(code | extra<<8) + } else if copylen < 2118 { + var tail uint = copylen - 70 + var nbits uint = uint(log2FloorNonZero(tail)) + var code uint = nbits + 52 + var extra uint = tail - (uint(1) << nbits) + (*commands)[0] = uint32(code | extra<<8) + } else { + var extra uint = copylen - 2118 + (*commands)[0] = uint32(63 | extra<<8) + } + + *commands = (*commands)[1:] +} + +func emitCopyLenLastDistance(copylen uint, commands *[]uint32) { + if copylen < 12 { + (*commands)[0] = uint32(copylen + 20) + *commands = (*commands)[1:] + } else if copylen < 72 { + var tail uint = copylen - 8 + var nbits uint = uint(log2FloorNonZero(tail) - 1) + var prefix uint = tail >> nbits + var code uint = (nbits << 1) + prefix + 28 + var extra uint = tail - (prefix << nbits) + (*commands)[0] = uint32(code | extra<<8) + *commands = (*commands)[1:] + } else if copylen < 136 { + var tail uint = copylen - 8 + var code uint = (tail >> 5) + 54 + var extra uint = tail & 31 + (*commands)[0] = uint32(code | extra<<8) + *commands = (*commands)[1:] + (*commands)[0] = 64 + *commands = (*commands)[1:] + } else if copylen < 2120 { + var tail uint = copylen - 72 + var nbits uint = uint(log2FloorNonZero(tail)) + var code uint = nbits + 52 + var extra uint = tail - (uint(1) << nbits) + (*commands)[0] = uint32(code | extra<<8) + *commands = (*commands)[1:] + (*commands)[0] = 64 + *commands = (*commands)[1:] + } else { + var extra uint = copylen - 
2120 + (*commands)[0] = uint32(63 | extra<<8) + *commands = (*commands)[1:] + (*commands)[0] = 64 + *commands = (*commands)[1:] + } +} + +func emitDistance(distance uint32, commands *[]uint32) { + var d uint32 = distance + 3 + var nbits uint32 = log2FloorNonZero(uint(d)) - 1 + var prefix uint32 = (d >> nbits) & 1 + var offset uint32 = (2 + prefix) << nbits + var distcode uint32 = 2*(nbits-1) + prefix + 80 + var extra uint32 = d - offset + (*commands)[0] = distcode | extra<<8 + *commands = (*commands)[1:] +} + +/* REQUIRES: len <= 1 << 24. */ +func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) { + var nibbles uint = 6 + + /* ISLAST */ + writeBits(1, 0, storage_ix, storage) + + if len <= 1<<16 { + nibbles = 4 + } else if len <= 1<<20 { + nibbles = 5 + } + + writeBits(2, uint64(nibbles)-4, storage_ix, storage) + writeBits(nibbles*4, uint64(len)-1, storage_ix, storage) + + /* ISUNCOMPRESSED */ + writeSingleBit(is_uncompressed, storage_ix, storage) +} + +func storeMetaBlockHeaderBW(len uint, is_uncompressed bool, bw *bitWriter) { + var nibbles uint = 6 + + /* ISLAST */ + bw.writeBits(1, 0) + + if len <= 1<<16 { + nibbles = 4 + } else if len <= 1<<20 { + nibbles = 5 + } + + bw.writeBits(2, uint64(nibbles)-4) + bw.writeBits(nibbles*4, uint64(len)-1) + + /* ISUNCOMPRESSED */ + bw.writeSingleBit(is_uncompressed) +} + +func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) { + var ip int = 0 + var shift uint = 64 - table_bits + var ip_end int = int(block_size) + var base_ip int = -cap(base_ip_ptr) + cap(input) + var next_emit int = 0 + var last_distance int = -1 + /* "ip" is the input pointer. */ + + const kInputMarginBytes uint = windowGap + + /* "next_emit" is a pointer to the first byte that is not covered by a + previous copy. 
Bytes between "next_emit" and the start of the next copy or + the end of the input will be emitted as literal bytes. */ + if block_size >= kInputMarginBytes { + var len_limit uint = brotli_min_size_t(block_size-min_match, input_size-kInputMarginBytes) + var ip_limit int = int(len_limit) + /* For the last block, we need to keep a 16 bytes margin so that we can be + sure that all distances are at most window size - 16. + For all other blocks, we only need to keep a margin of 5 bytes so that + we don't go over the block size with a copy. */ + + var next_hash uint32 + ip++ + for next_hash = hash1(input[ip:], shift, min_match); ; { + var skip uint32 = 32 + var next_ip int = ip + /* Step 1: Scan forward in the input looking for a 6-byte-long match. + If we get close to exhausting the input then goto emit_remainder. + + Heuristic match skipping: If 32 bytes are scanned with no matches + found, start looking only at every other byte. If 32 more bytes are + scanned, look at every third byte, etc.. When a match is found, + immediately go back to looking at every byte. This is a small loss + (~5% performance, ~0.1% density) for compressible data due to more + bookkeeping, but for non-compressible data (such as JPEG) it's a huge + win since the compressor quickly "realizes" the data is incompressible + and doesn't bother looking for matches everywhere. + + The "skip" variable keeps track of how many bytes there are since the + last match; dividing it by 32 (ie. right-shifting by five) gives the + number of bytes to move ahead for each iteration. 
*/ + + var candidate int + + assert(next_emit < ip) + + trawl: + for { + var hash uint32 = next_hash + var bytes_between_hash_lookups uint32 = skip >> 5 + skip++ + ip = next_ip + assert(hash == hash1(input[ip:], shift, min_match)) + next_ip = int(uint32(ip) + bytes_between_hash_lookups) + if next_ip > ip_limit { + goto emit_remainder + } + + next_hash = hash1(input[next_ip:], shift, min_match) + candidate = ip - last_distance + if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { + if candidate < ip { + table[hash] = int(ip - base_ip) + break + } + } + + candidate = base_ip + table[hash] + assert(candidate >= base_ip) + assert(candidate < ip) + + table[hash] = int(ip - base_ip) + if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { + break + } + } + + /* Check copy distance. If candidate is not feasible, continue search. + Checking is done outside of hot loop to reduce overhead. */ + if ip-candidate > maxDistance_compress_fragment { + goto trawl + } + + /* Step 2: Emit the found match together with the literal bytes from + "next_emit", and then see if we can find a next match immediately + afterwards. Repeat until we find no match for the input + without emitting some literal bytes. */ + { + var base int = ip + /* > 0 */ + var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match) + var distance int = int(base - candidate) + /* We have a 6-byte match at ip, and we need to emit bytes in + [next_emit, ip). 
*/ + + var insert int = int(base - next_emit) + ip += int(matched) + emitInsertLen(uint32(insert), commands) + copy(*literals, input[next_emit:][:uint(insert)]) + *literals = (*literals)[insert:] + if distance == last_distance { + (*commands)[0] = 64 + *commands = (*commands)[1:] + } else { + emitDistance(uint32(distance), commands) + last_distance = distance + } + + emitCopyLenLastDistance(matched, commands) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + { + var input_bytes uint64 + var cur_hash uint32 + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some + positions within the last copy. */ + + var prev_hash uint32 + if min_match == 4 { + input_bytes = binary.LittleEndian.Uint64(input[ip-3:]) + cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } else { + input_bytes = binary.LittleEndian.Uint64(input[ip-5:]) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 5) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 4) + prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + input_bytes = binary.LittleEndian.Uint64(input[ip-2:]) + cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip 
- base_ip) + } + } + + for ip-candidate <= maxDistance_compress_fragment && isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { + var base int = ip + /* We have a 6-byte match at ip, and no need to emit any + literal bytes prior to ip. */ + + var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match) + ip += int(matched) + last_distance = int(base - candidate) /* > 0 */ + emitCopyLen(matched, commands) + emitDistance(uint32(last_distance), commands) + + next_emit = ip + if ip >= ip_limit { + goto emit_remainder + } + { + var input_bytes uint64 + var cur_hash uint32 + /* We could immediately start working at ip now, but to improve + compression we first update "table" with the hashes of some + positions within the last copy. */ + + var prev_hash uint32 + if min_match == 4 { + input_bytes = binary.LittleEndian.Uint64(input[ip-3:]) + cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } else { + input_bytes = binary.LittleEndian.Uint64(input[ip-5:]) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 5) + prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 4) + prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + table[prev_hash] = int(ip - base_ip - 3) + input_bytes = binary.LittleEndian.Uint64(input[ip-2:]) + cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) + prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) + table[prev_hash] = int(ip - base_ip - 2) + prev_hash = 
hashBytesAtOffset(input_bytes, 1, shift, min_match) + table[prev_hash] = int(ip - base_ip - 1) + } + + candidate = base_ip + table[cur_hash] + table[cur_hash] = int(ip - base_ip) + } + } + + ip++ + next_hash = hash1(input[ip:], shift, min_match) + } + } + +emit_remainder: + assert(next_emit <= ip_end) + + /* Emit the remaining bytes as literals. */ + if next_emit < ip_end { + var insert uint32 = uint32(ip_end - next_emit) + emitInsertLen(insert, commands) + copy(*literals, input[next_emit:][:insert]) + *literals = (*literals)[insert:] + } +} + +var storeCommands_kNumExtraBits = [128]uint32{ + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 12, + 14, + 24, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 7, + 8, + 9, + 10, + 24, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 4, + 4, + 5, + 5, + 6, + 6, + 7, + 7, + 8, + 8, + 9, + 9, + 10, + 10, + 11, + 11, + 12, + 12, + 13, + 13, + 14, + 14, + 15, + 15, + 16, + 16, + 17, + 17, + 18, + 18, + 19, + 19, + 20, + 20, + 21, + 21, + 22, + 22, + 23, + 23, + 24, + 24, +} +var storeCommands_kInsertOffset = [24]uint32{ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 14, + 18, + 26, + 34, + 50, + 66, + 98, + 130, + 194, + 322, + 578, + 1090, + 2114, + 6210, + 22594, +} + +func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, storage_ix *uint, storage []byte) { + var lit_depths [256]byte + var lit_bits [256]uint16 + var lit_histo = [256]uint32{0} + var cmd_depths = [128]byte{0} + var cmd_bits = [128]uint16{0} + var cmd_histo = [128]uint32{0} + var i uint + for i = 0; i < num_literals; i++ { + lit_histo[literals[i]]++ + } + + buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */ + 8, lit_depths[:], lit_bits[:], storage_ix, 
storage) + + for i = 0; i < num_commands; i++ { + var code uint32 = commands[i] & 0xFF + assert(code < 128) + cmd_histo[code]++ + } + + cmd_histo[1] += 1 + cmd_histo[2] += 1 + cmd_histo[64] += 1 + cmd_histo[84] += 1 + buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], storage_ix, storage) + + for i = 0; i < num_commands; i++ { + var cmd uint32 = commands[i] + var code uint32 = cmd & 0xFF + var extra uint32 = cmd >> 8 + assert(code < 128) + writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]), storage_ix, storage) + writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra), storage_ix, storage) + if code < 24 { + var insert uint32 = storeCommands_kInsertOffset[code] + extra + var j uint32 + for j = 0; j < insert; j++ { + var lit byte = literals[0] + writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]), storage_ix, storage) + literals = literals[1:] + } + } + } +} + +/* Acceptable loss for uncompressible speedup is 2% */ +const minRatio = 0.98 + +const sampleRate = 43 + +func shouldCompress(input []byte, input_size uint, num_literals uint) bool { + var corpus_size float64 = float64(input_size) + if float64(num_literals) < minRatio*corpus_size { + return true + } else { + var literal_histo = [256]uint32{0} + var max_total_bit_cost float64 = corpus_size * 8 * minRatio / sampleRate + var i uint + for i = 0; i < input_size; i += sampleRate { + literal_histo[input[i]]++ + } + + return bitsEntropy(literal_histo[:], 256) < max_total_bit_cost + } +} + +func rewindBitPosition(new_storage_ix uint, storage_ix *uint, storage []byte) { + var bitpos uint = new_storage_ix & 7 + var mask uint = (1 << bitpos) - 1 + storage[new_storage_ix>>3] &= byte(mask) + *storage_ix = new_storage_ix +} + +func emitUncompressedMetaBlock(input []byte, input_size uint, storage_ix *uint, storage []byte) { + storeMetaBlockHeader(input_size, true, storage_ix, storage) + *storage_ix = (*storage_ix + 7) &^ 7 + copy(storage[*storage_ix>>3:], input[:input_size]) + 
*storage_ix += input_size << 3 + storage[*storage_ix>>3] = 0 +} + +func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, storage_ix *uint, storage []byte) { + /* Save the start of the first block for position and distance computations. + */ + var base_ip []byte = input + + for input_size > 0 { + var block_size uint = brotli_min_size_t(input_size, kCompressFragmentTwoPassBlockSize) + var commands []uint32 = command_buf + var literals []byte = literal_buf + var num_literals uint + createCommands(input, block_size, input_size, base_ip, table, table_bits, min_match, &literals, &commands) + num_literals = uint(-cap(literals) + cap(literal_buf)) + if shouldCompress(input, block_size, num_literals) { + var num_commands uint = uint(-cap(commands) + cap(command_buf)) + storeMetaBlockHeader(block_size, false, storage_ix, storage) + + /* No block splits, no contexts. */ + writeBits(13, 0, storage_ix, storage) + + storeCommands(literal_buf, num_literals, command_buf, num_commands, storage_ix, storage) + } else { + /* Since we did not find many backward references and the entropy of + the data is close to 8 bits, we can simply emit an uncompressed block. + This makes compression speed of uncompressible data about 3x faster. */ + emitUncompressedMetaBlock(input, block_size, storage_ix, storage) + } + + input = input[block_size:] + input_size -= block_size + } +} + +/* +Compresses "input" string to the "*storage" buffer as one or more complete + + meta-blocks, and updates the "*storage_ix" bit position. + + If "is_last" is 1, emits an additional empty last meta-block. + + REQUIRES: "input_size" is greater than zero, or "is_last" is 1. + REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). + REQUIRES: "command_buf" and "literal_buf" point to at least + kCompressFragmentTwoPassBlockSize long arrays. 
+ REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. + REQUIRES: "table_size" is a power of two + OUTPUT: maximal copy distance <= |input_size| + OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) +*/ +func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) { + var initial_storage_ix uint = *storage_ix + var table_bits uint = uint(log2FloorNonZero(table_size)) + var min_match uint + if table_bits <= 15 { + min_match = 4 + } else { + min_match = 6 + } + compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage) + + /* If output is larger than single uncompressed block, rewrite it. */ + if *storage_ix-initial_storage_ix > 31+(input_size<<3) { + rewindBitPosition(initial_storage_ix, storage_ix, storage) + emitUncompressedMetaBlock(input, input_size, storage_ix, storage) + } + + if is_last { + writeBits(1, 1, storage_ix, storage) /* islast */ + writeBits(1, 1, storage_ix, storage) /* isempty */ + *storage_ix = (*storage_ix + 7) &^ 7 + } +} diff --git a/vendor/github.com/andybalholm/brotli/constants.go b/vendor/github.com/andybalholm/brotli/constants.go new file mode 100644 index 00000000000..a880dff789d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/constants.go @@ -0,0 +1,77 @@ +package brotli + +/* Copyright 2016 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Specification: 7.3. Encoding of the context map */ +const contextMapMaxRle = 16 + +/* Specification: 2. Compressed representation overview */ +const maxNumberOfBlockTypes = 256 + +/* Specification: 3.3. 
Alphabet sizes: insert-and-copy length */ +const numLiteralSymbols = 256 + +const numCommandSymbols = 704 + +const numBlockLenSymbols = 26 + +const maxContextMapSymbols = (maxNumberOfBlockTypes + contextMapMaxRle) + +const maxBlockTypeSymbols = (maxNumberOfBlockTypes + 2) + +/* Specification: 3.5. Complex prefix codes */ +const repeatPreviousCodeLength = 16 + +const repeatZeroCodeLength = 17 + +const codeLengthCodes = (repeatZeroCodeLength + 1) + +/* "code length of 8 is repeated" */ +const initialRepeatedCodeLength = 8 + +/* "Large Window Brotli" */ +const largeMaxDistanceBits = 62 + +const largeMinWbits = 10 + +const largeMaxWbits = 30 + +/* Specification: 4. Encoding of distances */ +const numDistanceShortCodes = 16 + +const maxNpostfix = 3 + +const maxNdirect = 120 + +const maxDistanceBits = 24 + +func distanceAlphabetSize(NPOSTFIX uint, NDIRECT uint, MAXNBITS uint) uint { + return numDistanceShortCodes + NDIRECT + uint(MAXNBITS<<(NPOSTFIX+1)) +} + +/* numDistanceSymbols == 1128 */ +const numDistanceSymbols = 1128 + +const maxDistance = 0x3FFFFFC + +const maxAllowedDistance = 0x7FFFFFFC + +/* 7.1. Context modes and context ID lookup for literals */ +/* "context IDs for literals are in the range of 0..63" */ +const literalContextBits = 6 + +/* 7.2. Context ID for distances */ +const distanceContextBits = 2 + +/* 9.1. Format of the Stream Header */ +/* Number of slack bytes for window size. Don't confuse + with BROTLI_NUM_DISTANCE_SHORT_CODES. */ +const windowGap = 16 + +func maxBackwardLimit(W uint) uint { + return (uint(1) << W) - windowGap +} diff --git a/vendor/github.com/andybalholm/brotli/context.go b/vendor/github.com/andybalholm/brotli/context.go new file mode 100644 index 00000000000..884ff8a2d69 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/context.go @@ -0,0 +1,2176 @@ +package brotli + +/* Lookup table to map the previous two bytes to a context id. 
+ +There are four different context modeling modes defined here: + contextLSB6: context id is the least significant 6 bits of the last byte, + contextMSB6: context id is the most significant 6 bits of the last byte, + contextUTF8: second-order context model tuned for UTF8-encoded text, + contextSigned: second-order context model tuned for signed integers. + +If |p1| and |p2| are the previous two bytes, and |mode| is current context +mode, we calculate the context as: + + context = ContextLut(mode)[p1] | ContextLut(mode)[p2 + 256]. + +For contextUTF8 mode, if the previous two bytes are ASCII characters +(i.e. < 128), this will be equivalent to + + context = 4 * context1(p1) + context2(p2), + +where context1 is based on the previous byte in the following way: + + 0 : non-ASCII control + 1 : \t, \n, \r + 2 : space + 3 : other punctuation + 4 : " ' + 5 : % + 6 : ( < [ { + 7 : ) > ] } + 8 : , ; : + 9 : . + 10 : = + 11 : number + 12 : upper-case vowel + 13 : upper-case consonant + 14 : lower-case vowel + 15 : lower-case consonant + +and context2 is based on the second last byte: + + 0 : control, space + 1 : punctuation + 2 : upper-case letter, number + 3 : lower-case letter + +If the last byte is ASCII, and the second last byte is not (in a valid UTF8 +stream it will be a continuation byte, value between 128 and 191), the +context is the same as if the second last byte was an ASCII control or space. + +If the last byte is a UTF8 lead byte (value >= 192), then the next byte will +be a continuation byte and the context id is 2 or 3 depending on the LSB of +the last byte and to a lesser extent on the second last byte if it is ASCII. + +If the last byte is a UTF8 continuation byte, the second last byte can be: + - continuation byte: the next byte is probably ASCII or lead byte (assuming + 4-byte UTF8 characters are rare) and the context id is 0 or 1. 
+ - lead byte (192 - 207): next byte is ASCII or lead byte, context is 0 or 1 + - lead byte (208 - 255): next byte is continuation byte, context is 2 or 3 + +The possible value combinations of the previous two bytes, the range of +context ids and the type of the next byte is summarized in the table below: + +|--------\-----------------------------------------------------------------| +| \ Last byte | +| Second \---------------------------------------------------------------| +| last byte \ ASCII | cont. byte | lead byte | +| \ (0-127) | (128-191) | (192-) | +|=============|===================|=====================|==================| +| ASCII | next: ASCII/lead | not valid | next: cont. | +| (0-127) | context: 4 - 63 | | context: 2 - 3 | +|-------------|-------------------|---------------------|------------------| +| cont. byte | next: ASCII/lead | next: ASCII/lead | next: cont. | +| (128-191) | context: 4 - 63 | context: 0 - 1 | context: 2 - 3 | +|-------------|-------------------|---------------------|------------------| +| lead byte | not valid | next: ASCII/lead | not valid | +| (192-207) | | context: 0 - 1 | | +|-------------|-------------------|---------------------|------------------| +| lead byte | not valid | next: cont. | not valid | +| (208-) | | context: 2 - 3 | | +|-------------|-------------------|---------------------|------------------| +*/ + +const ( + contextLSB6 = 0 + contextMSB6 = 1 + contextUTF8 = 2 + contextSigned = 3 +) + +/* Common context lookup table for all context modes. */ +var kContextLookup = [2048]byte{ + /* CONTEXT_LSB6, last byte. 
*/ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + + /* CONTEXT_LSB6, second last byte, */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + + /* CONTEXT_MSB6, last byte. */ + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 3, + 3, + 3, + 3, + 4, + 4, + 4, + 4, + 5, + 5, + 5, + 5, + 6, + 6, + 6, + 6, + 7, + 7, + 7, + 7, + 8, + 8, + 8, + 8, + 9, + 9, + 9, + 9, + 10, + 10, + 10, + 10, + 11, + 11, + 11, + 11, + 12, + 12, + 12, + 12, + 13, + 13, + 13, + 13, + 14, + 14, + 14, + 14, + 15, + 15, + 15, + 15, + 16, + 16, + 16, + 16, + 17, + 17, + 17, + 17, + 18, + 18, + 18, + 18, + 19, + 19, + 19, + 19, + 20, + 20, + 20, + 20, + 21, + 21, + 21, + 21, + 22, + 22, + 22, + 22, + 23, + 23, + 23, + 23, + 24, + 24, + 24, + 24, + 25, + 25, + 25, + 25, + 26, + 26, + 26, + 26, + 27, + 27, + 27, + 27, + 28, + 28, + 28, + 28, + 29, + 29, + 29, + 29, + 30, + 30, + 30, + 30, + 31, + 31, + 31, + 31, + 32, + 32, + 32, + 32, + 33, + 33, + 33, + 33, + 34, + 34, + 34, + 34, + 35, + 35, + 35, + 35, + 36, + 36, + 36, + 36, + 37, + 37, + 37, + 37, + 38, + 38, + 38, + 38, + 39, + 39, + 39, + 39, + 40, + 40, + 40, + 40, + 41, + 41, + 41, + 41, + 42, + 42, + 42, + 42, + 43, + 43, + 43, + 43, + 44, + 44, + 44, + 44, + 45, + 45, + 45, + 45, + 46, + 46, + 46, + 46, + 47, + 47, + 47, + 47, + 48, + 48, + 48, + 48, + 49, 
+ 49, + 49, + 49, + 50, + 50, + 50, + 50, + 51, + 51, + 51, + 51, + 52, + 52, + 52, + 52, + 53, + 53, + 53, + 53, + 54, + 54, + 54, + 54, + 55, + 55, + 55, + 55, + 56, + 56, + 56, + 56, + 57, + 57, + 57, + 57, + 58, + 58, + 58, + 58, + 59, + 59, + 59, + 59, + 60, + 60, + 60, + 60, + 61, + 61, + 61, + 61, + 62, + 62, + 62, + 62, + 63, + 63, + 63, + 63, + + /* CONTEXT_MSB6, second last byte, */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + + /* CONTEXT_UTF8, last byte. */ + /* ASCII range. 
*/ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4, + 4, + 0, + 0, + 4, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8, + 12, + 16, + 12, + 12, + 20, + 12, + 16, + 24, + 28, + 12, + 12, + 32, + 12, + 36, + 12, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 44, + 32, + 32, + 24, + 40, + 28, + 12, + 12, + 48, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 52, + 52, + 48, + 52, + 52, + 52, + 52, + 52, + 24, + 12, + 28, + 12, + 12, + 12, + 56, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 60, + 60, + 56, + 60, + 60, + 60, + 60, + 60, + 24, + 12, + 28, + 12, + 0, + + /* UTF8 continuation byte range. */ + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + + /* UTF8 lead byte range. */ + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + 2, + 3, + + /* CONTEXT_UTF8 second last byte. */ + /* ASCII range. 
*/ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 1, + 1, + 1, + 1, + 1, + 1, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 1, + 1, + 1, + 1, + 0, + + /* UTF8 continuation byte range. */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + + /* UTF8 lead byte range. */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + + /* CONTEXT_SIGNED, last byte, same as the above values shifted by 3 bits. 
*/ + 0, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 8, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 16, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 32, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 40, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 56, + + /* CONTEXT_SIGNED, second last byte. 
*/ + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 5, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 7, +} + +type contextLUT []byte + +func getContextLUT(mode int) contextLUT { + return kContextLookup[mode<<9:] +} + +func getContext(p1 byte, p2 byte, lut contextLUT) byte { + return lut[p1] | lut[256+int(p2)] +} diff --git a/vendor/github.com/andybalholm/brotli/decode.go b/vendor/github.com/andybalholm/brotli/decode.go new file mode 100644 index 00000000000..9d9513b7cfb --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/decode.go @@ -0,0 +1,2581 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +const ( + decoderResultError = 0 + decoderResultSuccess = 1 + decoderResultNeedsMoreInput = 2 + decoderResultNeedsMoreOutput = 3 +) + +/** + * Error code for detailed logging / production debugging. + * + * See ::BrotliDecoderGetErrorCode and ::BROTLI_LAST_ERROR_CODE. + */ +const ( + decoderNoError = 0 + decoderSuccess = 1 + decoderNeedsMoreInput = 2 + decoderNeedsMoreOutput = 3 + decoderErrorFormatExuberantNibble = -1 + decoderErrorFormatReserved = -2 + decoderErrorFormatExuberantMetaNibble = -3 + decoderErrorFormatSimpleHuffmanAlphabet = -4 + decoderErrorFormatSimpleHuffmanSame = -5 + decoderErrorFormatClSpace = -6 + decoderErrorFormatHuffmanSpace = -7 + decoderErrorFormatContextMapRepeat = -8 + decoderErrorFormatBlockLength1 = -9 + decoderErrorFormatBlockLength2 = -10 + decoderErrorFormatTransform = -11 + decoderErrorFormatDictionary = -12 + decoderErrorFormatWindowBits = -13 + decoderErrorFormatPadding1 = -14 + decoderErrorFormatPadding2 = -15 + decoderErrorFormatDistance = -16 + decoderErrorDictionaryNotSet = -19 + decoderErrorInvalidArguments = -20 + decoderErrorAllocContextModes = -21 + decoderErrorAllocTreeGroups = -22 + decoderErrorAllocContextMap = -25 + decoderErrorAllocRingBuffer1 = -26 + decoderErrorAllocRingBuffer2 = -27 + decoderErrorAllocBlockTypeTrees = -30 + decoderErrorUnreachable = -31 +) + +const huffmanTableBits = 8 + +const huffmanTableMask = 0xFF + +/* We need the slack region for the following reasons: + - doing up to two 16-byte copies for fast backward copying + - inserting transformed dictionary word (5 prefix + 24 base + 8 suffix) */ +const kRingBufferWriteAheadSlack uint32 = 42 + +var kCodeLengthCodeOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15} + +/* Static prefix code for the complex code length code lengths. 
*/ +var kCodeLengthPrefixLength = [16]byte{2, 2, 2, 3, 2, 2, 2, 4, 2, 2, 2, 3, 2, 2, 2, 4} + +var kCodeLengthPrefixValue = [16]byte{0, 4, 3, 2, 0, 4, 3, 1, 0, 4, 3, 2, 0, 4, 3, 5} + +/* Saves error code and converts it to BrotliDecoderResult. */ +func saveErrorCode(s *Reader, e int) int { + s.error_code = int(e) + switch e { + case decoderSuccess: + return decoderResultSuccess + + case decoderNeedsMoreInput: + return decoderResultNeedsMoreInput + + case decoderNeedsMoreOutput: + return decoderResultNeedsMoreOutput + + default: + return decoderResultError + } +} + +/* Decodes WBITS by reading 1 - 7 bits, or 0x11 for "Large Window Brotli". + Precondition: bit-reader accumulator has at least 8 bits. */ +func decodeWindowBits(s *Reader, br *bitReader) int { + var n uint32 + var large_window bool = s.large_window + s.large_window = false + takeBits(br, 1, &n) + if n == 0 { + s.window_bits = 16 + return decoderSuccess + } + + takeBits(br, 3, &n) + if n != 0 { + s.window_bits = 17 + n + return decoderSuccess + } + + takeBits(br, 3, &n) + if n == 1 { + if large_window { + takeBits(br, 1, &n) + if n == 1 { + return decoderErrorFormatWindowBits + } + + s.large_window = true + return decoderSuccess + } else { + return decoderErrorFormatWindowBits + } + } + + if n != 0 { + s.window_bits = 8 + n + return decoderSuccess + } + + s.window_bits = 17 + return decoderSuccess +} + +/* Decodes a number in the range [0..255], by reading 1 - 11 bits. */ +func decodeVarLenUint8(s *Reader, br *bitReader, value *uint32) int { + var bits uint32 + switch s.substate_decode_uint8 { + case stateDecodeUint8None: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits == 0 { + *value = 0 + return decoderSuccess + } + fallthrough + + /* Fall through. 
*/ + case stateDecodeUint8Short: + if !safeReadBits(br, 3, &bits) { + s.substate_decode_uint8 = stateDecodeUint8Short + return decoderNeedsMoreInput + } + + if bits == 0 { + *value = 1 + s.substate_decode_uint8 = stateDecodeUint8None + return decoderSuccess + } + + /* Use output value as a temporary storage. It MUST be persisted. */ + *value = bits + fallthrough + + /* Fall through. */ + case stateDecodeUint8Long: + if !safeReadBits(br, *value, &bits) { + s.substate_decode_uint8 = stateDecodeUint8Long + return decoderNeedsMoreInput + } + + *value = (1 << *value) + bits + s.substate_decode_uint8 = stateDecodeUint8None + return decoderSuccess + + default: + return decoderErrorUnreachable + } +} + +/* Decodes a metablock length and flags by reading 2 - 31 bits. */ +func decodeMetaBlockLength(s *Reader, br *bitReader) int { + var bits uint32 + var i int + for { + switch s.substate_metablock_header { + case stateMetablockHeaderNone: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + s.is_last_metablock = 1 + } else { + s.is_last_metablock = 0 + } + s.meta_block_remaining_len = 0 + s.is_uncompressed = 0 + s.is_metadata = 0 + if s.is_last_metablock == 0 { + s.substate_metablock_header = stateMetablockHeaderNibbles + break + } + + s.substate_metablock_header = stateMetablockHeaderEmpty + fallthrough + + /* Fall through. */ + case stateMetablockHeaderEmpty: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + } + + s.substate_metablock_header = stateMetablockHeaderNibbles + fallthrough + + /* Fall through. 
*/ + case stateMetablockHeaderNibbles: + if !safeReadBits(br, 2, &bits) { + return decoderNeedsMoreInput + } + + s.size_nibbles = uint(byte(bits + 4)) + s.loop_counter = 0 + if bits == 3 { + s.is_metadata = 1 + s.substate_metablock_header = stateMetablockHeaderReserved + break + } + + s.substate_metablock_header = stateMetablockHeaderSize + fallthrough + + /* Fall through. */ + case stateMetablockHeaderSize: + i = s.loop_counter + + for ; i < int(s.size_nibbles); i++ { + if !safeReadBits(br, 4, &bits) { + s.loop_counter = i + return decoderNeedsMoreInput + } + + if uint(i+1) == s.size_nibbles && s.size_nibbles > 4 && bits == 0 { + return decoderErrorFormatExuberantNibble + } + + s.meta_block_remaining_len |= int(bits << uint(i*4)) + } + + s.substate_metablock_header = stateMetablockHeaderUncompressed + fallthrough + + /* Fall through. */ + case stateMetablockHeaderUncompressed: + if s.is_last_metablock == 0 { + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + s.is_uncompressed = 1 + } else { + s.is_uncompressed = 0 + } + } + + s.meta_block_remaining_len++ + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + + case stateMetablockHeaderReserved: + if !safeReadBits(br, 1, &bits) { + return decoderNeedsMoreInput + } + + if bits != 0 { + return decoderErrorFormatReserved + } + + s.substate_metablock_header = stateMetablockHeaderBytes + fallthrough + + /* Fall through. */ + case stateMetablockHeaderBytes: + if !safeReadBits(br, 2, &bits) { + return decoderNeedsMoreInput + } + + if bits == 0 { + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + } + + s.size_nibbles = uint(byte(bits)) + s.substate_metablock_header = stateMetablockHeaderMetadata + fallthrough + + /* Fall through. 
*/ + case stateMetablockHeaderMetadata: + i = s.loop_counter + + for ; i < int(s.size_nibbles); i++ { + if !safeReadBits(br, 8, &bits) { + s.loop_counter = i + return decoderNeedsMoreInput + } + + if uint(i+1) == s.size_nibbles && s.size_nibbles > 1 && bits == 0 { + return decoderErrorFormatExuberantMetaNibble + } + + s.meta_block_remaining_len |= int(bits << uint(i*8)) + } + + s.meta_block_remaining_len++ + s.substate_metablock_header = stateMetablockHeaderNone + return decoderSuccess + + default: + return decoderErrorUnreachable + } + } +} + +/* Decodes the Huffman code. + This method doesn't read data from the bit reader, BUT drops the amount of + bits that correspond to the decoded symbol. + bits MUST contain at least 15 (BROTLI_HUFFMAN_MAX_CODE_LENGTH) valid bits. */ +func decodeSymbol(bits uint32, table []huffmanCode, br *bitReader) uint32 { + table = table[bits&huffmanTableMask:] + if table[0].bits > huffmanTableBits { + var nbits uint32 = uint32(table[0].bits) - huffmanTableBits + dropBits(br, huffmanTableBits) + table = table[uint32(table[0].value)+((bits>>huffmanTableBits)&bitMask(nbits)):] + } + + dropBits(br, uint32(table[0].bits)) + return uint32(table[0].value) +} + +/* Reads and decodes the next Huffman code from bit-stream. + This method peeks 16 bits of input and drops 0 - 15 of them. */ +func readSymbol(table []huffmanCode, br *bitReader) uint32 { + return decodeSymbol(get16BitsUnmasked(br), table, br) +} + +/* Same as DecodeSymbol, but it is known that there is less than 15 bits of + input are currently available. */ +func safeDecodeSymbol(table []huffmanCode, br *bitReader, result *uint32) bool { + var val uint32 + var available_bits uint32 = getAvailableBits(br) + if available_bits == 0 { + if table[0].bits == 0 { + *result = uint32(table[0].value) + return true + } + + return false /* No valid bits at all. 
*/ + } + + val = uint32(getBitsUnmasked(br)) + table = table[val&huffmanTableMask:] + if table[0].bits <= huffmanTableBits { + if uint32(table[0].bits) <= available_bits { + dropBits(br, uint32(table[0].bits)) + *result = uint32(table[0].value) + return true + } else { + return false /* Not enough bits for the first level. */ + } + } + + if available_bits <= huffmanTableBits { + return false /* Not enough bits to move to the second level. */ + } + + /* Speculatively drop HUFFMAN_TABLE_BITS. */ + val = (val & bitMask(uint32(table[0].bits))) >> huffmanTableBits + + available_bits -= huffmanTableBits + table = table[uint32(table[0].value)+val:] + if available_bits < uint32(table[0].bits) { + return false /* Not enough bits for the second level. */ + } + + dropBits(br, huffmanTableBits+uint32(table[0].bits)) + *result = uint32(table[0].value) + return true +} + +func safeReadSymbol(table []huffmanCode, br *bitReader, result *uint32) bool { + var val uint32 + if safeGetBits(br, 15, &val) { + *result = decodeSymbol(val, table, br) + return true + } + + return safeDecodeSymbol(table, br, result) +} + +/* Makes a look-up in first level Huffman table. Peeks 8 bits. */ +func preloadSymbol(safe int, table []huffmanCode, br *bitReader, bits *uint32, value *uint32) { + if safe != 0 { + return + } + + table = table[getBits(br, huffmanTableBits):] + *bits = uint32(table[0].bits) + *value = uint32(table[0].value) +} + +/* Decodes the next Huffman code using data prepared by PreloadSymbol. + Reads 0 - 15 bits. Also peeks 8 following bits. 
*/ +func readPreloadedSymbol(table []huffmanCode, br *bitReader, bits *uint32, value *uint32) uint32 { + var result uint32 = *value + var ext []huffmanCode + if *bits > huffmanTableBits { + var val uint32 = get16BitsUnmasked(br) + ext = table[val&huffmanTableMask:][*value:] + var mask uint32 = bitMask((*bits - huffmanTableBits)) + dropBits(br, huffmanTableBits) + ext = ext[(val>>huffmanTableBits)&mask:] + dropBits(br, uint32(ext[0].bits)) + result = uint32(ext[0].value) + } else { + dropBits(br, *bits) + } + + preloadSymbol(0, table, br, bits, value) + return result +} + +func log2Floor(x uint32) uint32 { + var result uint32 = 0 + for x != 0 { + x >>= 1 + result++ + } + + return result +} + +/* Reads (s->symbol + 1) symbols. + Totally 1..4 symbols are read, 1..11 bits each. + The list of symbols MUST NOT contain duplicates. */ +func readSimpleHuffmanSymbols(alphabet_size uint32, max_symbol uint32, s *Reader) int { + var br *bitReader = &s.br + var max_bits uint32 = log2Floor(alphabet_size - 1) + var i uint32 = s.sub_loop_counter + /* max_bits == 1..11; symbol == 0..3; 1..44 bits will be read. 
*/ + + var num_symbols uint32 = s.symbol + for i <= num_symbols { + var v uint32 + if !safeReadBits(br, max_bits, &v) { + s.sub_loop_counter = i + s.substate_huffman = stateHuffmanSimpleRead + return decoderNeedsMoreInput + } + + if v >= max_symbol { + return decoderErrorFormatSimpleHuffmanAlphabet + } + + s.symbols_lists_array[i] = uint16(v) + i++ + } + + for i = 0; i < num_symbols; i++ { + var k uint32 = i + 1 + for ; k <= num_symbols; k++ { + if s.symbols_lists_array[i] == s.symbols_lists_array[k] { + return decoderErrorFormatSimpleHuffmanSame + } + } + } + + return decoderSuccess +} + +/* Process single decoded symbol code length: + A) reset the repeat variable + B) remember code length (if it is not 0) + C) extend corresponding index-chain + D) reduce the Huffman space + E) update the histogram */ +func processSingleCodeLength(code_len uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) { + *repeat = 0 + if code_len != 0 { /* code_len == 1..15 */ + symbolListPut(symbol_lists, next_symbol[code_len], uint16(*symbol)) + next_symbol[code_len] = int(*symbol) + *prev_code_len = code_len + *space -= 32768 >> code_len + code_length_histo[code_len]++ + } + + (*symbol)++ +} + +/* Process repeated symbol code length. 
+ A) Check if it is the extension of previous repeat sequence; if the decoded + value is not BROTLI_REPEAT_PREVIOUS_CODE_LENGTH, then it is a new + symbol-skip + B) Update repeat variable + C) Check if operation is feasible (fits alphabet) + D) For each symbol do the same operations as in ProcessSingleCodeLength + + PRECONDITION: code_len == BROTLI_REPEAT_PREVIOUS_CODE_LENGTH or + code_len == BROTLI_REPEAT_ZERO_CODE_LENGTH */ +func processRepeatedCodeLength(code_len uint32, repeat_delta uint32, alphabet_size uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, repeat_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) { + var old_repeat uint32 /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */ /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */ + var extra_bits uint32 = 3 + var new_len uint32 = 0 + if code_len == repeatPreviousCodeLength { + new_len = *prev_code_len + extra_bits = 2 + } + + if *repeat_code_len != new_len { + *repeat = 0 + *repeat_code_len = new_len + } + + old_repeat = *repeat + if *repeat > 0 { + *repeat -= 2 + *repeat <<= extra_bits + } + + *repeat += repeat_delta + 3 + repeat_delta = *repeat - old_repeat + if *symbol+repeat_delta > alphabet_size { + *symbol = alphabet_size + *space = 0xFFFFF + return + } + + if *repeat_code_len != 0 { + var last uint = uint(*symbol + repeat_delta) + var next int = next_symbol[*repeat_code_len] + for { + symbolListPut(symbol_lists, next, uint16(*symbol)) + next = int(*symbol) + (*symbol)++ + if (*symbol) == uint32(last) { + break + } + } + + next_symbol[*repeat_code_len] = next + *space -= repeat_delta << (15 - *repeat_code_len) + code_length_histo[*repeat_code_len] = uint16(uint32(code_length_histo[*repeat_code_len]) + repeat_delta) + } else { + *symbol += repeat_delta + } +} + +/* Reads and decodes symbol codelengths. 
*/ +func readSymbolCodeLengths(alphabet_size uint32, s *Reader) int { + var br *bitReader = &s.br + var symbol uint32 = s.symbol + var repeat uint32 = s.repeat + var space uint32 = s.space + var prev_code_len uint32 = s.prev_code_len + var repeat_code_len uint32 = s.repeat_code_len + var symbol_lists symbolList = s.symbol_lists + var code_length_histo []uint16 = s.code_length_histo[:] + var next_symbol []int = s.next_symbol[:] + if !warmupBitReader(br) { + return decoderNeedsMoreInput + } + var p []huffmanCode + for symbol < alphabet_size && space > 0 { + p = s.table[:] + var code_len uint32 + if !checkInputAmount(br, shortFillBitWindowRead) { + s.symbol = symbol + s.repeat = repeat + s.prev_code_len = prev_code_len + s.repeat_code_len = repeat_code_len + s.space = space + return decoderNeedsMoreInput + } + + fillBitWindow16(br) + p = p[getBitsUnmasked(br)&uint64(bitMask(huffmanMaxCodeLengthCodeLength)):] + dropBits(br, uint32(p[0].bits)) /* Use 1..5 bits. */ + code_len = uint32(p[0].value) /* code_len == 0..17 */ + if code_len < repeatPreviousCodeLength { + processSingleCodeLength(code_len, &symbol, &repeat, &space, &prev_code_len, symbol_lists, code_length_histo, next_symbol) /* code_len == 16..17, extra_bits == 2..3 */ + } else { + var extra_bits uint32 + if code_len == repeatPreviousCodeLength { + extra_bits = 2 + } else { + extra_bits = 3 + } + var repeat_delta uint32 = uint32(getBitsUnmasked(br)) & bitMask(extra_bits) + dropBits(br, extra_bits) + processRepeatedCodeLength(code_len, repeat_delta, alphabet_size, &symbol, &repeat, &space, &prev_code_len, &repeat_code_len, symbol_lists, code_length_histo, next_symbol) + } + } + + s.space = space + return decoderSuccess +} + +func safeReadSymbolCodeLengths(alphabet_size uint32, s *Reader) int { + var br *bitReader = &s.br + var get_byte bool = false + var p []huffmanCode + for s.symbol < alphabet_size && s.space > 0 { + p = s.table[:] + var code_len uint32 + var available_bits uint32 + var bits uint32 = 0 + if 
get_byte && !pullByte(br) { + return decoderNeedsMoreInput + } + get_byte = false + available_bits = getAvailableBits(br) + if available_bits != 0 { + bits = uint32(getBitsUnmasked(br)) + } + + p = p[bits&bitMask(huffmanMaxCodeLengthCodeLength):] + if uint32(p[0].bits) > available_bits { + get_byte = true + continue + } + + code_len = uint32(p[0].value) /* code_len == 0..17 */ + if code_len < repeatPreviousCodeLength { + dropBits(br, uint32(p[0].bits)) + processSingleCodeLength(code_len, &s.symbol, &s.repeat, &s.space, &s.prev_code_len, s.symbol_lists, s.code_length_histo[:], s.next_symbol[:]) /* code_len == 16..17, extra_bits == 2..3 */ + } else { + var extra_bits uint32 = code_len - 14 + var repeat_delta uint32 = (bits >> p[0].bits) & bitMask(extra_bits) + if available_bits < uint32(p[0].bits)+extra_bits { + get_byte = true + continue + } + + dropBits(br, uint32(p[0].bits)+extra_bits) + processRepeatedCodeLength(code_len, repeat_delta, alphabet_size, &s.symbol, &s.repeat, &s.space, &s.prev_code_len, &s.repeat_code_len, s.symbol_lists, s.code_length_histo[:], s.next_symbol[:]) + } + } + + return decoderSuccess +} + +/* Reads and decodes 15..18 codes using static prefix code. + Each code is 2..4 bits long. In total 30..72 bits are used. 
*/ +func readCodeLengthCodeLengths(s *Reader) int { + var br *bitReader = &s.br + var num_codes uint32 = s.repeat + var space uint32 = s.space + var i uint32 = s.sub_loop_counter + for ; i < codeLengthCodes; i++ { + var code_len_idx byte = kCodeLengthCodeOrder[i] + var ix uint32 + var v uint32 + if !safeGetBits(br, 4, &ix) { + var available_bits uint32 = getAvailableBits(br) + if available_bits != 0 { + ix = uint32(getBitsUnmasked(br) & 0xF) + } else { + ix = 0 + } + + if uint32(kCodeLengthPrefixLength[ix]) > available_bits { + s.sub_loop_counter = i + s.repeat = num_codes + s.space = space + s.substate_huffman = stateHuffmanComplex + return decoderNeedsMoreInput + } + } + + v = uint32(kCodeLengthPrefixValue[ix]) + dropBits(br, uint32(kCodeLengthPrefixLength[ix])) + s.code_length_code_lengths[code_len_idx] = byte(v) + if v != 0 { + space = space - (32 >> v) + num_codes++ + s.code_length_histo[v]++ + if space-1 >= 32 { + /* space is 0 or wrapped around. */ + break + } + } + } + + if num_codes != 1 && space != 0 { + return decoderErrorFormatClSpace + } + + return decoderSuccess +} + +/* Decodes the Huffman tables. + There are 2 scenarios: + A) Huffman code contains only few symbols (1..4). Those symbols are read + directly; their code lengths are defined by the number of symbols. + For this scenario 4 - 49 bits will be read. + + B) 2-phase decoding: + B.1) Small Huffman table is decoded; it is specified with code lengths + encoded with predefined entropy code. 32 - 74 bits are used. + B.2) Decoded table is used to decode code lengths of symbols in resulting + Huffman table. In worst case 3520 bits are read. */ +func readHuffmanCode(alphabet_size uint32, max_symbol uint32, table []huffmanCode, opt_table_size *uint32, s *Reader) int { + var br *bitReader = &s.br + + /* Unnecessary masking, but might be good for safety. */ + alphabet_size &= 0x7FF + + /* State machine. 
*/ + for { + switch s.substate_huffman { + case stateHuffmanNone: + if !safeReadBits(br, 2, &s.sub_loop_counter) { + return decoderNeedsMoreInput + } + + /* The value is used as follows: + 1 for simple code; + 0 for no skipping, 2 skips 2 code lengths, 3 skips 3 code lengths */ + if s.sub_loop_counter != 1 { + s.space = 32 + s.repeat = 0 /* num_codes */ + var i int + for i = 0; i <= huffmanMaxCodeLengthCodeLength; i++ { + s.code_length_histo[i] = 0 + } + + for i = 0; i < codeLengthCodes; i++ { + s.code_length_code_lengths[i] = 0 + } + + s.substate_huffman = stateHuffmanComplex + continue + } + fallthrough + + /* Read symbols, codes & code lengths directly. */ + case stateHuffmanSimpleSize: + if !safeReadBits(br, 2, &s.symbol) { /* num_symbols */ + s.substate_huffman = stateHuffmanSimpleSize + return decoderNeedsMoreInput + } + + s.sub_loop_counter = 0 + fallthrough + + case stateHuffmanSimpleRead: + { + var result int = readSimpleHuffmanSymbols(alphabet_size, max_symbol, s) + if result != decoderSuccess { + return result + } + } + fallthrough + + case stateHuffmanSimpleBuild: + var table_size uint32 + if s.symbol == 3 { + var bits uint32 + if !safeReadBits(br, 1, &bits) { + s.substate_huffman = stateHuffmanSimpleBuild + return decoderNeedsMoreInput + } + + s.symbol += bits + } + + table_size = buildSimpleHuffmanTable(table, huffmanTableBits, s.symbols_lists_array[:], s.symbol) + if opt_table_size != nil { + *opt_table_size = table_size + } + + s.substate_huffman = stateHuffmanNone + return decoderSuccess + + /* Decode Huffman-coded code lengths. 
*/ + case stateHuffmanComplex: + { + var i uint32 + var result int = readCodeLengthCodeLengths(s) + if result != decoderSuccess { + return result + } + + buildCodeLengthsHuffmanTable(s.table[:], s.code_length_code_lengths[:], s.code_length_histo[:]) + for i = 0; i < 16; i++ { + s.code_length_histo[i] = 0 + } + + for i = 0; i <= huffmanMaxCodeLength; i++ { + s.next_symbol[i] = int(i) - (huffmanMaxCodeLength + 1) + symbolListPut(s.symbol_lists, s.next_symbol[i], 0xFFFF) + } + + s.symbol = 0 + s.prev_code_len = initialRepeatedCodeLength + s.repeat = 0 + s.repeat_code_len = 0 + s.space = 32768 + s.substate_huffman = stateHuffmanLengthSymbols + } + fallthrough + + case stateHuffmanLengthSymbols: + var table_size uint32 + var result int = readSymbolCodeLengths(max_symbol, s) + if result == decoderNeedsMoreInput { + result = safeReadSymbolCodeLengths(max_symbol, s) + } + + if result != decoderSuccess { + return result + } + + if s.space != 0 { + return decoderErrorFormatHuffmanSpace + } + + table_size = buildHuffmanTable(table, huffmanTableBits, s.symbol_lists, s.code_length_histo[:]) + if opt_table_size != nil { + *opt_table_size = table_size + } + + s.substate_huffman = stateHuffmanNone + return decoderSuccess + + default: + return decoderErrorUnreachable + } + } +} + +/* Decodes a block length by reading 3..39 bits. */ +func readBlockLength(table []huffmanCode, br *bitReader) uint32 { + var code uint32 + var nbits uint32 + code = readSymbol(table, br) + nbits = kBlockLengthPrefixCode[code].nbits /* nbits == 2..24 */ + return kBlockLengthPrefixCode[code].offset + readBits(br, nbits) +} + +/* WARNING: if state is not BROTLI_STATE_READ_BLOCK_LENGTH_NONE, then + reading can't be continued with ReadBlockLength. 
*/ +func safeReadBlockLength(s *Reader, result *uint32, table []huffmanCode, br *bitReader) bool { + var index uint32 + if s.substate_read_block_length == stateReadBlockLengthNone { + if !safeReadSymbol(table, br, &index) { + return false + } + } else { + index = s.block_length_index + } + { + var bits uint32 /* nbits == 2..24 */ + var nbits uint32 = kBlockLengthPrefixCode[index].nbits + if !safeReadBits(br, nbits, &bits) { + s.block_length_index = index + s.substate_read_block_length = stateReadBlockLengthSuffix + return false + } + + *result = kBlockLengthPrefixCode[index].offset + bits + s.substate_read_block_length = stateReadBlockLengthNone + return true + } +} + +/* Transform: + 1) initialize list L with values 0, 1,... 255 + 2) For each input element X: + 2.1) let Y = L[X] + 2.2) remove X-th element from L + 2.3) prepend Y to L + 2.4) append Y to output + + In most cases max(Y) <= 7, so most of L remains intact. + To reduce the cost of initialization, we reuse L, remember the upper bound + of Y values, and reinitialize only first elements in L. + + Most of input values are 0 and 1. To reduce number of branches, we replace + inner for loop with do-while. */ +func inverseMoveToFrontTransform(v []byte, v_len uint32, state *Reader) { + var mtf [256]byte + var i int + for i = 1; i < 256; i++ { + mtf[i] = byte(i) + } + var mtf_1 byte + + /* Transform the input. */ + for i = 0; uint32(i) < v_len; i++ { + var index int = int(v[i]) + var value byte = mtf[index] + v[i] = value + mtf_1 = value + for index >= 1 { + index-- + mtf[index+1] = mtf[index] + } + + mtf[0] = mtf_1 + } +} + +/* Decodes a series of Huffman table using ReadHuffmanCode function. 
*/ +func huffmanTreeGroupDecode(group *huffmanTreeGroup, s *Reader) int { + if s.substate_tree_group != stateTreeGroupLoop { + s.next = group.codes + s.htree_index = 0 + s.substate_tree_group = stateTreeGroupLoop + } + + for s.htree_index < int(group.num_htrees) { + var table_size uint32 + var result int = readHuffmanCode(uint32(group.alphabet_size), uint32(group.max_symbol), s.next, &table_size, s) + if result != decoderSuccess { + return result + } + group.htrees[s.htree_index] = s.next + s.next = s.next[table_size:] + s.htree_index++ + } + + s.substate_tree_group = stateTreeGroupNone + return decoderSuccess +} + +/* Decodes a context map. + Decoding is done in 4 phases: + 1) Read auxiliary information (6..16 bits) and allocate memory. + In case of trivial context map, decoding is finished at this phase. + 2) Decode Huffman table using ReadHuffmanCode function. + This table will be used for reading context map items. + 3) Read context map items; "0" values could be run-length encoded. + 4) Optionally, apply InverseMoveToFront transform to the resulting map. */ +func decodeContextMap(context_map_size uint32, num_htrees *uint32, context_map_arg *[]byte, s *Reader) int { + var br *bitReader = &s.br + var result int = decoderSuccess + + switch int(s.substate_context_map) { + case stateContextMapNone: + result = decodeVarLenUint8(s, br, num_htrees) + if result != decoderSuccess { + return result + } + + (*num_htrees)++ + s.context_index = 0 + *context_map_arg = make([]byte, uint(context_map_size)) + if *context_map_arg == nil { + return decoderErrorAllocContextMap + } + + if *num_htrees <= 1 { + for i := 0; i < int(context_map_size); i++ { + (*context_map_arg)[i] = 0 + } + return decoderSuccess + } + + s.substate_context_map = stateContextMapReadPrefix + fallthrough + /* Fall through. */ + case stateContextMapReadPrefix: + { + var bits uint32 + + /* In next stage ReadHuffmanCode uses at least 4 bits, so it is safe + to peek 4 bits ahead. 
*/ + if !safeGetBits(br, 5, &bits) { + return decoderNeedsMoreInput + } + + if bits&1 != 0 { /* Use RLE for zeros. */ + s.max_run_length_prefix = (bits >> 1) + 1 + dropBits(br, 5) + } else { + s.max_run_length_prefix = 0 + dropBits(br, 1) + } + + s.substate_context_map = stateContextMapHuffman + } + fallthrough + + /* Fall through. */ + case stateContextMapHuffman: + { + var alphabet_size uint32 = *num_htrees + s.max_run_length_prefix + result = readHuffmanCode(alphabet_size, alphabet_size, s.context_map_table[:], nil, s) + if result != decoderSuccess { + return result + } + s.code = 0xFFFF + s.substate_context_map = stateContextMapDecode + } + fallthrough + + /* Fall through. */ + case stateContextMapDecode: + { + var context_index uint32 = s.context_index + var max_run_length_prefix uint32 = s.max_run_length_prefix + var context_map []byte = *context_map_arg + var code uint32 = s.code + var skip_preamble bool = (code != 0xFFFF) + for context_index < context_map_size || skip_preamble { + if !skip_preamble { + if !safeReadSymbol(s.context_map_table[:], br, &code) { + s.code = 0xFFFF + s.context_index = context_index + return decoderNeedsMoreInput + } + + if code == 0 { + context_map[context_index] = 0 + context_index++ + continue + } + + if code > max_run_length_prefix { + context_map[context_index] = byte(code - max_run_length_prefix) + context_index++ + continue + } + } else { + skip_preamble = false + } + + /* RLE sub-stage. 
*/ + { + var reps uint32 + if !safeReadBits(br, code, &reps) { + s.code = code + s.context_index = context_index + return decoderNeedsMoreInput + } + + reps += 1 << code + if context_index+reps > context_map_size { + return decoderErrorFormatContextMapRepeat + } + + for { + context_map[context_index] = 0 + context_index++ + reps-- + if reps == 0 { + break + } + } + } + } + } + fallthrough + + case stateContextMapTransform: + var bits uint32 + if !safeReadBits(br, 1, &bits) { + s.substate_context_map = stateContextMapTransform + return decoderNeedsMoreInput + } + + if bits != 0 { + inverseMoveToFrontTransform(*context_map_arg, context_map_size, s) + } + + s.substate_context_map = stateContextMapNone + return decoderSuccess + + default: + return decoderErrorUnreachable + } +} + +/* Decodes a command or literal and updates block type ring-buffer. + Reads 3..54 bits. */ +func decodeBlockTypeAndLength(safe int, s *Reader, tree_type int) bool { + var max_block_type uint32 = s.num_block_types[tree_type] + type_tree := s.block_type_trees[tree_type*huffmanMaxSize258:] + len_tree := s.block_len_trees[tree_type*huffmanMaxSize26:] + var br *bitReader = &s.br + var ringbuffer []uint32 = s.block_type_rb[tree_type*2:] + var block_type uint32 + if max_block_type <= 1 { + return false + } + + /* Read 0..15 + 3..39 bits. 
*/ + if safe == 0 { + block_type = readSymbol(type_tree, br) + s.block_length[tree_type] = readBlockLength(len_tree, br) + } else { + var memento bitReaderState + bitReaderSaveState(br, &memento) + if !safeReadSymbol(type_tree, br, &block_type) { + return false + } + if !safeReadBlockLength(s, &s.block_length[tree_type], len_tree, br) { + s.substate_read_block_length = stateReadBlockLengthNone + bitReaderRestoreState(br, &memento) + return false + } + } + + if block_type == 1 { + block_type = ringbuffer[1] + 1 + } else if block_type == 0 { + block_type = ringbuffer[0] + } else { + block_type -= 2 + } + + if block_type >= max_block_type { + block_type -= max_block_type + } + + ringbuffer[0] = ringbuffer[1] + ringbuffer[1] = block_type + return true +} + +func detectTrivialLiteralBlockTypes(s *Reader) { + var i uint + for i = 0; i < 8; i++ { + s.trivial_literal_contexts[i] = 0 + } + for i = 0; uint32(i) < s.num_block_types[0]; i++ { + var offset uint = i << literalContextBits + var error uint = 0 + var sample uint = uint(s.context_map[offset]) + var j uint + for j = 0; j < 1<>5] |= 1 << (i & 31) + } + } +} + +func prepareLiteralDecoding(s *Reader) { + var context_mode byte + var trivial uint + var block_type uint32 = s.block_type_rb[1] + var context_offset uint32 = block_type << literalContextBits + s.context_map_slice = s.context_map[context_offset:] + trivial = uint(s.trivial_literal_contexts[block_type>>5]) + s.trivial_literal_context = int((trivial >> (block_type & 31)) & 1) + s.literal_htree = []huffmanCode(s.literal_hgroup.htrees[s.context_map_slice[0]]) + context_mode = s.context_modes[block_type] & 3 + s.context_lookup = getContextLUT(int(context_mode)) +} + +/* Decodes the block type and updates the state for literal context. + Reads 3..54 bits. 
*/ +func decodeLiteralBlockSwitchInternal(safe int, s *Reader) bool { + if !decodeBlockTypeAndLength(safe, s, 0) { + return false + } + + prepareLiteralDecoding(s) + return true +} + +func decodeLiteralBlockSwitch(s *Reader) { + decodeLiteralBlockSwitchInternal(0, s) +} + +func safeDecodeLiteralBlockSwitch(s *Reader) bool { + return decodeLiteralBlockSwitchInternal(1, s) +} + +/* Block switch for insert/copy length. + Reads 3..54 bits. */ +func decodeCommandBlockSwitchInternal(safe int, s *Reader) bool { + if !decodeBlockTypeAndLength(safe, s, 1) { + return false + } + + s.htree_command = []huffmanCode(s.insert_copy_hgroup.htrees[s.block_type_rb[3]]) + return true +} + +func decodeCommandBlockSwitch(s *Reader) { + decodeCommandBlockSwitchInternal(0, s) +} + +func safeDecodeCommandBlockSwitch(s *Reader) bool { + return decodeCommandBlockSwitchInternal(1, s) +} + +/* Block switch for distance codes. + Reads 3..54 bits. */ +func decodeDistanceBlockSwitchInternal(safe int, s *Reader) bool { + if !decodeBlockTypeAndLength(safe, s, 2) { + return false + } + + s.dist_context_map_slice = s.dist_context_map[s.block_type_rb[5]< s.ringbuffer_size { + pos = uint(s.ringbuffer_size) + } else { + pos = uint(s.pos) + } + var partial_pos_rb uint = (s.rb_roundtrips * uint(s.ringbuffer_size)) + pos + return partial_pos_rb - s.partial_pos_out +} + +/* Dumps output. + Returns BROTLI_DECODER_NEEDS_MORE_OUTPUT only if there is more output to push + and either ring-buffer is as big as window size, or |force| is true. 
*/ +func writeRingBuffer(s *Reader, available_out *uint, next_out *[]byte, total_out *uint, force bool) int { + start := s.ringbuffer[s.partial_pos_out&uint(s.ringbuffer_mask):] + var to_write uint = unwrittenBytes(s, true) + var num_written uint = *available_out + if num_written > to_write { + num_written = to_write + } + + if s.meta_block_remaining_len < 0 { + return decoderErrorFormatBlockLength1 + } + + if next_out != nil && *next_out == nil { + *next_out = start + } else { + if next_out != nil { + copy(*next_out, start[:num_written]) + *next_out = (*next_out)[num_written:] + } + } + + *available_out -= num_written + s.partial_pos_out += num_written + if total_out != nil { + *total_out = s.partial_pos_out + } + + if num_written < to_write { + if s.ringbuffer_size == 1<= s.ringbuffer_size { + s.pos -= s.ringbuffer_size + s.rb_roundtrips++ + if uint(s.pos) != 0 { + s.should_wrap_ringbuffer = 1 + } else { + s.should_wrap_ringbuffer = 0 + } + } + + return decoderSuccess +} + +func wrapRingBuffer(s *Reader) { + if s.should_wrap_ringbuffer != 0 { + copy(s.ringbuffer, s.ringbuffer_end[:uint(s.pos)]) + s.should_wrap_ringbuffer = 0 + } +} + +/* Allocates ring-buffer. + + s->ringbuffer_size MUST be updated by BrotliCalculateRingBufferSize before + this function is called. + + Last two bytes of ring-buffer are initialized to 0, so context calculation + could be done uniformly for the first two and all other positions. 
*/ +func ensureRingBuffer(s *Reader) bool { + var old_ringbuffer []byte + if s.ringbuffer_size == s.new_ringbuffer_size { + return true + } + spaceNeeded := int(s.new_ringbuffer_size) + int(kRingBufferWriteAheadSlack) + if len(s.ringbuffer) < spaceNeeded { + old_ringbuffer = s.ringbuffer + s.ringbuffer = make([]byte, spaceNeeded) + } + + s.ringbuffer[s.new_ringbuffer_size-2] = 0 + s.ringbuffer[s.new_ringbuffer_size-1] = 0 + + if old_ringbuffer != nil { + copy(s.ringbuffer, old_ringbuffer[:uint(s.pos)]) + } + + s.ringbuffer_size = s.new_ringbuffer_size + s.ringbuffer_mask = s.new_ringbuffer_size - 1 + s.ringbuffer_end = s.ringbuffer[s.ringbuffer_size:] + + return true +} + +func copyUncompressedBlockToOutput(available_out *uint, next_out *[]byte, total_out *uint, s *Reader) int { + /* TODO: avoid allocation for single uncompressed block. */ + if !ensureRingBuffer(s) { + return decoderErrorAllocRingBuffer1 + } + + /* State machine */ + for { + switch s.substate_uncompressed { + case stateUncompressedNone: + { + var nbytes int = int(getRemainingBytes(&s.br)) + if nbytes > s.meta_block_remaining_len { + nbytes = s.meta_block_remaining_len + } + + if s.pos+nbytes > s.ringbuffer_size { + nbytes = s.ringbuffer_size - s.pos + } + + /* Copy remaining bytes from s->br.buf_ to ring-buffer. */ + copyBytes(s.ringbuffer[s.pos:], &s.br, uint(nbytes)) + + s.pos += nbytes + s.meta_block_remaining_len -= nbytes + if s.pos < 1<>1 >= min_size { + new_ringbuffer_size >>= 1 + } + } + + s.new_ringbuffer_size = new_ringbuffer_size +} + +/* Reads 1..256 2-bit context modes. 
*/ +func readContextModes(s *Reader) int { + var br *bitReader = &s.br + var i int = s.loop_counter + + for i < int(s.num_block_types[0]) { + var bits uint32 + if !safeReadBits(br, 2, &bits) { + s.loop_counter = i + return decoderNeedsMoreInput + } + + s.context_modes[i] = byte(bits) + i++ + } + + return decoderSuccess +} + +func takeDistanceFromRingBuffer(s *Reader) { + if s.distance_code == 0 { + s.dist_rb_idx-- + s.distance_code = s.dist_rb[s.dist_rb_idx&3] + + /* Compensate double distance-ring-buffer roll for dictionary items. */ + s.distance_context = 1 + } else { + var distance_code int = s.distance_code << 1 + const kDistanceShortCodeIndexOffset uint32 = 0xAAAFFF1B + const kDistanceShortCodeValueOffset uint32 = 0xFA5FA500 + var v int = (s.dist_rb_idx + int(kDistanceShortCodeIndexOffset>>uint(distance_code))) & 0x3 + /* kDistanceShortCodeIndexOffset has 2-bit values from LSB: + 3, 2, 1, 0, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2 */ + + /* kDistanceShortCodeValueOffset has 2-bit values from LSB: + -0, 0,-0, 0,-1, 1,-2, 2,-3, 3,-1, 1,-2, 2,-3, 3 */ + s.distance_code = s.dist_rb[v] + + v = int(kDistanceShortCodeValueOffset>>uint(distance_code)) & 0x3 + if distance_code&0x3 != 0 { + s.distance_code += v + } else { + s.distance_code -= v + if s.distance_code <= 0 { + /* A huge distance will cause a () soon. + This is a little faster than failing here. */ + s.distance_code = 0x7FFFFFFF + } + } + } +} + +func safeReadBitsMaybeZero(br *bitReader, n_bits uint32, val *uint32) bool { + if n_bits != 0 { + return safeReadBits(br, n_bits, val) + } else { + *val = 0 + return true + } +} + +/* Precondition: s->distance_code < 0. 
*/ +func readDistanceInternal(safe int, s *Reader, br *bitReader) bool { + var distval int + var memento bitReaderState + var distance_tree []huffmanCode = []huffmanCode(s.distance_hgroup.htrees[s.dist_htree_index]) + if safe == 0 { + s.distance_code = int(readSymbol(distance_tree, br)) + } else { + var code uint32 + bitReaderSaveState(br, &memento) + if !safeReadSymbol(distance_tree, br, &code) { + return false + } + + s.distance_code = int(code) + } + + /* Convert the distance code to the actual distance by possibly + looking up past distances from the s->ringbuffer. */ + s.distance_context = 0 + + if s.distance_code&^0xF == 0 { + takeDistanceFromRingBuffer(s) + s.block_length[2]-- + return true + } + + distval = s.distance_code - int(s.num_direct_distance_codes) + if distval >= 0 { + var nbits uint32 + var postfix int + var offset int + if safe == 0 && (s.distance_postfix_bits == 0) { + nbits = (uint32(distval) >> 1) + 1 + offset = ((2 + (distval & 1)) << nbits) - 4 + s.distance_code = int(s.num_direct_distance_codes) + offset + int(readBits(br, nbits)) + } else { + /* This branch also works well when s->distance_postfix_bits == 0. */ + var bits uint32 + postfix = distval & s.distance_postfix_mask + distval >>= s.distance_postfix_bits + nbits = (uint32(distval) >> 1) + 1 + if safe != 0 { + if !safeReadBitsMaybeZero(br, nbits, &bits) { + s.distance_code = -1 /* Restore precondition. 
*/ + bitReaderRestoreState(br, &memento) + return false + } + } else { + bits = readBits(br, nbits) + } + + offset = ((2 + (distval & 1)) << nbits) - 4 + s.distance_code = int(s.num_direct_distance_codes) + ((offset + int(bits)) << s.distance_postfix_bits) + postfix + } + } + + s.distance_code = s.distance_code - numDistanceShortCodes + 1 + s.block_length[2]-- + return true +} + +func readDistance(s *Reader, br *bitReader) { + readDistanceInternal(0, s, br) +} + +func safeReadDistance(s *Reader, br *bitReader) bool { + return readDistanceInternal(1, s, br) +} + +func readCommandInternal(safe int, s *Reader, br *bitReader, insert_length *int) bool { + var cmd_code uint32 + var insert_len_extra uint32 = 0 + var copy_length uint32 + var v cmdLutElement + var memento bitReaderState + if safe == 0 { + cmd_code = readSymbol(s.htree_command, br) + } else { + bitReaderSaveState(br, &memento) + if !safeReadSymbol(s.htree_command, br, &cmd_code) { + return false + } + } + + v = kCmdLut[cmd_code] + s.distance_code = int(v.distance_code) + s.distance_context = int(v.context) + s.dist_htree_index = s.dist_context_map_slice[s.distance_context] + *insert_length = int(v.insert_len_offset) + if safe == 0 { + if v.insert_len_extra_bits != 0 { + insert_len_extra = readBits(br, uint32(v.insert_len_extra_bits)) + } + + copy_length = readBits(br, uint32(v.copy_len_extra_bits)) + } else { + if !safeReadBitsMaybeZero(br, uint32(v.insert_len_extra_bits), &insert_len_extra) || !safeReadBitsMaybeZero(br, uint32(v.copy_len_extra_bits), ©_length) { + bitReaderRestoreState(br, &memento) + return false + } + } + + s.copy_length = int(copy_length) + int(v.copy_len_offset) + s.block_length[1]-- + *insert_length += int(insert_len_extra) + return true +} + +func readCommand(s *Reader, br *bitReader, insert_length *int) { + readCommandInternal(0, s, br, insert_length) +} + +func safeReadCommand(s *Reader, br *bitReader, insert_length *int) bool { + return readCommandInternal(1, s, br, insert_length) 
+} + +func checkInputAmountMaybeSafe(safe int, br *bitReader, num uint) bool { + if safe != 0 { + return true + } + + return checkInputAmount(br, num) +} + +func processCommandsInternal(safe int, s *Reader) int { + var pos int = s.pos + var i int = s.loop_counter + var result int = decoderSuccess + var br *bitReader = &s.br + var hc []huffmanCode + + if !checkInputAmountMaybeSafe(safe, br, 28) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if safe == 0 { + warmupBitReader(br) + } + + /* Jump into state machine. */ + if s.state == stateCommandBegin { + goto CommandBegin + } else if s.state == stateCommandInner { + goto CommandInner + } else if s.state == stateCommandPostDecodeLiterals { + goto CommandPostDecodeLiterals + } else if s.state == stateCommandPostWrapCopy { + goto CommandPostWrapCopy + } else { + return decoderErrorUnreachable + } + +CommandBegin: + if safe != 0 { + s.state = stateCommandBegin + } + + if !checkInputAmountMaybeSafe(safe, br, 28) { /* 156 bits + 7 bytes */ + s.state = stateCommandBegin + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if s.block_length[1] == 0 { + if safe != 0 { + if !safeDecodeCommandBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeCommandBlockSwitch(s) + } + + goto CommandBegin + } + + /* Read the insert/copy length in the command. */ + if safe != 0 { + if !safeReadCommand(s, br, &i) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + readCommand(s, br, &i) + } + + if i == 0 { + goto CommandPostDecodeLiterals + } + + s.meta_block_remaining_len -= i + +CommandInner: + if safe != 0 { + s.state = stateCommandInner + } + + /* Read the literals in the command. 
*/ + if s.trivial_literal_context != 0 { + var bits uint32 + var value uint32 + preloadSymbol(safe, s.literal_htree, br, &bits, &value) + for { + if !checkInputAmountMaybeSafe(safe, br, 28) { /* 162 bits + 7 bytes */ + s.state = stateCommandInner + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if s.block_length[0] == 0 { + if safe != 0 { + if !safeDecodeLiteralBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeLiteralBlockSwitch(s) + } + + preloadSymbol(safe, s.literal_htree, br, &bits, &value) + if s.trivial_literal_context == 0 { + goto CommandInner + } + } + + if safe == 0 { + s.ringbuffer[pos] = byte(readPreloadedSymbol(s.literal_htree, br, &bits, &value)) + } else { + var literal uint32 + if !safeReadSymbol(s.literal_htree, br, &literal) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + s.ringbuffer[pos] = byte(literal) + } + + s.block_length[0]-- + pos++ + if pos == s.ringbuffer_size { + s.state = stateCommandInnerWrite + i-- + goto saveStateAndReturn + } + i-- + if i == 0 { + break + } + } + } else { + var p1 byte = s.ringbuffer[(pos-1)&s.ringbuffer_mask] + var p2 byte = s.ringbuffer[(pos-2)&s.ringbuffer_mask] + for { + var context byte + if !checkInputAmountMaybeSafe(safe, br, 28) { /* 162 bits + 7 bytes */ + s.state = stateCommandInner + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + + if s.block_length[0] == 0 { + if safe != 0 { + if !safeDecodeLiteralBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeLiteralBlockSwitch(s) + } + + if s.trivial_literal_context != 0 { + goto CommandInner + } + } + + context = getContext(p1, p2, s.context_lookup) + hc = []huffmanCode(s.literal_hgroup.htrees[s.context_map_slice[context]]) + p2 = p1 + if safe == 0 { + p1 = byte(readSymbol(hc, br)) + } else { + var literal uint32 + if !safeReadSymbol(hc, br, &literal) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + 
} + + p1 = byte(literal) + } + + s.ringbuffer[pos] = p1 + s.block_length[0]-- + pos++ + if pos == s.ringbuffer_size { + s.state = stateCommandInnerWrite + i-- + goto saveStateAndReturn + } + i-- + if i == 0 { + break + } + } + } + + if s.meta_block_remaining_len <= 0 { + s.state = stateMetablockDone + goto saveStateAndReturn + } + +CommandPostDecodeLiterals: + if safe != 0 { + s.state = stateCommandPostDecodeLiterals + } + + if s.distance_code >= 0 { + /* Implicit distance case. */ + if s.distance_code != 0 { + s.distance_context = 0 + } else { + s.distance_context = 1 + } + + s.dist_rb_idx-- + s.distance_code = s.dist_rb[s.dist_rb_idx&3] + } else { + /* Read distance code in the command, unless it was implicitly zero. */ + if s.block_length[2] == 0 { + if safe != 0 { + if !safeDecodeDistanceBlockSwitch(s) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + decodeDistanceBlockSwitch(s) + } + } + + if safe != 0 { + if !safeReadDistance(s, br) { + result = decoderNeedsMoreInput + goto saveStateAndReturn + } + } else { + readDistance(s, br) + } + } + + if s.max_distance != s.max_backward_distance { + if pos < s.max_backward_distance { + s.max_distance = pos + } else { + s.max_distance = s.max_backward_distance + } + } + + i = s.copy_length + + /* Apply copy of LZ77 back-reference, or static dictionary reference if + the distance is larger than the max LZ77 distance */ + if s.distance_code > s.max_distance { + /* The maximum allowed distance is BROTLI_MAX_ALLOWED_DISTANCE = 0x7FFFFFFC. + With this choice, no signed overflow can occur after decoding + a special distance code (e.g., after adding 3 to the last distance). 
*/ + if s.distance_code > maxAllowedDistance { + return decoderErrorFormatDistance + } + + if i >= minDictionaryWordLength && i <= maxDictionaryWordLength { + var address int = s.distance_code - s.max_distance - 1 + var words *dictionary = s.dictionary + var trans *transforms = s.transforms + var offset int = int(s.dictionary.offsets_by_length[i]) + var shift uint32 = uint32(s.dictionary.size_bits_by_length[i]) + var mask int = int(bitMask(shift)) + var word_idx int = address & mask + var transform_idx int = address >> shift + + /* Compensate double distance-ring-buffer roll. */ + s.dist_rb_idx += s.distance_context + + offset += word_idx * i + if words.data == nil { + return decoderErrorDictionaryNotSet + } + + if transform_idx < int(trans.num_transforms) { + word := words.data[offset:] + var len int = i + if transform_idx == int(trans.cutOffTransforms[0]) { + copy(s.ringbuffer[pos:], word[:uint(len)]) + } else { + len = transformDictionaryWord(s.ringbuffer[pos:], word, int(len), trans, transform_idx) + } + + pos += int(len) + s.meta_block_remaining_len -= int(len) + if pos >= s.ringbuffer_size { + s.state = stateCommandPostWrite1 + goto saveStateAndReturn + } + } else { + return decoderErrorFormatTransform + } + } else { + return decoderErrorFormatDictionary + } + } else { + var src_start int = (pos - s.distance_code) & s.ringbuffer_mask + copy_dst := s.ringbuffer[pos:] + copy_src := s.ringbuffer[src_start:] + var dst_end int = pos + i + var src_end int = src_start + i + + /* Update the recent distances cache. */ + s.dist_rb[s.dist_rb_idx&3] = s.distance_code + + s.dist_rb_idx++ + s.meta_block_remaining_len -= i + + /* There are 32+ bytes of slack in the ring-buffer allocation. + Also, we have 16 short codes, that make these 16 bytes irrelevant + in the ring-buffer. Let's copy over them as a first guess. */ + copy(copy_dst, copy_src[:16]) + + if src_end > pos && dst_end > src_start { + /* Regions intersect. 
*/ + goto CommandPostWrapCopy + } + + if dst_end >= s.ringbuffer_size || src_end >= s.ringbuffer_size { + /* At least one region wraps. */ + goto CommandPostWrapCopy + } + + pos += i + if i > 16 { + if i > 32 { + copy(copy_dst[16:], copy_src[16:][:uint(i-16)]) + } else { + /* This branch covers about 45% cases. + Fixed size short copy allows more compiler optimizations. */ + copy(copy_dst[16:], copy_src[16:][:16]) + } + } + } + + if s.meta_block_remaining_len <= 0 { + /* Next metablock, if any. */ + s.state = stateMetablockDone + + goto saveStateAndReturn + } else { + goto CommandBegin + } +CommandPostWrapCopy: + { + var wrap_guard int = s.ringbuffer_size - pos + for { + i-- + if i < 0 { + break + } + s.ringbuffer[pos] = s.ringbuffer[(pos-s.distance_code)&s.ringbuffer_mask] + pos++ + wrap_guard-- + if wrap_guard == 0 { + s.state = stateCommandPostWrite2 + goto saveStateAndReturn + } + } + } + + if s.meta_block_remaining_len <= 0 { + /* Next metablock, if any. */ + s.state = stateMetablockDone + + goto saveStateAndReturn + } else { + goto CommandBegin + } + +saveStateAndReturn: + s.pos = pos + s.loop_counter = i + return result +} + +func processCommands(s *Reader) int { + return processCommandsInternal(0, s) +} + +func safeProcessCommands(s *Reader) int { + return processCommandsInternal(1, s) +} + +/* Returns the maximum number of distance symbols which can only represent + distances not exceeding BROTLI_MAX_ALLOWED_DISTANCE. 
*/ + +var maxDistanceSymbol_bound = [maxNpostfix + 1]uint32{0, 4, 12, 28} +var maxDistanceSymbol_diff = [maxNpostfix + 1]uint32{73, 126, 228, 424} + +func maxDistanceSymbol(ndirect uint32, npostfix uint32) uint32 { + var postfix uint32 = 1 << npostfix + if ndirect < maxDistanceSymbol_bound[npostfix] { + return ndirect + maxDistanceSymbol_diff[npostfix] + postfix + } else if ndirect > maxDistanceSymbol_bound[npostfix]+postfix { + return ndirect + maxDistanceSymbol_diff[npostfix] + } else { + return maxDistanceSymbol_bound[npostfix] + maxDistanceSymbol_diff[npostfix] + postfix + } +} + +/* Invariant: input stream is never overconsumed: + - invalid input implies that the whole stream is invalid -> any amount of + input could be read and discarded + - when result is "needs more input", then at least one more byte is REQUIRED + to complete decoding; all input data MUST be consumed by decoder, so + client could swap the input buffer + - when result is "needs more output" decoder MUST ensure that it doesn't + hold more than 7 bits in bit reader; this saves client from swapping input + buffer ahead of time + - when result is "success" decoder MUST return all unused data back to input + buffer; this is possible because the invariant is held on enter */ +func decoderDecompressStream(s *Reader, available_in *uint, next_in *[]byte, available_out *uint, next_out *[]byte) int { + var result int = decoderSuccess + var br *bitReader = &s.br + + /* Do not try to process further in a case of unrecoverable error. */ + if int(s.error_code) < 0 { + return decoderResultError + } + + if *available_out != 0 && (next_out == nil || *next_out == nil) { + return saveErrorCode(s, decoderErrorInvalidArguments) + } + + if *available_out == 0 { + next_out = nil + } + if s.buffer_length == 0 { /* Just connect bit reader to input stream. */ + br.input_len = *available_in + br.input = *next_in + br.byte_pos = 0 + } else { + /* At least one byte of input is required. 
More than one byte of input may + be required to complete the transaction -> reading more data must be + done in a loop -> do it in a main loop. */ + result = decoderNeedsMoreInput + + br.input = s.buffer.u8[:] + br.byte_pos = 0 + } + + /* State machine */ + for { + if result != decoderSuccess { + /* Error, needs more input/output. */ + if result == decoderNeedsMoreInput { + if s.ringbuffer != nil { /* Pro-actively push output. */ + var intermediate_result int = writeRingBuffer(s, available_out, next_out, nil, true) + + /* WriteRingBuffer checks s->meta_block_remaining_len validity. */ + if int(intermediate_result) < 0 { + result = intermediate_result + break + } + } + + if s.buffer_length != 0 { /* Used with internal buffer. */ + if br.byte_pos == br.input_len { + /* Successfully finished read transaction. + Accumulator contains less than 8 bits, because internal buffer + is expanded byte-by-byte until it is enough to complete read. */ + s.buffer_length = 0 + + /* Switch to input stream and restart. */ + result = decoderSuccess + + br.input_len = *available_in + br.input = *next_in + br.byte_pos = 0 + continue + } else if *available_in != 0 { + /* Not enough data in buffer, but can take one more byte from + input stream. */ + result = decoderSuccess + + s.buffer.u8[s.buffer_length] = (*next_in)[0] + s.buffer_length++ + br.input_len = uint(s.buffer_length) + *next_in = (*next_in)[1:] + (*available_in)-- + + /* Retry with more data in buffer. */ + continue + } + + /* Can't finish reading and no more input. */ + break + /* Input stream doesn't contain enough input. */ + } else { + /* Copy tail to internal buffer and return. */ + *next_in = br.input[br.byte_pos:] + + *available_in = br.input_len - br.byte_pos + for *available_in != 0 { + s.buffer.u8[s.buffer_length] = (*next_in)[0] + s.buffer_length++ + *next_in = (*next_in)[1:] + (*available_in)-- + } + + break + } + } + + /* Unreachable. */ + + /* Fail or needs more output. 
*/ + if s.buffer_length != 0 { + /* Just consumed the buffered input and produced some output. Otherwise + it would result in "needs more input". Reset internal buffer. */ + s.buffer_length = 0 + } else { + /* Using input stream in last iteration. When decoder switches to input + stream it has less than 8 bits in accumulator, so it is safe to + return unused accumulator bits there. */ + bitReaderUnload(br) + + *available_in = br.input_len - br.byte_pos + *next_in = br.input[br.byte_pos:] + } + + break + } + + switch s.state { + /* Prepare to the first read. */ + case stateUninited: + if !warmupBitReader(br) { + result = decoderNeedsMoreInput + break + } + + /* Decode window size. */ + result = decodeWindowBits(s, br) /* Reads 1..8 bits. */ + if result != decoderSuccess { + break + } + + if s.large_window { + s.state = stateLargeWindowBits + break + } + + s.state = stateInitialize + + case stateLargeWindowBits: + if !safeReadBits(br, 6, &s.window_bits) { + result = decoderNeedsMoreInput + break + } + + if s.window_bits < largeMinWbits || s.window_bits > largeMaxWbits { + result = decoderErrorFormatWindowBits + break + } + + s.state = stateInitialize + fallthrough + + /* Maximum distance, see section 9.1. of the spec. */ + /* Fall through. */ + case stateInitialize: + s.max_backward_distance = (1 << s.window_bits) - windowGap + + /* Allocate memory for both block_type_trees and block_len_trees. */ + s.block_type_trees = make([]huffmanCode, (3 * (huffmanMaxSize258 + huffmanMaxSize26))) + + if s.block_type_trees == nil { + result = decoderErrorAllocBlockTypeTrees + break + } + + s.block_len_trees = s.block_type_trees[3*huffmanMaxSize258:] + + s.state = stateMetablockBegin + fallthrough + + /* Fall through. */ + case stateMetablockBegin: + decoderStateMetablockBegin(s) + + s.state = stateMetablockHeader + fallthrough + + /* Fall through. */ + case stateMetablockHeader: + result = decodeMetaBlockLength(s, br) + /* Reads 2 - 31 bits. 
*/ + if result != decoderSuccess { + break + } + + if s.is_metadata != 0 || s.is_uncompressed != 0 { + if !bitReaderJumpToByteBoundary(br) { + result = decoderErrorFormatPadding1 + break + } + } + + if s.is_metadata != 0 { + s.state = stateMetadata + break + } + + if s.meta_block_remaining_len == 0 { + s.state = stateMetablockDone + break + } + + calculateRingBufferSize(s) + if s.is_uncompressed != 0 { + s.state = stateUncompressed + break + } + + s.loop_counter = 0 + s.state = stateHuffmanCode0 + + case stateUncompressed: + result = copyUncompressedBlockToOutput(available_out, next_out, nil, s) + if result == decoderSuccess { + s.state = stateMetablockDone + } + + case stateMetadata: + for ; s.meta_block_remaining_len > 0; s.meta_block_remaining_len-- { + var bits uint32 + + /* Read one byte and ignore it. */ + if !safeReadBits(br, 8, &bits) { + result = decoderNeedsMoreInput + break + } + } + + if result == decoderSuccess { + s.state = stateMetablockDone + } + + case stateHuffmanCode0: + if s.loop_counter >= 3 { + s.state = stateMetablockHeader2 + break + } + + /* Reads 1..11 bits. 
*/ + result = decodeVarLenUint8(s, br, &s.num_block_types[s.loop_counter]) + + if result != decoderSuccess { + break + } + + s.num_block_types[s.loop_counter]++ + if s.num_block_types[s.loop_counter] < 2 { + s.loop_counter++ + break + } + + s.state = stateHuffmanCode1 + fallthrough + + case stateHuffmanCode1: + { + var alphabet_size uint32 = s.num_block_types[s.loop_counter] + 2 + var tree_offset int = s.loop_counter * huffmanMaxSize258 + result = readHuffmanCode(alphabet_size, alphabet_size, s.block_type_trees[tree_offset:], nil, s) + if result != decoderSuccess { + break + } + s.state = stateHuffmanCode2 + } + fallthrough + + case stateHuffmanCode2: + { + var alphabet_size uint32 = numBlockLenSymbols + var tree_offset int = s.loop_counter * huffmanMaxSize26 + result = readHuffmanCode(alphabet_size, alphabet_size, s.block_len_trees[tree_offset:], nil, s) + if result != decoderSuccess { + break + } + s.state = stateHuffmanCode3 + } + fallthrough + + case stateHuffmanCode3: + var tree_offset int = s.loop_counter * huffmanMaxSize26 + if !safeReadBlockLength(s, &s.block_length[s.loop_counter], s.block_len_trees[tree_offset:], br) { + result = decoderNeedsMoreInput + break + } + + s.loop_counter++ + s.state = stateHuffmanCode0 + + case stateMetablockHeader2: + { + var bits uint32 + if !safeReadBits(br, 6, &bits) { + result = decoderNeedsMoreInput + break + } + + s.distance_postfix_bits = bits & bitMask(2) + bits >>= 2 + s.num_direct_distance_codes = numDistanceShortCodes + (bits << s.distance_postfix_bits) + s.distance_postfix_mask = int(bitMask(s.distance_postfix_bits)) + s.context_modes = make([]byte, uint(s.num_block_types[0])) + if s.context_modes == nil { + result = decoderErrorAllocContextModes + break + } + + s.loop_counter = 0 + s.state = stateContextModes + } + fallthrough + + case stateContextModes: + result = readContextModes(s) + + if result != decoderSuccess { + break + } + + s.state = stateContextMap1 + fallthrough + + case stateContextMap1: + result = 
decodeContextMap(s.num_block_types[0]<= 3 { + prepareLiteralDecoding(s) + s.dist_context_map_slice = s.dist_context_map + s.htree_command = []huffmanCode(s.insert_copy_hgroup.htrees[0]) + if !ensureRingBuffer(s) { + result = decoderErrorAllocRingBuffer2 + break + } + + s.state = stateCommandBegin + } + + case stateCommandBegin, stateCommandInner, stateCommandPostDecodeLiterals, stateCommandPostWrapCopy: + result = processCommands(s) + + if result == decoderNeedsMoreInput { + result = safeProcessCommands(s) + } + + case stateCommandInnerWrite, stateCommandPostWrite1, stateCommandPostWrite2: + result = writeRingBuffer(s, available_out, next_out, nil, false) + + if result != decoderSuccess { + break + } + + wrapRingBuffer(s) + if s.ringbuffer_size == 1<= uint64(block_size) { + return 0 + } + return block_size - uint(delta) +} + +/* Wraps 64-bit input position to 32-bit ring-buffer position preserving + "not-a-first-lap" feature. */ +func wrapPosition(position uint64) uint32 { + var result uint32 = uint32(position) + var gb uint64 = position >> 30 + if gb > 2 { + /* Wrap every 2GiB; The first 3GB are continuous. */ + result = result&((1<<30)-1) | (uint32((gb-1)&1)+1)<<30 + } + + return result +} + +func (s *Writer) getStorage(size int) []byte { + if len(s.storage) < size { + s.storage = make([]byte, size) + } + + return s.storage +} + +func hashTableSize(max_table_size uint, input_size uint) uint { + var htsize uint = 256 + for htsize < max_table_size && htsize < input_size { + htsize <<= 1 + } + + return htsize +} + +func getHashTable(s *Writer, quality int, input_size uint, table_size *uint) []int { + var max_table_size uint = maxHashTableSize(quality) + var htsize uint = hashTableSize(max_table_size, input_size) + /* Use smaller hash table when input.size() is smaller, since we + fill the table, incurring O(hash table size) overhead for + compression, and if the input is short, we won't need that + many hash table entries anyway. 
*/ + + var table []int + assert(max_table_size >= 256) + if quality == fastOnePassCompressionQuality { + /* Only odd shifts are supported by fast-one-pass. */ + if htsize&0xAAAAA == 0 { + htsize <<= 1 + } + } + + if htsize <= uint(len(s.small_table_)) { + table = s.small_table_[:] + } else { + if htsize > s.large_table_size_ { + s.large_table_size_ = htsize + s.large_table_ = nil + s.large_table_ = make([]int, htsize) + } + + table = s.large_table_ + } + + *table_size = htsize + for i := 0; i < int(htsize); i++ { + table[i] = 0 + } + return table +} + +func encodeWindowBits(lgwin int, large_window bool, last_bytes *uint16, last_bytes_bits *byte) { + if large_window { + *last_bytes = uint16((lgwin&0x3F)<<8 | 0x11) + *last_bytes_bits = 14 + } else { + if lgwin == 16 { + *last_bytes = 0 + *last_bytes_bits = 1 + } else if lgwin == 17 { + *last_bytes = 1 + *last_bytes_bits = 7 + } else if lgwin > 17 { + *last_bytes = uint16((lgwin-17)<<1 | 0x01) + *last_bytes_bits = 4 + } else { + *last_bytes = uint16((lgwin-8)<<4 | 0x01) + *last_bytes_bits = 7 + } + } +} + +/* Decide about the context map based on the ability of the prediction + ability of the previous byte UTF8-prefix on the next byte. The + prediction ability is calculated as Shannon entropy. Here we need + Shannon entropy instead of 'BitsEntropy' since the prefix will be + encoded with the remaining 6 bits of the following byte, and + BitsEntropy will assume that symbol to be stored alone using Huffman + coding. 
*/ + +var kStaticContextMapContinuation = [64]uint32{ + 1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} +var kStaticContextMapSimpleUTF8 = [64]uint32{ + 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +func chooseContextMap(quality int, bigram_histo []uint32, num_literal_contexts *uint, literal_context_map *[]uint32) { + var monogram_histo = [3]uint32{0} + var two_prefix_histo = [6]uint32{0} + var total uint + var i uint + var dummy uint + var entropy [4]float64 + for i = 0; i < 9; i++ { + monogram_histo[i%3] += bigram_histo[i] + two_prefix_histo[i%6] += bigram_histo[i] + } + + entropy[1] = shannonEntropy(monogram_histo[:], 3, &dummy) + entropy[2] = (shannonEntropy(two_prefix_histo[:], 3, &dummy) + shannonEntropy(two_prefix_histo[3:], 3, &dummy)) + entropy[3] = 0 + for i = 0; i < 3; i++ { + entropy[3] += shannonEntropy(bigram_histo[3*i:], 3, &dummy) + } + + total = uint(monogram_histo[0] + monogram_histo[1] + monogram_histo[2]) + assert(total != 0) + entropy[0] = 1.0 / float64(total) + entropy[1] *= entropy[0] + entropy[2] *= entropy[0] + entropy[3] *= entropy[0] + + if quality < minQualityForHqContextModeling { + /* 3 context models is a bit slower, don't use it at lower qualities. */ + entropy[3] = entropy[1] * 10 + } + + /* If expected savings by symbol are less than 0.2 bits, skip the + context modeling -- in exchange for faster decoding speed. 
*/ + if entropy[1]-entropy[2] < 0.2 && entropy[1]-entropy[3] < 0.2 { + *num_literal_contexts = 1 + } else if entropy[2]-entropy[3] < 0.02 { + *num_literal_contexts = 2 + *literal_context_map = kStaticContextMapSimpleUTF8[:] + } else { + *num_literal_contexts = 3 + *literal_context_map = kStaticContextMapContinuation[:] + } +} + +/* Decide if we want to use a more complex static context map containing 13 + context values, based on the entropy reduction of histograms over the + first 5 bits of literals. */ + +var kStaticContextMapComplexUTF8 = [64]uint32{ + 11, 11, 12, 12, /* 0 special */ + 0, 0, 0, 0, /* 4 lf */ + 1, 1, 9, 9, /* 8 space */ + 2, 2, 2, 2, /* !, first after space/lf and after something else. */ + 1, 1, 1, 1, /* " */ + 8, 3, 3, 3, /* % */ + 1, 1, 1, 1, /* ({[ */ + 2, 2, 2, 2, /* }]) */ + 8, 4, 4, 4, /* :; */ + 8, 7, 4, 4, /* . */ + 8, 0, 0, 0, /* > */ + 3, 3, 3, 3, /* [0..9] */ + 5, 5, 10, 5, /* [A-Z] */ + 5, 5, 10, 5, + 6, 6, 6, 6, /* [a-z] */ + 6, 6, 6, 6, +} + +func shouldUseComplexStaticContextMap(input []byte, start_pos uint, length uint, mask uint, quality int, size_hint uint, num_literal_contexts *uint, literal_context_map *[]uint32) bool { + /* Try the more complex static context map only for long data. */ + if size_hint < 1<<20 { + return false + } else { + var end_pos uint = start_pos + length + var combined_histo = [32]uint32{0} + var context_histo = [13][32]uint32{[32]uint32{0}} + var total uint32 = 0 + var entropy [3]float64 + var dummy uint + var i uint + var utf8_lut contextLUT = getContextLUT(contextUTF8) + /* To make entropy calculations faster and to fit on the stack, we collect + histograms over the 5 most significant bits of literals. One histogram + without context and 13 additional histograms for each context value. 
*/ + for ; start_pos+64 <= end_pos; start_pos += 4096 { + var stride_end_pos uint = start_pos + 64 + var prev2 byte = input[start_pos&mask] + var prev1 byte = input[(start_pos+1)&mask] + var pos uint + + /* To make the analysis of the data faster we only examine 64 byte long + strides at every 4kB intervals. */ + for pos = start_pos + 2; pos < stride_end_pos; pos++ { + var literal byte = input[pos&mask] + var context byte = byte(kStaticContextMapComplexUTF8[getContext(prev1, prev2, utf8_lut)]) + total++ + combined_histo[literal>>3]++ + context_histo[context][literal>>3]++ + prev2 = prev1 + prev1 = literal + } + } + + entropy[1] = shannonEntropy(combined_histo[:], 32, &dummy) + entropy[2] = 0 + for i = 0; i < 13; i++ { + entropy[2] += shannonEntropy(context_histo[i][0:], 32, &dummy) + } + + entropy[0] = 1.0 / float64(total) + entropy[1] *= entropy[0] + entropy[2] *= entropy[0] + + /* The triggering heuristics below were tuned by compressing the individual + files of the silesia corpus. If we skip this kind of context modeling + for not very well compressible input (i.e. entropy using context modeling + is 60% of maximal entropy) or if expected savings by symbol are less + than 0.2 bits, then in every case when it triggers, the final compression + ratio is improved. Note however that this heuristics might be too strict + for some cases and could be tuned further. 
*/ + if entropy[2] > 3.0 || entropy[1]-entropy[2] < 0.2 { + return false + } else { + *num_literal_contexts = 13 + *literal_context_map = kStaticContextMapComplexUTF8[:] + return true + } + } +} + +func decideOverLiteralContextModeling(input []byte, start_pos uint, length uint, mask uint, quality int, size_hint uint, num_literal_contexts *uint, literal_context_map *[]uint32) { + if quality < minQualityForContextModeling || length < 64 { + return + } else if shouldUseComplexStaticContextMap(input, start_pos, length, mask, quality, size_hint, num_literal_contexts, literal_context_map) { + } else /* Context map was already set, nothing else to do. */ + { + var end_pos uint = start_pos + length + /* Gather bi-gram data of the UTF8 byte prefixes. To make the analysis of + UTF8 data faster we only examine 64 byte long strides at every 4kB + intervals. */ + + var bigram_prefix_histo = [9]uint32{0} + for ; start_pos+64 <= end_pos; start_pos += 4096 { + var lut = [4]int{0, 0, 1, 2} + var stride_end_pos uint = start_pos + 64 + var prev int = lut[input[start_pos&mask]>>6] * 3 + var pos uint + for pos = start_pos + 1; pos < stride_end_pos; pos++ { + var literal byte = input[pos&mask] + bigram_prefix_histo[prev+lut[literal>>6]]++ + prev = lut[literal>>6] * 3 + } + } + + chooseContextMap(quality, bigram_prefix_histo[0:], num_literal_contexts, literal_context_map) + } +} + +func shouldCompress_encode(data []byte, mask uint, last_flush_pos uint64, bytes uint, num_literals uint, num_commands uint) bool { + /* TODO: find more precise minimal block overhead. 
*/ + if bytes <= 2 { + return false + } + if num_commands < (bytes>>8)+2 { + if float64(num_literals) > 0.99*float64(bytes) { + var literal_histo = [256]uint32{0} + const kSampleRate uint32 = 13 + const kMinEntropy float64 = 7.92 + var bit_cost_threshold float64 = float64(bytes) * kMinEntropy / float64(kSampleRate) + var t uint = uint((uint32(bytes) + kSampleRate - 1) / kSampleRate) + var pos uint32 = uint32(last_flush_pos) + var i uint + for i = 0; i < t; i++ { + literal_histo[data[pos&uint32(mask)]]++ + pos += kSampleRate + } + + if bitsEntropy(literal_histo[:], 256) > bit_cost_threshold { + return false + } + } + } + + return true +} + +/* Chooses the literal context mode for a metablock */ +func chooseContextMode(params *encoderParams, data []byte, pos uint, mask uint, length uint) int { + /* We only do the computation for the option of something else than + CONTEXT_UTF8 for the highest qualities */ + if params.quality >= minQualityForHqBlockSplitting && !isMostlyUTF8(data, pos, mask, length, kMinUTF8Ratio) { + return contextSigned + } + + return contextUTF8 +} + +func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes uint, is_last bool, literal_context_mode int, params *encoderParams, prev_byte byte, prev_byte2 byte, num_literals uint, commands []command, saved_dist_cache []int, dist_cache []int, storage_ix *uint, storage []byte) { + var wrapped_last_flush_pos uint32 = wrapPosition(last_flush_pos) + var last_bytes uint16 + var last_bytes_bits byte + var literal_context_lut contextLUT = getContextLUT(literal_context_mode) + var block_params encoderParams = *params + + if bytes == 0 { + /* Write the ISLAST and ISEMPTY bits. */ + writeBits(2, 3, storage_ix, storage) + + *storage_ix = (*storage_ix + 7) &^ 7 + return + } + + if !shouldCompress_encode(data, mask, last_flush_pos, bytes, num_literals, uint(len(commands))) { + /* Restore the distance cache, as its last update by + CreateBackwardReferences is now unused. 
*/ + copy(dist_cache, saved_dist_cache[:4]) + + storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage) + return + } + + assert(*storage_ix <= 14) + last_bytes = uint16(storage[1])<<8 | uint16(storage[0]) + last_bytes_bits = byte(*storage_ix) + if params.quality <= maxQualityForStaticEntropyCodes { + storeMetaBlockFast(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage) + } else if params.quality < minQualityForBlockSplit { + storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage) + } else { + mb := getMetaBlockSplit() + if params.quality < minQualityForHqBlockSplitting { + var num_literal_contexts uint = 1 + var literal_context_map []uint32 = nil + if !params.disable_literal_context_modeling { + decideOverLiteralContextModeling(data, uint(wrapped_last_flush_pos), bytes, mask, params.quality, params.size_hint, &num_literal_contexts, &literal_context_map) + } + + buildMetaBlockGreedy(data, uint(wrapped_last_flush_pos), mask, prev_byte, prev_byte2, literal_context_lut, num_literal_contexts, literal_context_map, commands, mb) + } else { + buildMetaBlock(data, uint(wrapped_last_flush_pos), mask, &block_params, prev_byte, prev_byte2, commands, literal_context_mode, mb) + } + + if params.quality >= minQualityForOptimizeHistograms { + /* The number of distance symbols effectively used for distance + histograms. It might be less than distance alphabet size + for "Large Window Brotli" (32-bit). 
*/ + var num_effective_dist_codes uint32 = block_params.dist.alphabet_size + if num_effective_dist_codes > numHistogramDistanceSymbols { + num_effective_dist_codes = numHistogramDistanceSymbols + } + + optimizeHistograms(num_effective_dist_codes, mb) + } + + storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, storage_ix, storage) + freeMetaBlockSplit(mb) + } + + if bytes+4 < *storage_ix>>3 { + /* Restore the distance cache and last byte. */ + copy(dist_cache, saved_dist_cache[:4]) + + storage[0] = byte(last_bytes) + storage[1] = byte(last_bytes >> 8) + *storage_ix = uint(last_bytes_bits) + storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage) + } +} + +func chooseDistanceParams(params *encoderParams) { + var distance_postfix_bits uint32 = 0 + var num_direct_distance_codes uint32 = 0 + + if params.quality >= minQualityForNonzeroDistanceParams { + var ndirect_msb uint32 + if params.mode == modeFont { + distance_postfix_bits = 1 + num_direct_distance_codes = 12 + } else { + distance_postfix_bits = params.dist.distance_postfix_bits + num_direct_distance_codes = params.dist.num_direct_distance_codes + } + + ndirect_msb = (num_direct_distance_codes >> distance_postfix_bits) & 0x0F + if distance_postfix_bits > maxNpostfix || num_direct_distance_codes > maxNdirect || ndirect_msb<>25)), (last_command.dist_prefix_&0x3FF == 0), &last_command.cmd_prefix_) + } +} + +/* + Processes the accumulated input data and writes + the new output meta-block to s.dest, if one has been + created (otherwise the processed input data is buffered internally). + If |is_last| or |force_flush| is true, an output meta-block is + always created. However, until |is_last| is true encoder may retain up + to 7 bits of the last byte of output. To force encoder to dump the remaining + bits use WriteMetadata() to append an empty meta-data block. 
+ Returns false if the size of the input data is larger than + input_block_size(). +*/ +func encodeData(s *Writer, is_last bool, force_flush bool) bool { + var delta uint64 = unprocessedInputSize(s) + var bytes uint32 = uint32(delta) + var wrapped_last_processed_pos uint32 = wrapPosition(s.last_processed_pos_) + var data []byte + var mask uint32 + var literal_context_mode int + + data = s.ringbuffer_.buffer_ + mask = s.ringbuffer_.mask_ + + /* Adding more blocks after "last" block is forbidden. */ + if s.is_last_block_emitted_ { + return false + } + if is_last { + s.is_last_block_emitted_ = true + } + + if delta > uint64(inputBlockSize(s)) { + return false + } + + if s.params.quality == fastTwoPassCompressionQuality { + if s.command_buf_ == nil || cap(s.command_buf_) < int(kCompressFragmentTwoPassBlockSize) { + s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize) + s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize) + } else { + s.command_buf_ = s.command_buf_[:kCompressFragmentTwoPassBlockSize] + s.literal_buf_ = s.literal_buf_[:kCompressFragmentTwoPassBlockSize] + } + } + + if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality { + var storage []byte + var storage_ix uint = uint(s.last_bytes_bits_) + var table_size uint + var table []int + + if delta == 0 && !is_last { + /* We have no new input data and we don't have to finish the stream, so + nothing to do. 
*/ + return true + } + + storage = s.getStorage(int(2*bytes + 503)) + storage[0] = byte(s.last_bytes_) + storage[1] = byte(s.last_bytes_ >> 8) + table = getHashTable(s, s.params.quality, uint(bytes), &table_size) + if s.params.quality == fastOnePassCompressionQuality { + compressFragmentFast(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage) + } else { + compressFragmentTwoPass(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, s.command_buf_, s.literal_buf_, table, table_size, &storage_ix, storage) + } + + s.last_bytes_ = uint16(storage[storage_ix>>3]) + s.last_bytes_bits_ = byte(storage_ix & 7) + updateLastProcessedPos(s) + s.writeOutput(storage[:storage_ix>>3]) + return true + } + { + /* Theoretical max number of commands is 1 per 2 bytes. */ + newsize := len(s.commands) + int(bytes)/2 + 1 + if newsize > cap(s.commands) { + /* Reserve a bit more memory to allow merging with a next block + without reallocation: that would impact speed. 
*/ + newsize += int(bytes/4) + 16 + + new_commands := make([]command, len(s.commands), newsize) + if s.commands != nil { + copy(new_commands, s.commands) + } + + s.commands = new_commands + } + } + + initOrStitchToPreviousBlock(&s.hasher_, data, uint(mask), &s.params, uint(wrapped_last_processed_pos), uint(bytes), is_last) + + literal_context_mode = chooseContextMode(&s.params, data, uint(wrapPosition(s.last_flush_pos_)), uint(mask), uint(s.input_pos_-s.last_flush_pos_)) + + if len(s.commands) != 0 && s.last_insert_len_ == 0 { + extendLastCommand(s, &bytes, &wrapped_last_processed_pos) + } + + if s.params.quality == zopflificationQuality { + assert(s.params.hasher.type_ == 10) + createZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_.(*h10), s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_) + } else if s.params.quality == hqZopflificationQuality { + assert(s.params.hasher.type_ == 10) + createHqZopfliBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_) + } else { + createBackwardReferences(uint(bytes), uint(wrapped_last_processed_pos), data, uint(mask), &s.params, s.hasher_, s.dist_cache_[:], &s.last_insert_len_, &s.commands, &s.num_literals_) + } + { + var max_length uint = maxMetablockSize(&s.params) + var max_literals uint = max_length / 8 + max_commands := int(max_length / 8) + var processed_bytes uint = uint(s.input_pos_ - s.last_flush_pos_) + var next_input_fits_metablock bool = (processed_bytes+inputBlockSize(s) <= max_length) + var should_flush bool = (s.params.quality < minQualityForBlockSplit && s.num_literals_+uint(len(s.commands)) >= maxNumDelayedSymbols) + /* If maximal possible additional block doesn't fit metablock, flush now. */ + /* TODO: Postpone decision until next block arrives? 
*/ + + /* If block splitting is not used, then flush as soon as there is some + amount of commands / literals produced. */ + if !is_last && !force_flush && !should_flush && next_input_fits_metablock && s.num_literals_ < max_literals && len(s.commands) < max_commands { + /* Merge with next input block. Everything will happen later. */ + if updateLastProcessedPos(s) { + hasherReset(s.hasher_) + } + + return true + } + } + + /* Create the last insert-only command. */ + if s.last_insert_len_ > 0 { + s.commands = append(s.commands, makeInsertCommand(s.last_insert_len_)) + s.num_literals_ += s.last_insert_len_ + s.last_insert_len_ = 0 + } + + if !is_last && s.input_pos_ == s.last_flush_pos_ { + /* We have no new input data and we don't have to finish the stream, so + nothing to do. */ + return true + } + + assert(s.input_pos_ >= s.last_flush_pos_) + assert(s.input_pos_ > s.last_flush_pos_ || is_last) + assert(s.input_pos_-s.last_flush_pos_ <= 1<<24) + { + var metablock_size uint32 = uint32(s.input_pos_ - s.last_flush_pos_) + var storage []byte = s.getStorage(int(2*metablock_size + 503)) + var storage_ix uint = uint(s.last_bytes_bits_) + storage[0] = byte(s.last_bytes_) + storage[1] = byte(s.last_bytes_ >> 8) + writeMetaBlockInternal(data, uint(mask), s.last_flush_pos_, uint(metablock_size), is_last, literal_context_mode, &s.params, s.prev_byte_, s.prev_byte2_, s.num_literals_, s.commands, s.saved_dist_cache_[:], s.dist_cache_[:], &storage_ix, storage) + s.last_bytes_ = uint16(storage[storage_ix>>3]) + s.last_bytes_bits_ = byte(storage_ix & 7) + s.last_flush_pos_ = s.input_pos_ + if updateLastProcessedPos(s) { + hasherReset(s.hasher_) + } + + if s.last_flush_pos_ > 0 { + s.prev_byte_ = data[(uint32(s.last_flush_pos_)-1)&mask] + } + + if s.last_flush_pos_ > 1 { + s.prev_byte2_ = data[uint32(s.last_flush_pos_-2)&mask] + } + + s.commands = s.commands[:0] + s.num_literals_ = 0 + + /* Save the state of the distance cache in case we need to restore it for + emitting an 
uncompressed block. */ + copy(s.saved_dist_cache_[:], s.dist_cache_[:]) + + s.writeOutput(storage[:storage_ix>>3]) + return true + } +} + +/* Dumps remaining output bits and metadata header to |header|. + Returns number of produced bytes. + REQUIRED: |header| should be 8-byte aligned and at least 16 bytes long. + REQUIRED: |block_size| <= (1 << 24). */ +func writeMetadataHeader(s *Writer, block_size uint, header []byte) uint { + storage_ix := uint(s.last_bytes_bits_) + header[0] = byte(s.last_bytes_) + header[1] = byte(s.last_bytes_ >> 8) + s.last_bytes_ = 0 + s.last_bytes_bits_ = 0 + + writeBits(1, 0, &storage_ix, header) + writeBits(2, 3, &storage_ix, header) + writeBits(1, 0, &storage_ix, header) + if block_size == 0 { + writeBits(2, 0, &storage_ix, header) + } else { + var nbits uint32 + if block_size == 1 { + nbits = 0 + } else { + nbits = log2FloorNonZero(uint(uint32(block_size)-1)) + 1 + } + var nbytes uint32 = (nbits + 7) / 8 + writeBits(2, uint64(nbytes), &storage_ix, header) + writeBits(uint(8*nbytes), uint64(block_size)-1, &storage_ix, header) + } + + return (storage_ix + 7) >> 3 +} + +func injectBytePaddingBlock(s *Writer) { + var seal uint32 = uint32(s.last_bytes_) + var seal_bits uint = uint(s.last_bytes_bits_) + s.last_bytes_ = 0 + s.last_bytes_bits_ = 0 + + /* is_last = 0, data_nibbles = 11, reserved = 0, meta_nibbles = 00 */ + seal |= 0x6 << seal_bits + + seal_bits += 6 + + destination := s.tiny_buf_.u8[:] + + destination[0] = byte(seal) + if seal_bits > 8 { + destination[1] = byte(seal >> 8) + } + if seal_bits > 16 { + destination[2] = byte(seal >> 16) + } + s.writeOutput(destination[:(seal_bits+7)>>3]) +} + +func checkFlushComplete(s *Writer) { + if s.stream_state_ == streamFlushRequested && s.err == nil { + s.stream_state_ = streamProcessing + } +} + +func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[]byte) bool { + var block_size_limit uint = uint(1) << s.params.lgwin + var buf_size uint = 
brotli_min_size_t(kCompressFragmentTwoPassBlockSize, brotli_min_size_t(*available_in, block_size_limit)) + var command_buf []uint32 = nil + var literal_buf []byte = nil + if s.params.quality != fastOnePassCompressionQuality && s.params.quality != fastTwoPassCompressionQuality { + return false + } + + if s.params.quality == fastTwoPassCompressionQuality { + if s.command_buf_ == nil || cap(s.command_buf_) < int(buf_size) { + s.command_buf_ = make([]uint32, buf_size) + s.literal_buf_ = make([]byte, buf_size) + } else { + s.command_buf_ = s.command_buf_[:buf_size] + s.literal_buf_ = s.literal_buf_[:buf_size] + } + + command_buf = s.command_buf_ + literal_buf = s.literal_buf_ + } + + for { + if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 { + injectBytePaddingBlock(s) + continue + } + + /* Compress block only when stream is not + finished, there is no pending flush request, and there is either + additional input or pending operation. */ + if s.stream_state_ == streamProcessing && (*available_in != 0 || op != int(operationProcess)) { + var block_size uint = brotli_min_size_t(block_size_limit, *available_in) + var is_last bool = (*available_in == block_size) && (op == int(operationFinish)) + var force_flush bool = (*available_in == block_size) && (op == int(operationFlush)) + var max_out_size uint = 2*block_size + 503 + var storage []byte = nil + var storage_ix uint = uint(s.last_bytes_bits_) + var table_size uint + var table []int + + if force_flush && block_size == 0 { + s.stream_state_ = streamFlushRequested + continue + } + + storage = s.getStorage(int(max_out_size)) + + storage[0] = byte(s.last_bytes_) + storage[1] = byte(s.last_bytes_ >> 8) + table = getHashTable(s, s.params.quality, block_size, &table_size) + + if s.params.quality == fastOnePassCompressionQuality { + compressFragmentFast(*next_in, block_size, is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage) + } else { + 
compressFragmentTwoPass(*next_in, block_size, is_last, command_buf, literal_buf, table, table_size, &storage_ix, storage) + } + + *next_in = (*next_in)[block_size:] + *available_in -= block_size + var out_bytes uint = storage_ix >> 3 + s.writeOutput(storage[:out_bytes]) + + s.last_bytes_ = uint16(storage[storage_ix>>3]) + s.last_bytes_bits_ = byte(storage_ix & 7) + + if force_flush { + s.stream_state_ = streamFlushRequested + } + if is_last { + s.stream_state_ = streamFinished + } + continue + } + + break + } + + checkFlushComplete(s) + return true +} + +func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool { + if *available_in > 1<<24 { + return false + } + + /* Switch to metadata block workflow, if required. */ + if s.stream_state_ == streamProcessing { + s.remaining_metadata_bytes_ = uint32(*available_in) + s.stream_state_ = streamMetadataHead + } + + if s.stream_state_ != streamMetadataHead && s.stream_state_ != streamMetadataBody { + return false + } + + for { + if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 { + injectBytePaddingBlock(s) + continue + } + + if s.input_pos_ != s.last_flush_pos_ { + var result bool = encodeData(s, false, true) + if !result { + return false + } + continue + } + + if s.stream_state_ == streamMetadataHead { + n := writeMetadataHeader(s, uint(s.remaining_metadata_bytes_), s.tiny_buf_.u8[:]) + s.writeOutput(s.tiny_buf_.u8[:n]) + s.stream_state_ = streamMetadataBody + continue + } else { + /* Exit workflow only when there is no more input and no more output. + Otherwise client may continue producing empty metadata blocks. */ + if s.remaining_metadata_bytes_ == 0 { + s.remaining_metadata_bytes_ = math.MaxUint32 + s.stream_state_ = streamProcessing + break + } + + /* This guarantees progress in "TakeOutput" workflow. 
*/ + var c uint32 = brotli_min_uint32_t(s.remaining_metadata_bytes_, 16) + copy(s.tiny_buf_.u8[:], (*next_in)[:c]) + *next_in = (*next_in)[c:] + *available_in -= uint(c) + s.remaining_metadata_bytes_ -= c + s.writeOutput(s.tiny_buf_.u8[:c]) + + continue + } + } + + return true +} + +func updateSizeHint(s *Writer, available_in uint) { + if s.params.size_hint == 0 { + var delta uint64 = unprocessedInputSize(s) + var tail uint64 = uint64(available_in) + var limit uint32 = 1 << 30 + var total uint32 + if (delta >= uint64(limit)) || (tail >= uint64(limit)) || ((delta + tail) >= uint64(limit)) { + total = limit + } else { + total = uint32(delta + tail) + } + + s.params.size_hint = uint(total) + } +} + +func encoderCompressStream(s *Writer, op int, available_in *uint, next_in *[]byte) bool { + if !ensureInitialized(s) { + return false + } + + /* Unfinished metadata block; check requirements. */ + if s.remaining_metadata_bytes_ != math.MaxUint32 { + if uint32(*available_in) != s.remaining_metadata_bytes_ { + return false + } + if op != int(operationEmitMetadata) { + return false + } + } + + if op == int(operationEmitMetadata) { + updateSizeHint(s, 0) /* First data metablock might be emitted here. 
*/ + return processMetadata(s, available_in, next_in) + } + + if s.stream_state_ == streamMetadataHead || s.stream_state_ == streamMetadataBody { + return false + } + + if s.stream_state_ != streamProcessing && *available_in != 0 { + return false + } + + if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality { + return encoderCompressStreamFast(s, op, available_in, next_in) + } + + for { + var remaining_block_size uint = remainingInputBlockSize(s) + + if remaining_block_size != 0 && *available_in != 0 { + var copy_input_size uint = brotli_min_size_t(remaining_block_size, *available_in) + copyInputToRingBuffer(s, copy_input_size, *next_in) + *next_in = (*next_in)[copy_input_size:] + *available_in -= copy_input_size + continue + } + + if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 { + injectBytePaddingBlock(s) + continue + } + + /* Compress data only when stream is not + finished and there is no pending flush request. 
*/ + if s.stream_state_ == streamProcessing { + if remaining_block_size == 0 || op != int(operationProcess) { + var is_last bool = ((*available_in == 0) && op == int(operationFinish)) + var force_flush bool = ((*available_in == 0) && op == int(operationFlush)) + var result bool + updateSizeHint(s, *available_in) + result = encodeData(s, is_last, force_flush) + if !result { + return false + } + if force_flush { + s.stream_state_ = streamFlushRequested + } + if is_last { + s.stream_state_ = streamFinished + } + continue + } + } + + break + } + + checkFlushComplete(s) + return true +} + +func (w *Writer) writeOutput(data []byte) { + if w.err != nil { + return + } + + _, w.err = w.dst.Write(data) + if w.err == nil { + checkFlushComplete(w) + } +} diff --git a/vendor/github.com/andybalholm/brotli/encoder.go b/vendor/github.com/andybalholm/brotli/encoder.go new file mode 100644 index 00000000000..1928382596e --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/encoder.go @@ -0,0 +1,177 @@ +package brotli + +import "github.com/andybalholm/brotli/matchfinder" + +// An Encoder implements the matchfinder.Encoder interface, writing in Brotli format. 
+type Encoder struct { + wroteHeader bool + bw bitWriter + distCache []distanceCode +} + +func (e *Encoder) Reset() { + e.wroteHeader = false + e.bw = bitWriter{} +} + +func (e *Encoder) Encode(dst []byte, src []byte, matches []matchfinder.Match, lastBlock bool) []byte { + e.bw.dst = dst + if !e.wroteHeader { + e.bw.writeBits(4, 15) + e.wroteHeader = true + } + + if len(src) == 0 { + if lastBlock { + e.bw.writeBits(2, 3) // islast + isempty + e.bw.jumpToByteBoundary() + return e.bw.dst + } + return dst + } + + var literalHisto [256]uint32 + var commandHisto [704]uint32 + var distanceHisto [64]uint32 + literalCount := 0 + commandCount := 0 + distanceCount := 0 + + if len(e.distCache) < len(matches) { + e.distCache = make([]distanceCode, len(matches)) + } + + // first pass: build the histograms + pos := 0 + + // d is the ring buffer of the last 4 distances. + d := [4]int{-10, -10, -10, -10} + for i, m := range matches { + if m.Unmatched > 0 { + for _, c := range src[pos : pos+m.Unmatched] { + literalHisto[c]++ + } + literalCount += m.Unmatched + } + + insertCode := getInsertLengthCode(uint(m.Unmatched)) + copyCode := getCopyLengthCode(uint(m.Length)) + if m.Length == 0 { + // If the stream ends with unmatched bytes, we need a dummy copy length. + copyCode = 2 + } + command := combineLengthCodes(insertCode, copyCode, false) + commandHisto[command]++ + commandCount++ + + if command >= 128 && m.Length != 0 { + var distCode distanceCode + switch m.Distance { + case d[3]: + distCode.code = 0 + case d[2]: + distCode.code = 1 + case d[1]: + distCode.code = 2 + case d[0]: + distCode.code = 3 + case d[3] - 1: + distCode.code = 4 + case d[3] + 1: + distCode.code = 5 + case d[3] - 2: + distCode.code = 6 + case d[3] + 2: + distCode.code = 7 + case d[3] - 3: + distCode.code = 8 + case d[3] + 3: + distCode.code = 9 + + // In my testing, codes 10–15 actually reduced the compression ratio. 
+ + default: + distCode = getDistanceCode(m.Distance) + } + e.distCache[i] = distCode + distanceHisto[distCode.code]++ + distanceCount++ + if distCode.code != 0 { + d[0], d[1], d[2], d[3] = d[1], d[2], d[3], m.Distance + } + } + + pos += m.Unmatched + m.Length + } + + storeMetaBlockHeaderBW(uint(len(src)), false, &e.bw) + e.bw.writeBits(13, 0) + + var literalDepths [256]byte + var literalBits [256]uint16 + buildAndStoreHuffmanTreeFastBW(literalHisto[:], uint(literalCount), 8, literalDepths[:], literalBits[:], &e.bw) + + var commandDepths [704]byte + var commandBits [704]uint16 + buildAndStoreHuffmanTreeFastBW(commandHisto[:], uint(commandCount), 10, commandDepths[:], commandBits[:], &e.bw) + + var distanceDepths [64]byte + var distanceBits [64]uint16 + buildAndStoreHuffmanTreeFastBW(distanceHisto[:], uint(distanceCount), 6, distanceDepths[:], distanceBits[:], &e.bw) + + pos = 0 + for i, m := range matches { + insertCode := getInsertLengthCode(uint(m.Unmatched)) + copyCode := getCopyLengthCode(uint(m.Length)) + if m.Length == 0 { + // If the stream ends with unmatched bytes, we need a dummy copy length. 
+ copyCode = 2 + } + command := combineLengthCodes(insertCode, copyCode, false) + e.bw.writeBits(uint(commandDepths[command]), uint64(commandBits[command])) + if kInsExtra[insertCode] > 0 { + e.bw.writeBits(uint(kInsExtra[insertCode]), uint64(m.Unmatched)-uint64(kInsBase[insertCode])) + } + if kCopyExtra[copyCode] > 0 { + e.bw.writeBits(uint(kCopyExtra[copyCode]), uint64(m.Length)-uint64(kCopyBase[copyCode])) + } + + if m.Unmatched > 0 { + for _, c := range src[pos : pos+m.Unmatched] { + e.bw.writeBits(uint(literalDepths[c]), uint64(literalBits[c])) + } + } + + if command >= 128 && m.Length != 0 { + distCode := e.distCache[i] + e.bw.writeBits(uint(distanceDepths[distCode.code]), uint64(distanceBits[distCode.code])) + if distCode.nExtra > 0 { + e.bw.writeBits(distCode.nExtra, distCode.extraBits) + } + } + + pos += m.Unmatched + m.Length + } + + if lastBlock { + e.bw.writeBits(2, 3) // islast + isempty + e.bw.jumpToByteBoundary() + } + return e.bw.dst +} + +type distanceCode struct { + code int + nExtra uint + extraBits uint64 +} + +func getDistanceCode(distance int) distanceCode { + d := distance + 3 + nbits := log2FloorNonZero(uint(d)) - 1 + prefix := (d >> nbits) & 1 + offset := (2 + prefix) << nbits + distcode := int(2*(nbits-1)) + prefix + 16 + extra := d - offset + return distanceCode{distcode, uint(nbits), uint64(extra)} +} diff --git a/vendor/github.com/andybalholm/brotli/encoder_dict.go b/vendor/github.com/andybalholm/brotli/encoder_dict.go new file mode 100644 index 00000000000..55c051c6238 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/encoder_dict.go @@ -0,0 +1,22 @@ +package brotli + +/* Dictionary data (words and transforms) for 1 possible context */ +type encoderDictionary struct { + words *dictionary + cutoffTransformsCount uint32 + cutoffTransforms uint64 + hash_table []uint16 + buckets []uint16 + dict_words []dictWord +} + +func initEncoderDictionary(dict *encoderDictionary) { + dict.words = getDictionary() + + dict.hash_table = 
kStaticDictionaryHash[:] + dict.buckets = kStaticDictionaryBuckets[:] + dict.dict_words = kStaticDictionaryWords[:] + + dict.cutoffTransformsCount = kCutoffTransformsCount + dict.cutoffTransforms = kCutoffTransforms +} diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode.go b/vendor/github.com/andybalholm/brotli/entropy_encode.go new file mode 100644 index 00000000000..3f469a3dd94 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/entropy_encode.go @@ -0,0 +1,592 @@ +package brotli + +import "math" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Entropy encoding (Huffman) utilities. */ + +/* A node of a Huffman tree. */ +type huffmanTree struct { + total_count_ uint32 + index_left_ int16 + index_right_or_value_ int16 +} + +func initHuffmanTree(self *huffmanTree, count uint32, left int16, right int16) { + self.total_count_ = count + self.index_left_ = left + self.index_right_or_value_ = right +} + +/* Input size optimized Shell sort. */ +type huffmanTreeComparator func(huffmanTree, huffmanTree) bool + +var sortHuffmanTreeItems_gaps = []uint{132, 57, 23, 10, 4, 1} + +func sortHuffmanTreeItems(items []huffmanTree, n uint, comparator huffmanTreeComparator) { + if n < 13 { + /* Insertion sort. 
*/ + var i uint + for i = 1; i < n; i++ { + var tmp huffmanTree = items[i] + var k uint = i + var j uint = i - 1 + for comparator(tmp, items[j]) { + items[k] = items[j] + k = j + if j == 0 { + break + } + j-- + } + + items[k] = tmp + } + + return + } else { + var g int + if n < 57 { + g = 2 + } else { + g = 0 + } + for ; g < 6; g++ { + var gap uint = sortHuffmanTreeItems_gaps[g] + var i uint + for i = gap; i < n; i++ { + var j uint = i + var tmp huffmanTree = items[i] + for ; j >= gap && comparator(tmp, items[j-gap]); j -= gap { + items[j] = items[j-gap] + } + + items[j] = tmp + } + } + } +} + +/* Returns 1 if assignment of depths succeeded, otherwise 0. */ +func setDepth(p0 int, pool []huffmanTree, depth []byte, max_depth int) bool { + var stack [16]int + var level int = 0 + var p int = p0 + assert(max_depth <= 15) + stack[0] = -1 + for { + if pool[p].index_left_ >= 0 { + level++ + if level > max_depth { + return false + } + stack[level] = int(pool[p].index_right_or_value_) + p = int(pool[p].index_left_) + continue + } else { + depth[pool[p].index_right_or_value_] = byte(level) + } + + for level >= 0 && stack[level] == -1 { + level-- + } + if level < 0 { + return true + } + p = stack[level] + stack[level] = -1 + } +} + +/* Sort the root nodes, least popular first. */ +func sortHuffmanTree(v0 huffmanTree, v1 huffmanTree) bool { + if v0.total_count_ != v1.total_count_ { + return v0.total_count_ < v1.total_count_ + } + + return v0.index_right_or_value_ > v1.index_right_or_value_ +} + +/* This function will create a Huffman tree. + + The catch here is that the tree cannot be arbitrarily deep. + Brotli specifies a maximum depth of 15 bits for "code trees" + and 7 bits for "code length code trees." + + count_limit is the value that is to be faked as the minimum value + and this minimum value is raised until the tree matches the + maximum length requirement. 
+ + This algorithm is not of excellent performance for very long data blocks, + especially when population counts are longer than 2**tree_limit, but + we are not planning to use this with extremely long blocks. + + See http://en.wikipedia.org/wiki/Huffman_coding */ +func createHuffmanTree(data []uint32, length uint, tree_limit int, tree []huffmanTree, depth []byte) { + var count_limit uint32 + var sentinel huffmanTree + initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) + + /* For block sizes below 64 kB, we never need to do a second iteration + of this loop. Probably all of our block sizes will be smaller than + that, so this loop is mostly of academic interest. If we actually + would need this, we would be better off with the Katajainen algorithm. */ + for count_limit = 1; ; count_limit *= 2 { + var n uint = 0 + var i uint + var j uint + var k uint + for i = length; i != 0; { + i-- + if data[i] != 0 { + var count uint32 = brotli_max_uint32_t(data[i], count_limit) + initHuffmanTree(&tree[n], count, -1, int16(i)) + n++ + } + } + + if n == 1 { + depth[tree[0].index_right_or_value_] = 1 /* Only one element. */ + break + } + + sortHuffmanTreeItems(tree, n, huffmanTreeComparator(sortHuffmanTree)) + + /* The nodes are: + [0, n): the sorted leaf nodes that we start with. + [n]: we add a sentinel here. + [n + 1, 2n): new parent nodes are added here, starting from + (n+1). These are naturally in ascending order. + [2n]: we add a sentinel at the end as well. + There will be (2n+1) elements at the end. */ + tree[n] = sentinel + + tree[n+1] = sentinel + + i = 0 /* Points to the next leaf node. */ + j = n + 1 /* Points to the next non-leaf node. */ + for k = n - 1; k != 0; k-- { + var left uint + var right uint + if tree[i].total_count_ <= tree[j].total_count_ { + left = i + i++ + } else { + left = j + j++ + } + + if tree[i].total_count_ <= tree[j].total_count_ { + right = i + i++ + } else { + right = j + j++ + } + { + /* The sentinel node becomes the parent node. 
*/ + var j_end uint = 2*n - k + tree[j_end].total_count_ = tree[left].total_count_ + tree[right].total_count_ + tree[j_end].index_left_ = int16(left) + tree[j_end].index_right_or_value_ = int16(right) + + /* Add back the last sentinel node. */ + tree[j_end+1] = sentinel + } + } + + if setDepth(int(2*n-1), tree[0:], depth, tree_limit) { + /* We need to pack the Huffman tree in tree_limit bits. If this was not + successful, add fake entities to the lowest values and retry. */ + break + } + } +} + +func reverse(v []byte, start uint, end uint) { + end-- + for start < end { + var tmp byte = v[start] + v[start] = v[end] + v[end] = tmp + start++ + end-- + } +} + +func writeHuffmanTreeRepetitions(previous_value byte, value byte, repetitions uint, tree_size *uint, tree []byte, extra_bits_data []byte) { + assert(repetitions > 0) + if previous_value != value { + tree[*tree_size] = value + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + repetitions-- + } + + if repetitions == 7 { + tree[*tree_size] = value + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + repetitions-- + } + + if repetitions < 3 { + var i uint + for i = 0; i < repetitions; i++ { + tree[*tree_size] = value + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + } + } else { + var start uint = *tree_size + repetitions -= 3 + for { + tree[*tree_size] = repeatPreviousCodeLength + extra_bits_data[*tree_size] = byte(repetitions & 0x3) + (*tree_size)++ + repetitions >>= 2 + if repetitions == 0 { + break + } + + repetitions-- + } + + reverse(tree, start, *tree_size) + reverse(extra_bits_data, start, *tree_size) + } +} + +func writeHuffmanTreeRepetitionsZeros(repetitions uint, tree_size *uint, tree []byte, extra_bits_data []byte) { + if repetitions == 11 { + tree[*tree_size] = 0 + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + repetitions-- + } + + if repetitions < 3 { + var i uint + for i = 0; i < repetitions; i++ { + tree[*tree_size] = 0 + extra_bits_data[*tree_size] = 0 + (*tree_size)++ + } + } else { + var 
start uint = *tree_size + repetitions -= 3 + for { + tree[*tree_size] = repeatZeroCodeLength + extra_bits_data[*tree_size] = byte(repetitions & 0x7) + (*tree_size)++ + repetitions >>= 3 + if repetitions == 0 { + break + } + + repetitions-- + } + + reverse(tree, start, *tree_size) + reverse(extra_bits_data, start, *tree_size) + } +} + +/* Change the population counts in a way that the consequent + Huffman tree compression, especially its RLE-part will be more + likely to compress this data more efficiently. + + length contains the size of the histogram. + counts contains the population counts. + good_for_rle is a buffer of at least length size */ +func optimizeHuffmanCountsForRLE(length uint, counts []uint32, good_for_rle []byte) { + var nonzero_count uint = 0 + var stride uint + var limit uint + var sum uint + var streak_limit uint = 1240 + var i uint + /* Let's make the Huffman code more compatible with RLE encoding. */ + for i = 0; i < length; i++ { + if counts[i] != 0 { + nonzero_count++ + } + } + + if nonzero_count < 16 { + return + } + + for length != 0 && counts[length-1] == 0 { + length-- + } + + if length == 0 { + return /* All zeros. */ + } + + /* Now counts[0..length - 1] does not have trailing zeros. */ + { + var nonzeros uint = 0 + var smallest_nonzero uint32 = 1 << 30 + for i = 0; i < length; i++ { + if counts[i] != 0 { + nonzeros++ + if smallest_nonzero > counts[i] { + smallest_nonzero = counts[i] + } + } + } + + if nonzeros < 5 { + /* Small histogram will model it well. */ + return + } + + if smallest_nonzero < 4 { + var zeros uint = length - nonzeros + if zeros < 6 { + for i = 1; i < length-1; i++ { + if counts[i-1] != 0 && counts[i] == 0 && counts[i+1] != 0 { + counts[i] = 1 + } + } + } + } + + if nonzeros < 28 { + return + } + } + + /* 2) Let's mark all population counts that already can be encoded + with an RLE code. 
*/ + for i := 0; i < int(length); i++ { + good_for_rle[i] = 0 + } + { + var symbol uint32 = counts[0] + /* Let's not spoil any of the existing good RLE codes. + Mark any seq of 0's that is longer as 5 as a good_for_rle. + Mark any seq of non-0's that is longer as 7 as a good_for_rle. */ + + var step uint = 0 + for i = 0; i <= length; i++ { + if i == length || counts[i] != symbol { + if (symbol == 0 && step >= 5) || (symbol != 0 && step >= 7) { + var k uint + for k = 0; k < step; k++ { + good_for_rle[i-k-1] = 1 + } + } + + step = 1 + if i != length { + symbol = counts[i] + } + } else { + step++ + } + } + } + + /* 3) Let's replace those population counts that lead to more RLE codes. + Math here is in 24.8 fixed point representation. */ + stride = 0 + + limit = uint(256*(counts[0]+counts[1]+counts[2])/3 + 420) + sum = 0 + for i = 0; i <= length; i++ { + if i == length || good_for_rle[i] != 0 || (i != 0 && good_for_rle[i-1] != 0) || (256*counts[i]-uint32(limit)+uint32(streak_limit)) >= uint32(2*streak_limit) { + if stride >= 4 || (stride >= 3 && sum == 0) { + var k uint + var count uint = (sum + stride/2) / stride + /* The stride must end, collapse what we have, if we have enough (4). */ + if count == 0 { + count = 1 + } + + if sum == 0 { + /* Don't make an all zeros stride to be upgraded to ones. */ + count = 0 + } + + for k = 0; k < stride; k++ { + /* We don't want to change value at counts[i], + that is already belonging to the next stride. Thus - 1. */ + counts[i-k-1] = uint32(count) + } + } + + stride = 0 + sum = 0 + if i < length-2 { + /* All interesting strides have a count of at least 4, */ + /* at least when non-zeros. 
*/ + limit = uint(256*(counts[i]+counts[i+1]+counts[i+2])/3 + 420) + } else if i < length { + limit = uint(256 * counts[i]) + } else { + limit = 0 + } + } + + stride++ + if i != length { + sum += uint(counts[i]) + if stride >= 4 { + limit = (256*sum + stride/2) / stride + } + + if stride == 4 { + limit += 120 + } + } + } +} + +func decideOverRLEUse(depth []byte, length uint, use_rle_for_non_zero *bool, use_rle_for_zero *bool) { + var total_reps_zero uint = 0 + var total_reps_non_zero uint = 0 + var count_reps_zero uint = 1 + var count_reps_non_zero uint = 1 + var i uint + for i = 0; i < length; { + var value byte = depth[i] + var reps uint = 1 + var k uint + for k = i + 1; k < length && depth[k] == value; k++ { + reps++ + } + + if reps >= 3 && value == 0 { + total_reps_zero += reps + count_reps_zero++ + } + + if reps >= 4 && value != 0 { + total_reps_non_zero += reps + count_reps_non_zero++ + } + + i += reps + } + + *use_rle_for_non_zero = total_reps_non_zero > count_reps_non_zero*2 + *use_rle_for_zero = total_reps_zero > count_reps_zero*2 +} + +/* Write a Huffman tree from bit depths into the bit-stream representation + of a Huffman tree. The generated Huffman tree is to be compressed once + more using a Huffman tree */ +func writeHuffmanTree(depth []byte, length uint, tree_size *uint, tree []byte, extra_bits_data []byte) { + var previous_value byte = initialRepeatedCodeLength + var i uint + var use_rle_for_non_zero bool = false + var use_rle_for_zero bool = false + var new_length uint = length + /* Throw away trailing zeros. */ + for i = 0; i < length; i++ { + if depth[length-i-1] == 0 { + new_length-- + } else { + break + } + } + + /* First gather statistics on if it is a good idea to do RLE. */ + if length > 50 { + /* Find RLE coding for longer codes. + Shorter codes seem not to benefit from RLE. */ + decideOverRLEUse(depth, new_length, &use_rle_for_non_zero, &use_rle_for_zero) + } + + /* Actual RLE coding. 
*/ + for i = 0; i < new_length; { + var value byte = depth[i] + var reps uint = 1 + if (value != 0 && use_rle_for_non_zero) || (value == 0 && use_rle_for_zero) { + var k uint + for k = i + 1; k < new_length && depth[k] == value; k++ { + reps++ + } + } + + if value == 0 { + writeHuffmanTreeRepetitionsZeros(reps, tree_size, tree, extra_bits_data) + } else { + writeHuffmanTreeRepetitions(previous_value, value, reps, tree_size, tree, extra_bits_data) + previous_value = value + } + + i += reps + } +} + +var reverseBits_kLut = [16]uint{ + 0x00, + 0x08, + 0x04, + 0x0C, + 0x02, + 0x0A, + 0x06, + 0x0E, + 0x01, + 0x09, + 0x05, + 0x0D, + 0x03, + 0x0B, + 0x07, + 0x0F, +} + +func reverseBits(num_bits uint, bits uint16) uint16 { + var retval uint = reverseBits_kLut[bits&0x0F] + var i uint + for i = 4; i < num_bits; i += 4 { + retval <<= 4 + bits = uint16(bits >> 4) + retval |= reverseBits_kLut[bits&0x0F] + } + + retval >>= ((0 - num_bits) & 0x03) + return uint16(retval) +} + +/* 0..15 are values for bits */ +const maxHuffmanBits = 16 + +/* Get the actual bit values for a tree of bit depths. */ +func convertBitDepthsToSymbols(depth []byte, len uint, bits []uint16) { + var bl_count = [maxHuffmanBits]uint16{0} + var next_code [maxHuffmanBits]uint16 + var i uint + /* In Brotli, all bit depths are [1..15] + 0 bit depth means that the symbol does not exist. 
*/ + + var code int = 0 + for i = 0; i < len; i++ { + bl_count[depth[i]]++ + } + + bl_count[0] = 0 + next_code[0] = 0 + for i = 1; i < maxHuffmanBits; i++ { + code = (code + int(bl_count[i-1])) << 1 + next_code[i] = uint16(code) + } + + for i = 0; i < len; i++ { + if depth[i] != 0 { + bits[i] = reverseBits(uint(depth[i]), next_code[depth[i]]) + next_code[depth[i]]++ + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode_static.go b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go new file mode 100644 index 00000000000..294aff4f4e6 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go @@ -0,0 +1,4399 @@ +package brotli + +var kCodeLengthDepth = [18]byte{4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 0, 4, 4} + +var kStaticCommandCodeDepth = [numCommandSymbols]byte{ + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, 
+ 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 9, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, 
+ 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, + 11, +} + +var kStaticDistanceCodeDepth = [64]byte{ + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, + 6, +} + +var kCodeLengthBits = [18]uint32{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 15, 31, 0, 11, 7} + +func storeStaticCodeLengthCode(storage_ix *uint, storage []byte) { + writeBits(40, 0x0000FF55555554, storage_ix, storage) +} + +func storeStaticCodeLengthCodeBW(bw *bitWriter) { + bw.writeBits(32, 0x55555554) + bw.writeBits(8, 0xFF) +} + +var kZeroRepsBits = [numCommandSymbols]uint64{ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000007, + 0x00000017, + 0x00000027, + 0x00000037, + 0x00000047, + 0x00000057, + 0x00000067, + 0x00000077, + 0x00000770, + 0x00000b87, + 0x00001387, + 0x00001b87, + 0x00002387, + 0x00002b87, + 0x00003387, + 0x00003b87, + 0x00000397, + 0x00000b97, + 0x00001397, + 0x00001b97, + 0x00002397, + 0x00002b97, + 0x00003397, + 0x00003b97, + 0x000003a7, + 0x00000ba7, + 0x000013a7, + 0x00001ba7, + 0x000023a7, + 0x00002ba7, + 0x000033a7, + 0x00003ba7, + 0x000003b7, + 0x00000bb7, + 0x000013b7, + 0x00001bb7, + 0x000023b7, + 0x00002bb7, + 0x000033b7, + 0x00003bb7, + 0x000003c7, + 0x00000bc7, + 
0x000013c7, + 0x00001bc7, + 0x000023c7, + 0x00002bc7, + 0x000033c7, + 0x00003bc7, + 0x000003d7, + 0x00000bd7, + 0x000013d7, + 0x00001bd7, + 0x000023d7, + 0x00002bd7, + 0x000033d7, + 0x00003bd7, + 0x000003e7, + 0x00000be7, + 0x000013e7, + 0x00001be7, + 0x000023e7, + 0x00002be7, + 0x000033e7, + 0x00003be7, + 0x000003f7, + 0x00000bf7, + 0x000013f7, + 0x00001bf7, + 0x000023f7, + 0x00002bf7, + 0x000033f7, + 0x00003bf7, + 0x0001c387, + 0x0005c387, + 0x0009c387, + 0x000dc387, + 0x0011c387, + 0x0015c387, + 0x0019c387, + 0x001dc387, + 0x0001cb87, + 0x0005cb87, + 0x0009cb87, + 0x000dcb87, + 0x0011cb87, + 0x0015cb87, + 0x0019cb87, + 0x001dcb87, + 0x0001d387, + 0x0005d387, + 0x0009d387, + 0x000dd387, + 0x0011d387, + 0x0015d387, + 0x0019d387, + 0x001dd387, + 0x0001db87, + 0x0005db87, + 0x0009db87, + 0x000ddb87, + 0x0011db87, + 0x0015db87, + 0x0019db87, + 0x001ddb87, + 0x0001e387, + 0x0005e387, + 0x0009e387, + 0x000de387, + 0x0011e387, + 0x0015e387, + 0x0019e387, + 0x001de387, + 0x0001eb87, + 0x0005eb87, + 0x0009eb87, + 0x000deb87, + 0x0011eb87, + 0x0015eb87, + 0x0019eb87, + 0x001deb87, + 0x0001f387, + 0x0005f387, + 0x0009f387, + 0x000df387, + 0x0011f387, + 0x0015f387, + 0x0019f387, + 0x001df387, + 0x0001fb87, + 0x0005fb87, + 0x0009fb87, + 0x000dfb87, + 0x0011fb87, + 0x0015fb87, + 0x0019fb87, + 0x001dfb87, + 0x0001c397, + 0x0005c397, + 0x0009c397, + 0x000dc397, + 0x0011c397, + 0x0015c397, + 0x0019c397, + 0x001dc397, + 0x0001cb97, + 0x0005cb97, + 0x0009cb97, + 0x000dcb97, + 0x0011cb97, + 0x0015cb97, + 0x0019cb97, + 0x001dcb97, + 0x0001d397, + 0x0005d397, + 0x0009d397, + 0x000dd397, + 0x0011d397, + 0x0015d397, + 0x0019d397, + 0x001dd397, + 0x0001db97, + 0x0005db97, + 0x0009db97, + 0x000ddb97, + 0x0011db97, + 0x0015db97, + 0x0019db97, + 0x001ddb97, + 0x0001e397, + 0x0005e397, + 0x0009e397, + 0x000de397, + 0x0011e397, + 0x0015e397, + 0x0019e397, + 0x001de397, + 0x0001eb97, + 0x0005eb97, + 0x0009eb97, + 0x000deb97, + 0x0011eb97, + 0x0015eb97, + 0x0019eb97, + 0x001deb97, + 0x0001f397, 
+ 0x0005f397, + 0x0009f397, + 0x000df397, + 0x0011f397, + 0x0015f397, + 0x0019f397, + 0x001df397, + 0x0001fb97, + 0x0005fb97, + 0x0009fb97, + 0x000dfb97, + 0x0011fb97, + 0x0015fb97, + 0x0019fb97, + 0x001dfb97, + 0x0001c3a7, + 0x0005c3a7, + 0x0009c3a7, + 0x000dc3a7, + 0x0011c3a7, + 0x0015c3a7, + 0x0019c3a7, + 0x001dc3a7, + 0x0001cba7, + 0x0005cba7, + 0x0009cba7, + 0x000dcba7, + 0x0011cba7, + 0x0015cba7, + 0x0019cba7, + 0x001dcba7, + 0x0001d3a7, + 0x0005d3a7, + 0x0009d3a7, + 0x000dd3a7, + 0x0011d3a7, + 0x0015d3a7, + 0x0019d3a7, + 0x001dd3a7, + 0x0001dba7, + 0x0005dba7, + 0x0009dba7, + 0x000ddba7, + 0x0011dba7, + 0x0015dba7, + 0x0019dba7, + 0x001ddba7, + 0x0001e3a7, + 0x0005e3a7, + 0x0009e3a7, + 0x000de3a7, + 0x0011e3a7, + 0x0015e3a7, + 0x0019e3a7, + 0x001de3a7, + 0x0001eba7, + 0x0005eba7, + 0x0009eba7, + 0x000deba7, + 0x0011eba7, + 0x0015eba7, + 0x0019eba7, + 0x001deba7, + 0x0001f3a7, + 0x0005f3a7, + 0x0009f3a7, + 0x000df3a7, + 0x0011f3a7, + 0x0015f3a7, + 0x0019f3a7, + 0x001df3a7, + 0x0001fba7, + 0x0005fba7, + 0x0009fba7, + 0x000dfba7, + 0x0011fba7, + 0x0015fba7, + 0x0019fba7, + 0x001dfba7, + 0x0001c3b7, + 0x0005c3b7, + 0x0009c3b7, + 0x000dc3b7, + 0x0011c3b7, + 0x0015c3b7, + 0x0019c3b7, + 0x001dc3b7, + 0x0001cbb7, + 0x0005cbb7, + 0x0009cbb7, + 0x000dcbb7, + 0x0011cbb7, + 0x0015cbb7, + 0x0019cbb7, + 0x001dcbb7, + 0x0001d3b7, + 0x0005d3b7, + 0x0009d3b7, + 0x000dd3b7, + 0x0011d3b7, + 0x0015d3b7, + 0x0019d3b7, + 0x001dd3b7, + 0x0001dbb7, + 0x0005dbb7, + 0x0009dbb7, + 0x000ddbb7, + 0x0011dbb7, + 0x0015dbb7, + 0x0019dbb7, + 0x001ddbb7, + 0x0001e3b7, + 0x0005e3b7, + 0x0009e3b7, + 0x000de3b7, + 0x0011e3b7, + 0x0015e3b7, + 0x0019e3b7, + 0x001de3b7, + 0x0001ebb7, + 0x0005ebb7, + 0x0009ebb7, + 0x000debb7, + 0x0011ebb7, + 0x0015ebb7, + 0x0019ebb7, + 0x001debb7, + 0x0001f3b7, + 0x0005f3b7, + 0x0009f3b7, + 0x000df3b7, + 0x0011f3b7, + 0x0015f3b7, + 0x0019f3b7, + 0x001df3b7, + 0x0001fbb7, + 0x0005fbb7, + 0x0009fbb7, + 0x000dfbb7, + 0x0011fbb7, + 0x0015fbb7, + 0x0019fbb7, + 
0x001dfbb7, + 0x0001c3c7, + 0x0005c3c7, + 0x0009c3c7, + 0x000dc3c7, + 0x0011c3c7, + 0x0015c3c7, + 0x0019c3c7, + 0x001dc3c7, + 0x0001cbc7, + 0x0005cbc7, + 0x0009cbc7, + 0x000dcbc7, + 0x0011cbc7, + 0x0015cbc7, + 0x0019cbc7, + 0x001dcbc7, + 0x0001d3c7, + 0x0005d3c7, + 0x0009d3c7, + 0x000dd3c7, + 0x0011d3c7, + 0x0015d3c7, + 0x0019d3c7, + 0x001dd3c7, + 0x0001dbc7, + 0x0005dbc7, + 0x0009dbc7, + 0x000ddbc7, + 0x0011dbc7, + 0x0015dbc7, + 0x0019dbc7, + 0x001ddbc7, + 0x0001e3c7, + 0x0005e3c7, + 0x0009e3c7, + 0x000de3c7, + 0x0011e3c7, + 0x0015e3c7, + 0x0019e3c7, + 0x001de3c7, + 0x0001ebc7, + 0x0005ebc7, + 0x0009ebc7, + 0x000debc7, + 0x0011ebc7, + 0x0015ebc7, + 0x0019ebc7, + 0x001debc7, + 0x0001f3c7, + 0x0005f3c7, + 0x0009f3c7, + 0x000df3c7, + 0x0011f3c7, + 0x0015f3c7, + 0x0019f3c7, + 0x001df3c7, + 0x0001fbc7, + 0x0005fbc7, + 0x0009fbc7, + 0x000dfbc7, + 0x0011fbc7, + 0x0015fbc7, + 0x0019fbc7, + 0x001dfbc7, + 0x0001c3d7, + 0x0005c3d7, + 0x0009c3d7, + 0x000dc3d7, + 0x0011c3d7, + 0x0015c3d7, + 0x0019c3d7, + 0x001dc3d7, + 0x0001cbd7, + 0x0005cbd7, + 0x0009cbd7, + 0x000dcbd7, + 0x0011cbd7, + 0x0015cbd7, + 0x0019cbd7, + 0x001dcbd7, + 0x0001d3d7, + 0x0005d3d7, + 0x0009d3d7, + 0x000dd3d7, + 0x0011d3d7, + 0x0015d3d7, + 0x0019d3d7, + 0x001dd3d7, + 0x0001dbd7, + 0x0005dbd7, + 0x0009dbd7, + 0x000ddbd7, + 0x0011dbd7, + 0x0015dbd7, + 0x0019dbd7, + 0x001ddbd7, + 0x0001e3d7, + 0x0005e3d7, + 0x0009e3d7, + 0x000de3d7, + 0x0011e3d7, + 0x0015e3d7, + 0x0019e3d7, + 0x001de3d7, + 0x0001ebd7, + 0x0005ebd7, + 0x0009ebd7, + 0x000debd7, + 0x0011ebd7, + 0x0015ebd7, + 0x0019ebd7, + 0x001debd7, + 0x0001f3d7, + 0x0005f3d7, + 0x0009f3d7, + 0x000df3d7, + 0x0011f3d7, + 0x0015f3d7, + 0x0019f3d7, + 0x001df3d7, + 0x0001fbd7, + 0x0005fbd7, + 0x0009fbd7, + 0x000dfbd7, + 0x0011fbd7, + 0x0015fbd7, + 0x0019fbd7, + 0x001dfbd7, + 0x0001c3e7, + 0x0005c3e7, + 0x0009c3e7, + 0x000dc3e7, + 0x0011c3e7, + 0x0015c3e7, + 0x0019c3e7, + 0x001dc3e7, + 0x0001cbe7, + 0x0005cbe7, + 0x0009cbe7, + 0x000dcbe7, + 0x0011cbe7, + 0x0015cbe7, 
+ 0x0019cbe7, + 0x001dcbe7, + 0x0001d3e7, + 0x0005d3e7, + 0x0009d3e7, + 0x000dd3e7, + 0x0011d3e7, + 0x0015d3e7, + 0x0019d3e7, + 0x001dd3e7, + 0x0001dbe7, + 0x0005dbe7, + 0x0009dbe7, + 0x000ddbe7, + 0x0011dbe7, + 0x0015dbe7, + 0x0019dbe7, + 0x001ddbe7, + 0x0001e3e7, + 0x0005e3e7, + 0x0009e3e7, + 0x000de3e7, + 0x0011e3e7, + 0x0015e3e7, + 0x0019e3e7, + 0x001de3e7, + 0x0001ebe7, + 0x0005ebe7, + 0x0009ebe7, + 0x000debe7, + 0x0011ebe7, + 0x0015ebe7, + 0x0019ebe7, + 0x001debe7, + 0x0001f3e7, + 0x0005f3e7, + 0x0009f3e7, + 0x000df3e7, + 0x0011f3e7, + 0x0015f3e7, + 0x0019f3e7, + 0x001df3e7, + 0x0001fbe7, + 0x0005fbe7, + 0x0009fbe7, + 0x000dfbe7, + 0x0011fbe7, + 0x0015fbe7, + 0x0019fbe7, + 0x001dfbe7, + 0x0001c3f7, + 0x0005c3f7, + 0x0009c3f7, + 0x000dc3f7, + 0x0011c3f7, + 0x0015c3f7, + 0x0019c3f7, + 0x001dc3f7, + 0x0001cbf7, + 0x0005cbf7, + 0x0009cbf7, + 0x000dcbf7, + 0x0011cbf7, + 0x0015cbf7, + 0x0019cbf7, + 0x001dcbf7, + 0x0001d3f7, + 0x0005d3f7, + 0x0009d3f7, + 0x000dd3f7, + 0x0011d3f7, + 0x0015d3f7, + 0x0019d3f7, + 0x001dd3f7, + 0x0001dbf7, + 0x0005dbf7, + 0x0009dbf7, + 0x000ddbf7, + 0x0011dbf7, + 0x0015dbf7, + 0x0019dbf7, + 0x001ddbf7, + 0x0001e3f7, + 0x0005e3f7, + 0x0009e3f7, + 0x000de3f7, + 0x0011e3f7, + 0x0015e3f7, + 0x0019e3f7, + 0x001de3f7, + 0x0001ebf7, + 0x0005ebf7, + 0x0009ebf7, + 0x000debf7, + 0x0011ebf7, + 0x0015ebf7, + 0x0019ebf7, + 0x001debf7, + 0x0001f3f7, + 0x0005f3f7, + 0x0009f3f7, + 0x000df3f7, + 0x0011f3f7, + 0x0015f3f7, + 0x0019f3f7, + 0x001df3f7, + 0x0001fbf7, + 0x0005fbf7, + 0x0009fbf7, + 0x000dfbf7, + 0x0011fbf7, + 0x0015fbf7, + 0x0019fbf7, + 0x001dfbf7, + 0x00e1c387, + 0x02e1c387, + 0x04e1c387, + 0x06e1c387, + 0x08e1c387, + 0x0ae1c387, + 0x0ce1c387, + 0x0ee1c387, + 0x00e5c387, + 0x02e5c387, + 0x04e5c387, + 0x06e5c387, + 0x08e5c387, + 0x0ae5c387, + 0x0ce5c387, + 0x0ee5c387, + 0x00e9c387, + 0x02e9c387, + 0x04e9c387, + 0x06e9c387, + 0x08e9c387, + 0x0ae9c387, + 0x0ce9c387, + 0x0ee9c387, + 0x00edc387, + 0x02edc387, + 0x04edc387, + 0x06edc387, + 
0x08edc387, + 0x0aedc387, + 0x0cedc387, + 0x0eedc387, + 0x00f1c387, + 0x02f1c387, + 0x04f1c387, + 0x06f1c387, + 0x08f1c387, + 0x0af1c387, + 0x0cf1c387, + 0x0ef1c387, + 0x00f5c387, + 0x02f5c387, + 0x04f5c387, + 0x06f5c387, + 0x08f5c387, + 0x0af5c387, + 0x0cf5c387, + 0x0ef5c387, + 0x00f9c387, + 0x02f9c387, + 0x04f9c387, + 0x06f9c387, + 0x08f9c387, + 0x0af9c387, + 0x0cf9c387, + 0x0ef9c387, + 0x00fdc387, + 0x02fdc387, + 0x04fdc387, + 0x06fdc387, + 0x08fdc387, + 0x0afdc387, + 0x0cfdc387, + 0x0efdc387, + 0x00e1cb87, + 0x02e1cb87, + 0x04e1cb87, + 0x06e1cb87, + 0x08e1cb87, + 0x0ae1cb87, + 0x0ce1cb87, + 0x0ee1cb87, + 0x00e5cb87, + 0x02e5cb87, + 0x04e5cb87, + 0x06e5cb87, + 0x08e5cb87, + 0x0ae5cb87, + 0x0ce5cb87, + 0x0ee5cb87, + 0x00e9cb87, + 0x02e9cb87, + 0x04e9cb87, + 0x06e9cb87, + 0x08e9cb87, + 0x0ae9cb87, + 0x0ce9cb87, + 0x0ee9cb87, + 0x00edcb87, + 0x02edcb87, + 0x04edcb87, + 0x06edcb87, + 0x08edcb87, + 0x0aedcb87, + 0x0cedcb87, + 0x0eedcb87, + 0x00f1cb87, + 0x02f1cb87, + 0x04f1cb87, + 0x06f1cb87, + 0x08f1cb87, + 0x0af1cb87, + 0x0cf1cb87, + 0x0ef1cb87, + 0x00f5cb87, + 0x02f5cb87, + 0x04f5cb87, + 0x06f5cb87, + 0x08f5cb87, + 0x0af5cb87, + 0x0cf5cb87, + 0x0ef5cb87, + 0x00f9cb87, + 0x02f9cb87, + 0x04f9cb87, + 0x06f9cb87, + 0x08f9cb87, +} + +var kZeroRepsDepth = [numCommandSymbols]uint32{ + 0, + 4, + 8, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 7, + 11, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 14, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 
21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 
21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 21, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, + 28, +} + +var kNonZeroRepsBits = [numCommandSymbols]uint64{ + 0x0000000b, + 0x0000001b, + 0x0000002b, + 0x0000003b, + 0x000002cb, + 0x000006cb, + 0x00000acb, + 0x00000ecb, + 0x000002db, + 0x000006db, + 0x00000adb, + 0x00000edb, + 0x000002eb, + 0x000006eb, + 0x00000aeb, + 0x00000eeb, + 0x000002fb, + 0x000006fb, + 0x00000afb, + 0x00000efb, + 0x0000b2cb, + 0x0001b2cb, + 0x0002b2cb, + 0x0003b2cb, + 0x0000b6cb, + 0x0001b6cb, + 0x0002b6cb, + 0x0003b6cb, + 0x0000bacb, + 0x0001bacb, + 
0x0002bacb, + 0x0003bacb, + 0x0000becb, + 0x0001becb, + 0x0002becb, + 0x0003becb, + 0x0000b2db, + 0x0001b2db, + 0x0002b2db, + 0x0003b2db, + 0x0000b6db, + 0x0001b6db, + 0x0002b6db, + 0x0003b6db, + 0x0000badb, + 0x0001badb, + 0x0002badb, + 0x0003badb, + 0x0000bedb, + 0x0001bedb, + 0x0002bedb, + 0x0003bedb, + 0x0000b2eb, + 0x0001b2eb, + 0x0002b2eb, + 0x0003b2eb, + 0x0000b6eb, + 0x0001b6eb, + 0x0002b6eb, + 0x0003b6eb, + 0x0000baeb, + 0x0001baeb, + 0x0002baeb, + 0x0003baeb, + 0x0000beeb, + 0x0001beeb, + 0x0002beeb, + 0x0003beeb, + 0x0000b2fb, + 0x0001b2fb, + 0x0002b2fb, + 0x0003b2fb, + 0x0000b6fb, + 0x0001b6fb, + 0x0002b6fb, + 0x0003b6fb, + 0x0000bafb, + 0x0001bafb, + 0x0002bafb, + 0x0003bafb, + 0x0000befb, + 0x0001befb, + 0x0002befb, + 0x0003befb, + 0x002cb2cb, + 0x006cb2cb, + 0x00acb2cb, + 0x00ecb2cb, + 0x002db2cb, + 0x006db2cb, + 0x00adb2cb, + 0x00edb2cb, + 0x002eb2cb, + 0x006eb2cb, + 0x00aeb2cb, + 0x00eeb2cb, + 0x002fb2cb, + 0x006fb2cb, + 0x00afb2cb, + 0x00efb2cb, + 0x002cb6cb, + 0x006cb6cb, + 0x00acb6cb, + 0x00ecb6cb, + 0x002db6cb, + 0x006db6cb, + 0x00adb6cb, + 0x00edb6cb, + 0x002eb6cb, + 0x006eb6cb, + 0x00aeb6cb, + 0x00eeb6cb, + 0x002fb6cb, + 0x006fb6cb, + 0x00afb6cb, + 0x00efb6cb, + 0x002cbacb, + 0x006cbacb, + 0x00acbacb, + 0x00ecbacb, + 0x002dbacb, + 0x006dbacb, + 0x00adbacb, + 0x00edbacb, + 0x002ebacb, + 0x006ebacb, + 0x00aebacb, + 0x00eebacb, + 0x002fbacb, + 0x006fbacb, + 0x00afbacb, + 0x00efbacb, + 0x002cbecb, + 0x006cbecb, + 0x00acbecb, + 0x00ecbecb, + 0x002dbecb, + 0x006dbecb, + 0x00adbecb, + 0x00edbecb, + 0x002ebecb, + 0x006ebecb, + 0x00aebecb, + 0x00eebecb, + 0x002fbecb, + 0x006fbecb, + 0x00afbecb, + 0x00efbecb, + 0x002cb2db, + 0x006cb2db, + 0x00acb2db, + 0x00ecb2db, + 0x002db2db, + 0x006db2db, + 0x00adb2db, + 0x00edb2db, + 0x002eb2db, + 0x006eb2db, + 0x00aeb2db, + 0x00eeb2db, + 0x002fb2db, + 0x006fb2db, + 0x00afb2db, + 0x00efb2db, + 0x002cb6db, + 0x006cb6db, + 0x00acb6db, + 0x00ecb6db, + 0x002db6db, + 0x006db6db, + 0x00adb6db, + 0x00edb6db, + 0x002eb6db, 
+ 0x006eb6db, + 0x00aeb6db, + 0x00eeb6db, + 0x002fb6db, + 0x006fb6db, + 0x00afb6db, + 0x00efb6db, + 0x002cbadb, + 0x006cbadb, + 0x00acbadb, + 0x00ecbadb, + 0x002dbadb, + 0x006dbadb, + 0x00adbadb, + 0x00edbadb, + 0x002ebadb, + 0x006ebadb, + 0x00aebadb, + 0x00eebadb, + 0x002fbadb, + 0x006fbadb, + 0x00afbadb, + 0x00efbadb, + 0x002cbedb, + 0x006cbedb, + 0x00acbedb, + 0x00ecbedb, + 0x002dbedb, + 0x006dbedb, + 0x00adbedb, + 0x00edbedb, + 0x002ebedb, + 0x006ebedb, + 0x00aebedb, + 0x00eebedb, + 0x002fbedb, + 0x006fbedb, + 0x00afbedb, + 0x00efbedb, + 0x002cb2eb, + 0x006cb2eb, + 0x00acb2eb, + 0x00ecb2eb, + 0x002db2eb, + 0x006db2eb, + 0x00adb2eb, + 0x00edb2eb, + 0x002eb2eb, + 0x006eb2eb, + 0x00aeb2eb, + 0x00eeb2eb, + 0x002fb2eb, + 0x006fb2eb, + 0x00afb2eb, + 0x00efb2eb, + 0x002cb6eb, + 0x006cb6eb, + 0x00acb6eb, + 0x00ecb6eb, + 0x002db6eb, + 0x006db6eb, + 0x00adb6eb, + 0x00edb6eb, + 0x002eb6eb, + 0x006eb6eb, + 0x00aeb6eb, + 0x00eeb6eb, + 0x002fb6eb, + 0x006fb6eb, + 0x00afb6eb, + 0x00efb6eb, + 0x002cbaeb, + 0x006cbaeb, + 0x00acbaeb, + 0x00ecbaeb, + 0x002dbaeb, + 0x006dbaeb, + 0x00adbaeb, + 0x00edbaeb, + 0x002ebaeb, + 0x006ebaeb, + 0x00aebaeb, + 0x00eebaeb, + 0x002fbaeb, + 0x006fbaeb, + 0x00afbaeb, + 0x00efbaeb, + 0x002cbeeb, + 0x006cbeeb, + 0x00acbeeb, + 0x00ecbeeb, + 0x002dbeeb, + 0x006dbeeb, + 0x00adbeeb, + 0x00edbeeb, + 0x002ebeeb, + 0x006ebeeb, + 0x00aebeeb, + 0x00eebeeb, + 0x002fbeeb, + 0x006fbeeb, + 0x00afbeeb, + 0x00efbeeb, + 0x002cb2fb, + 0x006cb2fb, + 0x00acb2fb, + 0x00ecb2fb, + 0x002db2fb, + 0x006db2fb, + 0x00adb2fb, + 0x00edb2fb, + 0x002eb2fb, + 0x006eb2fb, + 0x00aeb2fb, + 0x00eeb2fb, + 0x002fb2fb, + 0x006fb2fb, + 0x00afb2fb, + 0x00efb2fb, + 0x002cb6fb, + 0x006cb6fb, + 0x00acb6fb, + 0x00ecb6fb, + 0x002db6fb, + 0x006db6fb, + 0x00adb6fb, + 0x00edb6fb, + 0x002eb6fb, + 0x006eb6fb, + 0x00aeb6fb, + 0x00eeb6fb, + 0x002fb6fb, + 0x006fb6fb, + 0x00afb6fb, + 0x00efb6fb, + 0x002cbafb, + 0x006cbafb, + 0x00acbafb, + 0x00ecbafb, + 0x002dbafb, + 0x006dbafb, + 0x00adbafb, + 
0x00edbafb, + 0x002ebafb, + 0x006ebafb, + 0x00aebafb, + 0x00eebafb, + 0x002fbafb, + 0x006fbafb, + 0x00afbafb, + 0x00efbafb, + 0x002cbefb, + 0x006cbefb, + 0x00acbefb, + 0x00ecbefb, + 0x002dbefb, + 0x006dbefb, + 0x00adbefb, + 0x00edbefb, + 0x002ebefb, + 0x006ebefb, + 0x00aebefb, + 0x00eebefb, + 0x002fbefb, + 0x006fbefb, + 0x00afbefb, + 0x00efbefb, + 0x0b2cb2cb, + 0x1b2cb2cb, + 0x2b2cb2cb, + 0x3b2cb2cb, + 0x0b6cb2cb, + 0x1b6cb2cb, + 0x2b6cb2cb, + 0x3b6cb2cb, + 0x0bacb2cb, + 0x1bacb2cb, + 0x2bacb2cb, + 0x3bacb2cb, + 0x0becb2cb, + 0x1becb2cb, + 0x2becb2cb, + 0x3becb2cb, + 0x0b2db2cb, + 0x1b2db2cb, + 0x2b2db2cb, + 0x3b2db2cb, + 0x0b6db2cb, + 0x1b6db2cb, + 0x2b6db2cb, + 0x3b6db2cb, + 0x0badb2cb, + 0x1badb2cb, + 0x2badb2cb, + 0x3badb2cb, + 0x0bedb2cb, + 0x1bedb2cb, + 0x2bedb2cb, + 0x3bedb2cb, + 0x0b2eb2cb, + 0x1b2eb2cb, + 0x2b2eb2cb, + 0x3b2eb2cb, + 0x0b6eb2cb, + 0x1b6eb2cb, + 0x2b6eb2cb, + 0x3b6eb2cb, + 0x0baeb2cb, + 0x1baeb2cb, + 0x2baeb2cb, + 0x3baeb2cb, + 0x0beeb2cb, + 0x1beeb2cb, + 0x2beeb2cb, + 0x3beeb2cb, + 0x0b2fb2cb, + 0x1b2fb2cb, + 0x2b2fb2cb, + 0x3b2fb2cb, + 0x0b6fb2cb, + 0x1b6fb2cb, + 0x2b6fb2cb, + 0x3b6fb2cb, + 0x0bafb2cb, + 0x1bafb2cb, + 0x2bafb2cb, + 0x3bafb2cb, + 0x0befb2cb, + 0x1befb2cb, + 0x2befb2cb, + 0x3befb2cb, + 0x0b2cb6cb, + 0x1b2cb6cb, + 0x2b2cb6cb, + 0x3b2cb6cb, + 0x0b6cb6cb, + 0x1b6cb6cb, + 0x2b6cb6cb, + 0x3b6cb6cb, + 0x0bacb6cb, + 0x1bacb6cb, + 0x2bacb6cb, + 0x3bacb6cb, + 0x0becb6cb, + 0x1becb6cb, + 0x2becb6cb, + 0x3becb6cb, + 0x0b2db6cb, + 0x1b2db6cb, + 0x2b2db6cb, + 0x3b2db6cb, + 0x0b6db6cb, + 0x1b6db6cb, + 0x2b6db6cb, + 0x3b6db6cb, + 0x0badb6cb, + 0x1badb6cb, + 0x2badb6cb, + 0x3badb6cb, + 0x0bedb6cb, + 0x1bedb6cb, + 0x2bedb6cb, + 0x3bedb6cb, + 0x0b2eb6cb, + 0x1b2eb6cb, + 0x2b2eb6cb, + 0x3b2eb6cb, + 0x0b6eb6cb, + 0x1b6eb6cb, + 0x2b6eb6cb, + 0x3b6eb6cb, + 0x0baeb6cb, + 0x1baeb6cb, + 0x2baeb6cb, + 0x3baeb6cb, + 0x0beeb6cb, + 0x1beeb6cb, + 0x2beeb6cb, + 0x3beeb6cb, + 0x0b2fb6cb, + 0x1b2fb6cb, + 0x2b2fb6cb, + 0x3b2fb6cb, + 0x0b6fb6cb, + 0x1b6fb6cb, 
+ 0x2b6fb6cb, + 0x3b6fb6cb, + 0x0bafb6cb, + 0x1bafb6cb, + 0x2bafb6cb, + 0x3bafb6cb, + 0x0befb6cb, + 0x1befb6cb, + 0x2befb6cb, + 0x3befb6cb, + 0x0b2cbacb, + 0x1b2cbacb, + 0x2b2cbacb, + 0x3b2cbacb, + 0x0b6cbacb, + 0x1b6cbacb, + 0x2b6cbacb, + 0x3b6cbacb, + 0x0bacbacb, + 0x1bacbacb, + 0x2bacbacb, + 0x3bacbacb, + 0x0becbacb, + 0x1becbacb, + 0x2becbacb, + 0x3becbacb, + 0x0b2dbacb, + 0x1b2dbacb, + 0x2b2dbacb, + 0x3b2dbacb, + 0x0b6dbacb, + 0x1b6dbacb, + 0x2b6dbacb, + 0x3b6dbacb, + 0x0badbacb, + 0x1badbacb, + 0x2badbacb, + 0x3badbacb, + 0x0bedbacb, + 0x1bedbacb, + 0x2bedbacb, + 0x3bedbacb, + 0x0b2ebacb, + 0x1b2ebacb, + 0x2b2ebacb, + 0x3b2ebacb, + 0x0b6ebacb, + 0x1b6ebacb, + 0x2b6ebacb, + 0x3b6ebacb, + 0x0baebacb, + 0x1baebacb, + 0x2baebacb, + 0x3baebacb, + 0x0beebacb, + 0x1beebacb, + 0x2beebacb, + 0x3beebacb, + 0x0b2fbacb, + 0x1b2fbacb, + 0x2b2fbacb, + 0x3b2fbacb, + 0x0b6fbacb, + 0x1b6fbacb, + 0x2b6fbacb, + 0x3b6fbacb, + 0x0bafbacb, + 0x1bafbacb, + 0x2bafbacb, + 0x3bafbacb, + 0x0befbacb, + 0x1befbacb, + 0x2befbacb, + 0x3befbacb, + 0x0b2cbecb, + 0x1b2cbecb, + 0x2b2cbecb, + 0x3b2cbecb, + 0x0b6cbecb, + 0x1b6cbecb, + 0x2b6cbecb, + 0x3b6cbecb, + 0x0bacbecb, + 0x1bacbecb, + 0x2bacbecb, + 0x3bacbecb, + 0x0becbecb, + 0x1becbecb, + 0x2becbecb, + 0x3becbecb, + 0x0b2dbecb, + 0x1b2dbecb, + 0x2b2dbecb, + 0x3b2dbecb, + 0x0b6dbecb, + 0x1b6dbecb, + 0x2b6dbecb, + 0x3b6dbecb, + 0x0badbecb, + 0x1badbecb, + 0x2badbecb, + 0x3badbecb, + 0x0bedbecb, + 0x1bedbecb, + 0x2bedbecb, + 0x3bedbecb, + 0x0b2ebecb, + 0x1b2ebecb, + 0x2b2ebecb, + 0x3b2ebecb, + 0x0b6ebecb, + 0x1b6ebecb, + 0x2b6ebecb, + 0x3b6ebecb, + 0x0baebecb, + 0x1baebecb, + 0x2baebecb, + 0x3baebecb, + 0x0beebecb, + 0x1beebecb, + 0x2beebecb, + 0x3beebecb, + 0x0b2fbecb, + 0x1b2fbecb, + 0x2b2fbecb, + 0x3b2fbecb, + 0x0b6fbecb, + 0x1b6fbecb, + 0x2b6fbecb, + 0x3b6fbecb, + 0x0bafbecb, + 0x1bafbecb, + 0x2bafbecb, + 0x3bafbecb, + 0x0befbecb, + 0x1befbecb, + 0x2befbecb, + 0x3befbecb, + 0x0b2cb2db, + 0x1b2cb2db, + 0x2b2cb2db, + 0x3b2cb2db, + 
0x0b6cb2db, + 0x1b6cb2db, + 0x2b6cb2db, + 0x3b6cb2db, + 0x0bacb2db, + 0x1bacb2db, + 0x2bacb2db, + 0x3bacb2db, + 0x0becb2db, + 0x1becb2db, + 0x2becb2db, + 0x3becb2db, + 0x0b2db2db, + 0x1b2db2db, + 0x2b2db2db, + 0x3b2db2db, + 0x0b6db2db, + 0x1b6db2db, + 0x2b6db2db, + 0x3b6db2db, + 0x0badb2db, + 0x1badb2db, + 0x2badb2db, + 0x3badb2db, + 0x0bedb2db, + 0x1bedb2db, + 0x2bedb2db, + 0x3bedb2db, + 0x0b2eb2db, + 0x1b2eb2db, + 0x2b2eb2db, + 0x3b2eb2db, + 0x0b6eb2db, + 0x1b6eb2db, + 0x2b6eb2db, + 0x3b6eb2db, + 0x0baeb2db, + 0x1baeb2db, + 0x2baeb2db, + 0x3baeb2db, + 0x0beeb2db, + 0x1beeb2db, + 0x2beeb2db, + 0x3beeb2db, + 0x0b2fb2db, + 0x1b2fb2db, + 0x2b2fb2db, + 0x3b2fb2db, + 0x0b6fb2db, + 0x1b6fb2db, + 0x2b6fb2db, + 0x3b6fb2db, + 0x0bafb2db, + 0x1bafb2db, + 0x2bafb2db, + 0x3bafb2db, + 0x0befb2db, + 0x1befb2db, + 0x2befb2db, + 0x3befb2db, + 0x0b2cb6db, + 0x1b2cb6db, + 0x2b2cb6db, + 0x3b2cb6db, + 0x0b6cb6db, + 0x1b6cb6db, + 0x2b6cb6db, + 0x3b6cb6db, + 0x0bacb6db, + 0x1bacb6db, + 0x2bacb6db, + 0x3bacb6db, + 0x0becb6db, + 0x1becb6db, + 0x2becb6db, + 0x3becb6db, + 0x0b2db6db, + 0x1b2db6db, + 0x2b2db6db, + 0x3b2db6db, + 0x0b6db6db, + 0x1b6db6db, + 0x2b6db6db, + 0x3b6db6db, + 0x0badb6db, + 0x1badb6db, + 0x2badb6db, + 0x3badb6db, + 0x0bedb6db, + 0x1bedb6db, + 0x2bedb6db, + 0x3bedb6db, + 0x0b2eb6db, + 0x1b2eb6db, + 0x2b2eb6db, + 0x3b2eb6db, + 0x0b6eb6db, + 0x1b6eb6db, + 0x2b6eb6db, + 0x3b6eb6db, + 0x0baeb6db, + 0x1baeb6db, + 0x2baeb6db, + 0x3baeb6db, +} + +var kNonZeroRepsDepth = [numCommandSymbols]uint32{ + 6, + 6, + 6, + 6, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 12, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, + 18, 
+ 18, + 18, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 
30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, + 30, +} + +var kStaticCommandCodeBits = [numCommandSymbols]uint16{ + 0, + 256, + 128, + 384, + 64, + 320, + 192, + 448, + 32, + 288, + 160, + 416, + 96, + 352, + 224, + 480, + 16, + 272, + 144, + 400, + 80, + 336, + 208, + 464, + 48, + 304, + 176, + 432, + 112, + 368, + 
240, + 496, + 8, + 264, + 136, + 392, + 72, + 328, + 200, + 456, + 40, + 296, + 168, + 424, + 104, + 360, + 232, + 488, + 24, + 280, + 152, + 408, + 88, + 344, + 216, + 472, + 56, + 312, + 184, + 440, + 120, + 376, + 248, + 504, + 4, + 260, + 132, + 388, + 68, + 324, + 196, + 452, + 36, + 292, + 164, + 420, + 100, + 356, + 228, + 484, + 20, + 276, + 148, + 404, + 84, + 340, + 212, + 468, + 52, + 308, + 180, + 436, + 116, + 372, + 244, + 500, + 12, + 268, + 140, + 396, + 76, + 332, + 204, + 460, + 44, + 300, + 172, + 428, + 108, + 364, + 236, + 492, + 28, + 284, + 156, + 412, + 92, + 348, + 220, + 476, + 60, + 316, + 188, + 444, + 124, + 380, + 252, + 508, + 2, + 258, + 130, + 386, + 66, + 322, + 194, + 450, + 34, + 290, + 162, + 418, + 98, + 354, + 226, + 482, + 18, + 274, + 146, + 402, + 82, + 338, + 210, + 466, + 50, + 306, + 178, + 434, + 114, + 370, + 242, + 498, + 10, + 266, + 138, + 394, + 74, + 330, + 202, + 458, + 42, + 298, + 170, + 426, + 106, + 362, + 234, + 490, + 26, + 282, + 154, + 410, + 90, + 346, + 218, + 474, + 58, + 314, + 186, + 442, + 122, + 378, + 250, + 506, + 6, + 262, + 134, + 390, + 70, + 326, + 198, + 454, + 38, + 294, + 166, + 422, + 102, + 358, + 230, + 486, + 22, + 278, + 150, + 406, + 86, + 342, + 214, + 470, + 54, + 310, + 182, + 438, + 118, + 374, + 246, + 502, + 14, + 270, + 142, + 398, + 78, + 334, + 206, + 462, + 46, + 302, + 174, + 430, + 110, + 366, + 238, + 494, + 30, + 286, + 158, + 414, + 94, + 350, + 222, + 478, + 62, + 318, + 190, + 446, + 126, + 382, + 254, + 510, + 1, + 257, + 129, + 385, + 65, + 321, + 193, + 449, + 33, + 289, + 161, + 417, + 97, + 353, + 225, + 481, + 17, + 273, + 145, + 401, + 81, + 337, + 209, + 465, + 49, + 305, + 177, + 433, + 113, + 369, + 241, + 497, + 9, + 265, + 137, + 393, + 73, + 329, + 201, + 457, + 41, + 297, + 169, + 425, + 105, + 361, + 233, + 489, + 25, + 281, + 153, + 409, + 89, + 345, + 217, + 473, + 57, + 313, + 185, + 441, + 121, + 377, + 249, + 505, + 5, + 261, + 133, + 389, + 69, + 
325, + 197, + 453, + 37, + 293, + 165, + 421, + 101, + 357, + 229, + 485, + 21, + 277, + 149, + 405, + 85, + 341, + 213, + 469, + 53, + 309, + 181, + 437, + 117, + 373, + 245, + 501, + 13, + 269, + 141, + 397, + 77, + 333, + 205, + 461, + 45, + 301, + 173, + 429, + 109, + 365, + 237, + 493, + 29, + 285, + 157, + 413, + 93, + 349, + 221, + 477, + 61, + 317, + 189, + 445, + 125, + 381, + 253, + 509, + 3, + 259, + 131, + 387, + 67, + 323, + 195, + 451, + 35, + 291, + 163, + 419, + 99, + 355, + 227, + 483, + 19, + 275, + 147, + 403, + 83, + 339, + 211, + 467, + 51, + 307, + 179, + 435, + 115, + 371, + 243, + 499, + 11, + 267, + 139, + 395, + 75, + 331, + 203, + 459, + 43, + 299, + 171, + 427, + 107, + 363, + 235, + 491, + 27, + 283, + 155, + 411, + 91, + 347, + 219, + 475, + 59, + 315, + 187, + 443, + 123, + 379, + 251, + 507, + 7, + 1031, + 519, + 1543, + 263, + 1287, + 775, + 1799, + 135, + 1159, + 647, + 1671, + 391, + 1415, + 903, + 1927, + 71, + 1095, + 583, + 1607, + 327, + 1351, + 839, + 1863, + 199, + 1223, + 711, + 1735, + 455, + 1479, + 967, + 1991, + 39, + 1063, + 551, + 1575, + 295, + 1319, + 807, + 1831, + 167, + 1191, + 679, + 1703, + 423, + 1447, + 935, + 1959, + 103, + 1127, + 615, + 1639, + 359, + 1383, + 871, + 1895, + 231, + 1255, + 743, + 1767, + 487, + 1511, + 999, + 2023, + 23, + 1047, + 535, + 1559, + 279, + 1303, + 791, + 1815, + 151, + 1175, + 663, + 1687, + 407, + 1431, + 919, + 1943, + 87, + 1111, + 599, + 1623, + 343, + 1367, + 855, + 1879, + 215, + 1239, + 727, + 1751, + 471, + 1495, + 983, + 2007, + 55, + 1079, + 567, + 1591, + 311, + 1335, + 823, + 1847, + 183, + 1207, + 695, + 1719, + 439, + 1463, + 951, + 1975, + 119, + 1143, + 631, + 1655, + 375, + 1399, + 887, + 1911, + 247, + 1271, + 759, + 1783, + 503, + 1527, + 1015, + 2039, + 15, + 1039, + 527, + 1551, + 271, + 1295, + 783, + 1807, + 143, + 1167, + 655, + 1679, + 399, + 1423, + 911, + 1935, + 79, + 1103, + 591, + 1615, + 335, + 1359, + 847, + 1871, + 207, + 1231, + 719, + 1743, + 
463, + 1487, + 975, + 1999, + 47, + 1071, + 559, + 1583, + 303, + 1327, + 815, + 1839, + 175, + 1199, + 687, + 1711, + 431, + 1455, + 943, + 1967, + 111, + 1135, + 623, + 1647, + 367, + 1391, + 879, + 1903, + 239, + 1263, + 751, + 1775, + 495, + 1519, + 1007, + 2031, + 31, + 1055, + 543, + 1567, + 287, + 1311, + 799, + 1823, + 159, + 1183, + 671, + 1695, + 415, + 1439, + 927, + 1951, + 95, + 1119, + 607, + 1631, + 351, + 1375, + 863, + 1887, + 223, + 1247, + 735, + 1759, + 479, + 1503, + 991, + 2015, + 63, + 1087, + 575, + 1599, + 319, + 1343, + 831, + 1855, + 191, + 1215, + 703, + 1727, + 447, + 1471, + 959, + 1983, + 127, + 1151, + 639, + 1663, + 383, + 1407, + 895, + 1919, + 255, + 1279, + 767, + 1791, + 511, + 1535, + 1023, + 2047, +} + +func storeStaticCommandHuffmanTree(storage_ix *uint, storage []byte) { + writeBits(56, 0x92624416307003, storage_ix, storage) + writeBits(3, 0x00000000, storage_ix, storage) +} + +var kStaticDistanceCodeBits = [64]uint16{ + 0, + 32, + 16, + 48, + 8, + 40, + 24, + 56, + 4, + 36, + 20, + 52, + 12, + 44, + 28, + 60, + 2, + 34, + 18, + 50, + 10, + 42, + 26, + 58, + 6, + 38, + 22, + 54, + 14, + 46, + 30, + 62, + 1, + 33, + 17, + 49, + 9, + 41, + 25, + 57, + 5, + 37, + 21, + 53, + 13, + 45, + 29, + 61, + 3, + 35, + 19, + 51, + 11, + 43, + 27, + 59, + 7, + 39, + 23, + 55, + 15, + 47, + 31, + 63, +} + +func storeStaticDistanceHuffmanTree(storage_ix *uint, storage []byte) { + writeBits(28, 0x0369DC03, storage_ix, storage) +} diff --git a/vendor/github.com/andybalholm/brotli/fast_log.go b/vendor/github.com/andybalholm/brotli/fast_log.go new file mode 100644 index 00000000000..9d6607f7e2f --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/fast_log.go @@ -0,0 +1,290 @@ +package brotli + +import ( + "math" + "math/bits" +) + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Utilities for fast computation of logarithms. */ + +func log2FloorNonZero(n uint) uint32 { + return uint32(bits.Len(n)) - 1 +} + +/* A lookup table for small values of log2(int) to be used in entropy + computation. + + ", ".join(["%.16ff" % x for x in [0.0]+[log2(x) for x in range(1, 256)]]) */ +var kLog2Table = []float32{ + 0.0000000000000000, + 0.0000000000000000, + 1.0000000000000000, + 1.5849625007211563, + 2.0000000000000000, + 2.3219280948873622, + 2.5849625007211561, + 2.8073549220576042, + 3.0000000000000000, + 3.1699250014423126, + 3.3219280948873626, + 3.4594316186372978, + 3.5849625007211565, + 3.7004397181410922, + 3.8073549220576037, + 3.9068905956085187, + 4.0000000000000000, + 4.0874628412503400, + 4.1699250014423122, + 4.2479275134435852, + 4.3219280948873626, + 4.3923174227787607, + 4.4594316186372973, + 4.5235619560570131, + 4.5849625007211570, + 4.6438561897747244, + 4.7004397181410926, + 4.7548875021634691, + 4.8073549220576037, + 4.8579809951275728, + 4.9068905956085187, + 4.9541963103868758, + 5.0000000000000000, + 5.0443941193584534, + 5.0874628412503400, + 5.1292830169449664, + 5.1699250014423122, + 5.2094533656289501, + 5.2479275134435852, + 5.2854022188622487, + 5.3219280948873626, + 5.3575520046180838, + 5.3923174227787607, + 5.4262647547020979, + 5.4594316186372973, + 5.4918530963296748, + 5.5235619560570131, + 5.5545888516776376, + 5.5849625007211570, + 5.6147098441152083, + 5.6438561897747244, + 5.6724253419714961, + 5.7004397181410926, + 5.7279204545631996, + 5.7548875021634691, + 5.7813597135246599, + 5.8073549220576046, + 5.8328900141647422, + 5.8579809951275719, + 5.8826430493618416, + 5.9068905956085187, + 5.9307373375628867, + 5.9541963103868758, + 5.9772799234999168, + 6.0000000000000000, + 6.0223678130284544, + 6.0443941193584534, + 6.0660891904577721, + 6.0874628412503400, + 6.1085244567781700, + 6.1292830169449672, + 6.1497471195046822, + 
6.1699250014423122, + 6.1898245588800176, + 6.2094533656289510, + 6.2288186904958804, + 6.2479275134435861, + 6.2667865406949019, + 6.2854022188622487, + 6.3037807481771031, + 6.3219280948873617, + 6.3398500028846252, + 6.3575520046180847, + 6.3750394313469254, + 6.3923174227787598, + 6.4093909361377026, + 6.4262647547020979, + 6.4429434958487288, + 6.4594316186372982, + 6.4757334309663976, + 6.4918530963296748, + 6.5077946401986964, + 6.5235619560570131, + 6.5391588111080319, + 6.5545888516776376, + 6.5698556083309478, + 6.5849625007211561, + 6.5999128421871278, + 6.6147098441152092, + 6.6293566200796095, + 6.6438561897747253, + 6.6582114827517955, + 6.6724253419714952, + 6.6865005271832185, + 6.7004397181410917, + 6.7142455176661224, + 6.7279204545631988, + 6.7414669864011465, + 6.7548875021634691, + 6.7681843247769260, + 6.7813597135246599, + 6.7944158663501062, + 6.8073549220576037, + 6.8201789624151887, + 6.8328900141647422, + 6.8454900509443757, + 6.8579809951275719, + 6.8703647195834048, + 6.8826430493618416, + 6.8948177633079437, + 6.9068905956085187, + 6.9188632372745955, + 6.9307373375628867, + 6.9425145053392399, + 6.9541963103868758, + 6.9657842846620879, + 6.9772799234999168, + 6.9886846867721664, + 7.0000000000000000, + 7.0112272554232540, + 7.0223678130284544, + 7.0334230015374501, + 7.0443941193584534, + 7.0552824355011898, + 7.0660891904577721, + 7.0768155970508317, + 7.0874628412503400, + 7.0980320829605272, + 7.1085244567781700, + 7.1189410727235076, + 7.1292830169449664, + 7.1395513523987937, + 7.1497471195046822, + 7.1598713367783891, + 7.1699250014423130, + 7.1799090900149345, + 7.1898245588800176, + 7.1996723448363644, + 7.2094533656289492, + 7.2191685204621621, + 7.2288186904958804, + 7.2384047393250794, + 7.2479275134435861, + 7.2573878426926521, + 7.2667865406949019, + 7.2761244052742384, + 7.2854022188622487, + 7.2946207488916270, + 7.3037807481771031, + 7.3128829552843557, + 7.3219280948873617, + 7.3309168781146177, + 7.3398500028846243, 
+ 7.3487281542310781, + 7.3575520046180847, + 7.3663222142458151, + 7.3750394313469254, + 7.3837042924740528, + 7.3923174227787607, + 7.4008794362821844, + 7.4093909361377026, + 7.4178525148858991, + 7.4262647547020979, + 7.4346282276367255, + 7.4429434958487288, + 7.4512111118323299, + 7.4594316186372973, + 7.4676055500829976, + 7.4757334309663976, + 7.4838157772642564, + 7.4918530963296748, + 7.4998458870832057, + 7.5077946401986964, + 7.5156998382840436, + 7.5235619560570131, + 7.5313814605163119, + 7.5391588111080319, + 7.5468944598876373, + 7.5545888516776376, + 7.5622424242210728, + 7.5698556083309478, + 7.5774288280357487, + 7.5849625007211561, + 7.5924570372680806, + 7.5999128421871278, + 7.6073303137496113, + 7.6147098441152075, + 7.6220518194563764, + 7.6293566200796095, + 7.6366246205436488, + 7.6438561897747244, + 7.6510516911789290, + 7.6582114827517955, + 7.6653359171851765, + 7.6724253419714952, + 7.6794800995054464, + 7.6865005271832185, + 7.6934869574993252, + 7.7004397181410926, + 7.7073591320808825, + 7.7142455176661224, + 7.7210991887071856, + 7.7279204545631996, + 7.7347096202258392, + 7.7414669864011465, + 7.7481928495894596, + 7.7548875021634691, + 7.7615512324444795, + 7.7681843247769260, + 7.7747870596011737, + 7.7813597135246608, + 7.7879025593914317, + 7.7944158663501062, + 7.8008998999203047, + 7.8073549220576037, + 7.8137811912170374, + 7.8201789624151887, + 7.8265484872909159, + 7.8328900141647422, + 7.8392037880969445, + 7.8454900509443757, + 7.8517490414160571, + 7.8579809951275719, + 7.8641861446542798, + 7.8703647195834048, + 7.8765169465650002, + 7.8826430493618425, + 7.8887432488982601, + 7.8948177633079446, + 7.9008668079807496, + 7.9068905956085187, + 7.9128893362299619, + 7.9188632372745955, + 7.9248125036057813, + 7.9307373375628867, + 7.9366379390025719, + 7.9425145053392399, + 7.9483672315846778, + 7.9541963103868758, + 7.9600019320680806, + 7.9657842846620870, + 7.9715435539507720, + 7.9772799234999168, + 
7.9829935746943104, + 7.9886846867721664, + 7.9943534368588578, +} + +/* Faster logarithm for small integers, with the property of log2(0) == 0. */ +func fastLog2(v uint) float64 { + if v < uint(len(kLog2Table)) { + return float64(kLog2Table[v]) + } + + return math.Log2(float64(v)) +} diff --git a/vendor/github.com/andybalholm/brotli/find_match_length.go b/vendor/github.com/andybalholm/brotli/find_match_length.go new file mode 100644 index 00000000000..09d2ae67268 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/find_match_length.go @@ -0,0 +1,45 @@ +package brotli + +import ( + "encoding/binary" + "math/bits" + "runtime" +) + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Function to find maximal matching prefixes of strings. */ +func findMatchLengthWithLimit(s1 []byte, s2 []byte, limit uint) uint { + var matched uint = 0 + _, _ = s1[limit-1], s2[limit-1] // bounds check + switch runtime.GOARCH { + case "amd64": + // Compare 8 bytes at at time. + for matched+8 <= limit { + w1 := binary.LittleEndian.Uint64(s1[matched:]) + w2 := binary.LittleEndian.Uint64(s2[matched:]) + if w1 != w2 { + return matched + uint(bits.TrailingZeros64(w1^w2)>>3) + } + matched += 8 + } + case "386": + // Compare 4 bytes at at time. + for matched+4 <= limit { + w1 := binary.LittleEndian.Uint32(s1[matched:]) + w2 := binary.LittleEndian.Uint32(s2[matched:]) + if w1 != w2 { + return matched + uint(bits.TrailingZeros32(w1^w2)>>3) + } + matched += 4 + } + } + for matched < limit && s1[matched] == s2[matched] { + matched++ + } + return matched +} diff --git a/vendor/github.com/andybalholm/brotli/h10.go b/vendor/github.com/andybalholm/brotli/h10.go new file mode 100644 index 00000000000..5662fbbbb52 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/h10.go @@ -0,0 +1,287 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2016 Google Inc. 
All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func (*h10) HashTypeLength() uint { + return 4 +} + +func (*h10) StoreLookahead() uint { + return 128 +} + +func hashBytesH10(data []byte) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return h >> (32 - 17) +} + +/* A (forgetful) hash table where each hash bucket contains a binary tree of + sequences whose first 4 bytes share the same hash code. + Each sequence is 128 long and is identified by its starting + position in the input data. The binary tree is sorted by the lexicographic + order of the sequences, and it is also a max-heap with respect to the + starting positions. */ +type h10 struct { + hasherCommon + window_mask_ uint + buckets_ [1 << 17]uint32 + invalid_pos_ uint32 + forest []uint32 +} + +func (h *h10) Initialize(params *encoderParams) { + h.window_mask_ = (1 << params.lgwin) - 1 + h.invalid_pos_ = uint32(0 - h.window_mask_) + var num_nodes uint = uint(1) << params.lgwin + h.forest = make([]uint32, 2*num_nodes) +} + +func (h *h10) Prepare(one_shot bool, input_size uint, data []byte) { + var invalid_pos uint32 = h.invalid_pos_ + var i uint32 + for i = 0; i < 1<<17; i++ { + h.buckets_[i] = invalid_pos + } +} + +func leftChildIndexH10(self *h10, pos uint) uint { + return 2 * (pos & self.window_mask_) +} + +func rightChildIndexH10(self *h10, pos uint) uint { + return 2*(pos&self.window_mask_) + 1 +} + +/* Stores the hash of the next 4 bytes and in a single tree-traversal, the + hash bucket's binary tree is searched for matches and is re-rooted at the + current position. 
+ + If less than 128 data is available, the hash bucket of the + current position is searched for matches, but the state of the hash table + is not changed, since we can not know the final sorting order of the + current (incomplete) sequence. + + This function must be called with increasing cur_ix positions. */ +func storeAndFindMatchesH10(self *h10, data []byte, cur_ix uint, ring_buffer_mask uint, max_length uint, max_backward uint, best_len *uint, matches []backwardMatch) []backwardMatch { + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var max_comp_len uint = brotli_min_size_t(max_length, 128) + var should_reroot_tree bool = (max_length >= 128) + var key uint32 = hashBytesH10(data[cur_ix_masked:]) + var forest []uint32 = self.forest + var prev_ix uint = uint(self.buckets_[key]) + var node_left uint = leftChildIndexH10(self, cur_ix) + var node_right uint = rightChildIndexH10(self, cur_ix) + var best_len_left uint = 0 + var best_len_right uint = 0 + var depth_remaining uint + /* The forest index of the rightmost node of the left subtree of the new + root, updated as we traverse and re-root the tree of the hash bucket. */ + + /* The forest index of the leftmost node of the right subtree of the new + root, updated as we traverse and re-root the tree of the hash bucket. */ + + /* The match length of the rightmost node of the left subtree of the new + root, updated as we traverse and re-root the tree of the hash bucket. */ + + /* The match length of the leftmost node of the right subtree of the new + root, updated as we traverse and re-root the tree of the hash bucket. 
*/ + if should_reroot_tree { + self.buckets_[key] = uint32(cur_ix) + } + + for depth_remaining = 64; ; depth_remaining-- { + var backward uint = cur_ix - prev_ix + var prev_ix_masked uint = prev_ix & ring_buffer_mask + if backward == 0 || backward > max_backward || depth_remaining == 0 { + if should_reroot_tree { + forest[node_left] = self.invalid_pos_ + forest[node_right] = self.invalid_pos_ + } + + break + } + { + var cur_len uint = brotli_min_size_t(best_len_left, best_len_right) + var len uint + assert(cur_len <= 128) + len = cur_len + findMatchLengthWithLimit(data[cur_ix_masked+cur_len:], data[prev_ix_masked+cur_len:], max_length-cur_len) + if matches != nil && len > *best_len { + *best_len = uint(len) + initBackwardMatch(&matches[0], backward, uint(len)) + matches = matches[1:] + } + + if len >= max_comp_len { + if should_reroot_tree { + forest[node_left] = forest[leftChildIndexH10(self, prev_ix)] + forest[node_right] = forest[rightChildIndexH10(self, prev_ix)] + } + + break + } + + if data[cur_ix_masked+len] > data[prev_ix_masked+len] { + best_len_left = uint(len) + if should_reroot_tree { + forest[node_left] = uint32(prev_ix) + } + + node_left = rightChildIndexH10(self, prev_ix) + prev_ix = uint(forest[node_left]) + } else { + best_len_right = uint(len) + if should_reroot_tree { + forest[node_right] = uint32(prev_ix) + } + + node_right = leftChildIndexH10(self, prev_ix) + prev_ix = uint(forest[node_right]) + } + } + } + + return matches +} + +/* Finds all backward matches of &data[cur_ix & ring_buffer_mask] up to the + length of max_length and stores the position cur_ix in the hash table. + + Sets *num_matches to the number of matches found, and stores the found + matches in matches[0] to matches[*num_matches - 1]. The matches will be + sorted by strictly increasing length and (non-strictly) increasing + distance. 
*/ +func findAllMatchesH10(handle *h10, dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, cur_ix uint, max_length uint, max_backward uint, gap uint, params *encoderParams, matches []backwardMatch) uint { + var orig_matches []backwardMatch = matches + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var best_len uint = 1 + var short_match_max_backward uint + if params.quality != hqZopflificationQuality { + short_match_max_backward = 16 + } else { + short_match_max_backward = 64 + } + var stop uint = cur_ix - short_match_max_backward + var dict_matches [maxStaticDictionaryMatchLen + 1]uint32 + var i uint + if cur_ix < short_match_max_backward { + stop = 0 + } + for i = cur_ix - 1; i > stop && best_len <= 2; i-- { + var prev_ix uint = i + var backward uint = cur_ix - prev_ix + if backward > max_backward { + break + } + + prev_ix &= ring_buffer_mask + if data[cur_ix_masked] != data[prev_ix] || data[cur_ix_masked+1] != data[prev_ix+1] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len > best_len { + best_len = uint(len) + initBackwardMatch(&matches[0], backward, uint(len)) + matches = matches[1:] + } + } + } + + if best_len < max_length { + matches = storeAndFindMatchesH10(handle, data, cur_ix, ring_buffer_mask, max_length, max_backward, &best_len, matches) + } + + for i = 0; i <= maxStaticDictionaryMatchLen; i++ { + dict_matches[i] = kInvalidMatch + } + { + var minlen uint = brotli_max_size_t(4, best_len+1) + if findAllStaticDictionaryMatches(dictionary, data[cur_ix_masked:], minlen, max_length, dict_matches[0:]) { + var maxlen uint = brotli_min_size_t(maxStaticDictionaryMatchLen, max_length) + var l uint + for l = minlen; l <= maxlen; l++ { + var dict_id uint32 = dict_matches[l] + if dict_id < kInvalidMatch { + var distance uint = max_backward + gap + uint(dict_id>>5) + 1 + if distance <= params.dist.max_distance { + initDictionaryBackwardMatch(&matches[0], distance, l, 
uint(dict_id&31)) + matches = matches[1:] + } + } + } + } + } + + return uint(-cap(matches) + cap(orig_matches)) +} + +/* Stores the hash of the next 4 bytes and re-roots the binary tree at the + current sequence, without returning any matches. + REQUIRES: ix + 128 <= end-of-current-block */ +func (h *h10) Store(data []byte, mask uint, ix uint) { + var max_backward uint = h.window_mask_ - windowGap + 1 + /* Maximum distance is window size - 16, see section 9.1. of the spec. */ + storeAndFindMatchesH10(h, data, ix, mask, 128, max_backward, nil, nil) +} + +func (h *h10) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + var i uint = ix_start + var j uint = ix_start + if ix_start+63 <= ix_end { + i = ix_end - 63 + } + + if ix_start+512 <= i { + for ; j < i; j += 8 { + h.Store(data, mask, j) + } + } + + for ; i < ix_end; i++ { + h.Store(data, mask, i) + } +} + +func (h *h10) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) { + if num_bytes >= h.HashTypeLength()-1 && position >= 128 { + var i_start uint = position - 128 + 1 + var i_end uint = brotli_min_size_t(position, i_start+num_bytes) + /* Store the last `128 - 1` positions in the hasher. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + + var i uint + for i = i_start; i < i_end; i++ { + /* Maximum distance is window size - 16, see section 9.1. of the spec. + Furthermore, we have to make sure that we don't look further back + from the start of the next block than the window size, otherwise we + could access already overwritten areas of the ring-buffer. */ + var max_backward uint = h.window_mask_ - brotli_max_size_t(windowGap-1, position-i) + + /* We know that i + 128 <= position + num_bytes, i.e. the + end of the current block and that we have at least + 128 tail in the ring-buffer. 
*/ + storeAndFindMatchesH10(h, ringbuffer, i, ringbuffer_mask, 128, max_backward, nil, nil) + } + } +} + +/* MAX_NUM_MATCHES == 64 + MAX_TREE_SEARCH_DEPTH */ +const maxNumMatchesH10 = 128 + +func (*h10) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + panic("unimplemented") +} + +func (*h10) PrepareDistanceCache(distance_cache []int) { + panic("unimplemented") +} diff --git a/vendor/github.com/andybalholm/brotli/h5.go b/vendor/github.com/andybalholm/brotli/h5.go new file mode 100644 index 00000000000..f391b73fdd7 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/h5.go @@ -0,0 +1,214 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* A (forgetful) hash table to the data seen by the compressor, to + help create backward references to previous data. + + This is a hash map of fixed size (bucket_size_) to a ring buffer of + fixed size (block_size_). The ring buffer contains the last block_size_ + index positions of the given hash key in the compressed data. */ +func (*h5) HashTypeLength() uint { + return 4 +} + +func (*h5) StoreLookahead() uint { + return 4 +} + +/* HashBytes is the function that chooses the bucket to place the address in. */ +func hashBytesH5(data []byte, shift int) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. 
*/ + return uint32(h >> uint(shift)) +} + +type h5 struct { + hasherCommon + bucket_size_ uint + block_size_ uint + hash_shift_ int + block_mask_ uint32 + num []uint16 + buckets []uint32 +} + +func (h *h5) Initialize(params *encoderParams) { + h.hash_shift_ = 32 - h.params.bucket_bits + h.bucket_size_ = uint(1) << uint(h.params.bucket_bits) + h.block_size_ = uint(1) << uint(h.params.block_bits) + h.block_mask_ = uint32(h.block_size_ - 1) + h.num = make([]uint16, h.bucket_size_) + h.buckets = make([]uint32, h.block_size_*h.bucket_size_) +} + +func (h *h5) Prepare(one_shot bool, input_size uint, data []byte) { + var num []uint16 = h.num + var partial_prepare_threshold uint = h.bucket_size_ >> 6 + /* Partial preparation is 100 times slower (per socket). */ + if one_shot && input_size <= partial_prepare_threshold { + var i uint + for i = 0; i < input_size; i++ { + var key uint32 = hashBytesH5(data[i:], h.hash_shift_) + num[key] = 0 + } + } else { + for i := 0; i < int(h.bucket_size_); i++ { + num[i] = 0 + } + } +} + +/* Look at 4 bytes at &data[ix & mask]. + Compute a hash from these, and store the value of ix at that position. */ +func (h *h5) Store(data []byte, mask uint, ix uint) { + var num []uint16 = h.num + var key uint32 = hashBytesH5(data[ix&mask:], h.hash_shift_) + var minor_ix uint = uint(num[key]) & uint(h.block_mask_) + var offset uint = minor_ix + uint(key<= h.HashTypeLength()-1 && position >= 3 { + /* Prepare the hashes for three last bytes of the last write. + These could not be calculated before, since they require knowledge + of both the previous and the current block. 
*/ + h.Store(ringbuffer, ringbuffer_mask, position-3) + h.Store(ringbuffer, ringbuffer_mask, position-2) + h.Store(ringbuffer, ringbuffer_mask, position-1) + } +} + +func (h *h5) PrepareDistanceCache(distance_cache []int) { + prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check) +} + +/* Find a longest backward match of &data[cur_ix] up to the length of + max_length and stores the position cur_ix in the hash table. + + REQUIRES: PrepareDistanceCacheH5 must be invoked for current distance cache + values; if this method is invoked repeatedly with the same distance + cache values, it is enough to invoke PrepareDistanceCacheH5 once. + + Does not look for matches longer than max_length. + Does not look for matches further away than max_backward. + Writes the best match into |out|. + |out|->score is updated only if a better match is found. */ +func (h *h5) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var num []uint16 = h.num + var buckets []uint32 = h.buckets + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = out.len + var i uint + var bucket []uint32 + /* Don't accept a short copy from far away. */ + out.len = 0 + + out.len_code_delta = 0 + + /* Try last distance first. 
*/ + for i = 0; i < uint(h.params.num_last_distances_to_check); i++ { + var backward uint = uint(distance_cache[i]) + var prev_ix uint = uint(cur_ix - backward) + if prev_ix >= cur_ix { + continue + } + + if backward > max_backward { + continue + } + + prev_ix &= ring_buffer_mask + + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 3 || (len == 2 && i < 2) { + /* Comparing for >= 2 does not change the semantics, but just saves for + a few unnecessary binary logarithms in backward reference score, + since we are not interested in such short matches. */ + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + if i != 0 { + score -= backwardReferencePenaltyUsingLastDistance(i) + } + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + } + { + var key uint32 = hashBytesH5(data[cur_ix_masked:], h.hash_shift_) + bucket = buckets[key< h.block_size_ { + down = uint(num[key]) - h.block_size_ + } else { + down = 0 + } + for i = uint(num[key]); i > down; { + var prev_ix uint + i-- + prev_ix = uint(bucket[uint32(i)&h.block_mask_]) + var backward uint = cur_ix - prev_ix + if backward > max_backward { + break + } + + prev_ix &= ring_buffer_mask + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + /* Comparing for >= 3 does not change the semantics, but just saves + for a few unnecessary binary logarithms in backward reference + score, since we are not interested in such short matches. 
*/ + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + + bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix) + num[key]++ + } + + if min_score == out.score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false) + } +} diff --git a/vendor/github.com/andybalholm/brotli/h6.go b/vendor/github.com/andybalholm/brotli/h6.go new file mode 100644 index 00000000000..80bb224aa87 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/h6.go @@ -0,0 +1,216 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* A (forgetful) hash table to the data seen by the compressor, to + help create backward references to previous data. + + This is a hash map of fixed size (bucket_size_) to a ring buffer of + fixed size (block_size_). The ring buffer contains the last block_size_ + index positions of the given hash key in the compressed data. */ +func (*h6) HashTypeLength() uint { + return 8 +} + +func (*h6) StoreLookahead() uint { + return 8 +} + +/* HashBytes is the function that chooses the bucket to place the address in. */ +func hashBytesH6(data []byte, mask uint64, shift int) uint32 { + var h uint64 = (binary.LittleEndian.Uint64(data) & mask) * kHashMul64Long + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. 
*/ + return uint32(h >> uint(shift)) +} + +type h6 struct { + hasherCommon + bucket_size_ uint + block_size_ uint + hash_shift_ int + hash_mask_ uint64 + block_mask_ uint32 + num []uint16 + buckets []uint32 +} + +func (h *h6) Initialize(params *encoderParams) { + h.hash_shift_ = 64 - h.params.bucket_bits + h.hash_mask_ = (^(uint64(0))) >> uint(64-8*h.params.hash_len) + h.bucket_size_ = uint(1) << uint(h.params.bucket_bits) + h.block_size_ = uint(1) << uint(h.params.block_bits) + h.block_mask_ = uint32(h.block_size_ - 1) + h.num = make([]uint16, h.bucket_size_) + h.buckets = make([]uint32, h.block_size_*h.bucket_size_) +} + +func (h *h6) Prepare(one_shot bool, input_size uint, data []byte) { + var num []uint16 = h.num + var partial_prepare_threshold uint = h.bucket_size_ >> 6 + /* Partial preparation is 100 times slower (per socket). */ + if one_shot && input_size <= partial_prepare_threshold { + var i uint + for i = 0; i < input_size; i++ { + var key uint32 = hashBytesH6(data[i:], h.hash_mask_, h.hash_shift_) + num[key] = 0 + } + } else { + for i := 0; i < int(h.bucket_size_); i++ { + num[i] = 0 + } + } +} + +/* Look at 4 bytes at &data[ix & mask]. + Compute a hash from these, and store the value of ix at that position. */ +func (h *h6) Store(data []byte, mask uint, ix uint) { + var num []uint16 = h.num + var key uint32 = hashBytesH6(data[ix&mask:], h.hash_mask_, h.hash_shift_) + var minor_ix uint = uint(num[key]) & uint(h.block_mask_) + var offset uint = minor_ix + uint(key<= h.HashTypeLength()-1 && position >= 3 { + /* Prepare the hashes for three last bytes of the last write. + These could not be calculated before, since they require knowledge + of both the previous and the current block. 
*/ + h.Store(ringbuffer, ringbuffer_mask, position-3) + h.Store(ringbuffer, ringbuffer_mask, position-2) + h.Store(ringbuffer, ringbuffer_mask, position-1) + } +} + +func (h *h6) PrepareDistanceCache(distance_cache []int) { + prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check) +} + +/* Find a longest backward match of &data[cur_ix] up to the length of + max_length and stores the position cur_ix in the hash table. + + REQUIRES: PrepareDistanceCacheH6 must be invoked for current distance cache + values; if this method is invoked repeatedly with the same distance + cache values, it is enough to invoke PrepareDistanceCacheH6 once. + + Does not look for matches longer than max_length. + Does not look for matches further away than max_backward. + Writes the best match into |out|. + |out|->score is updated only if a better match is found. */ +func (h *h6) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var num []uint16 = h.num + var buckets []uint32 = h.buckets + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = out.len + var i uint + var bucket []uint32 + /* Don't accept a short copy from far away. */ + out.len = 0 + + out.len_code_delta = 0 + + /* Try last distance first. 
*/ + for i = 0; i < uint(h.params.num_last_distances_to_check); i++ { + var backward uint = uint(distance_cache[i]) + var prev_ix uint = uint(cur_ix - backward) + if prev_ix >= cur_ix { + continue + } + + if backward > max_backward { + continue + } + + prev_ix &= ring_buffer_mask + + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 3 || (len == 2 && i < 2) { + /* Comparing for >= 2 does not change the semantics, but just saves for + a few unnecessary binary logarithms in backward reference score, + since we are not interested in such short matches. */ + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + if i != 0 { + score -= backwardReferencePenaltyUsingLastDistance(i) + } + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + } + { + var key uint32 = hashBytesH6(data[cur_ix_masked:], h.hash_mask_, h.hash_shift_) + bucket = buckets[key< h.block_size_ { + down = uint(num[key]) - h.block_size_ + } else { + down = 0 + } + for i = uint(num[key]); i > down; { + var prev_ix uint + i-- + prev_ix = uint(bucket[uint32(i)&h.block_mask_]) + var backward uint = cur_ix - prev_ix + if backward > max_backward { + break + } + + prev_ix &= ring_buffer_mask + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + /* Comparing for >= 3 does not change the semantics, but just saves + for a few unnecessary binary logarithms in backward reference + score, since we are not interested in such short matches. 
*/ + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + + bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix) + num[key]++ + } + + if min_score == out.score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false) + } +} diff --git a/vendor/github.com/andybalholm/brotli/hash.go b/vendor/github.com/andybalholm/brotli/hash.go new file mode 100644 index 00000000000..00f812e87ec --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash.go @@ -0,0 +1,342 @@ +package brotli + +import ( + "encoding/binary" + "fmt" +) + +type hasherCommon struct { + params hasherParams + is_prepared_ bool + dict_num_lookups uint + dict_num_matches uint +} + +func (h *hasherCommon) Common() *hasherCommon { + return h +} + +type hasherHandle interface { + Common() *hasherCommon + Initialize(params *encoderParams) + Prepare(one_shot bool, input_size uint, data []byte) + StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) + HashTypeLength() uint + StoreLookahead() uint + PrepareDistanceCache(distance_cache []int) + FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) + StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) + Store(data []byte, mask uint, ix uint) +} + +const kCutoffTransformsCount uint32 = 10 + +/* 0, 12, 27, 23, 42, 63, 56, 48, 59, 64 */ +/* 0+0, 4+8, 8+19, 12+11, 16+26, 20+43, 24+32, 28+20, 32+27, 36+28 */ +const kCutoffTransforms uint64 = 0x071B520ADA2D3200 + +type hasherSearchResult struct { + len uint + distance uint + score uint + len_code_delta int +} + +/* kHashMul32 multiplier has these properties: + * The multiplier must be 
odd. Otherwise we may lose the highest bit. + * No long streaks of ones or zeros. + * There is no effort to ensure that it is a prime, the oddity is enough + for this use. + * The number has been tuned heuristically against compression benchmarks. */ +const kHashMul32 uint32 = 0x1E35A7BD + +const kHashMul64 uint64 = 0x1E35A7BD1E35A7BD + +const kHashMul64Long uint64 = 0x1FE35A7BD3579BD3 + +func hash14(data []byte) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return h >> (32 - 14) +} + +func prepareDistanceCache(distance_cache []int, num_distances int) { + if num_distances > 4 { + var last_distance int = distance_cache[0] + distance_cache[4] = last_distance - 1 + distance_cache[5] = last_distance + 1 + distance_cache[6] = last_distance - 2 + distance_cache[7] = last_distance + 2 + distance_cache[8] = last_distance - 3 + distance_cache[9] = last_distance + 3 + if num_distances > 10 { + var next_last_distance int = distance_cache[1] + distance_cache[10] = next_last_distance - 1 + distance_cache[11] = next_last_distance + 1 + distance_cache[12] = next_last_distance - 2 + distance_cache[13] = next_last_distance + 2 + distance_cache[14] = next_last_distance - 3 + distance_cache[15] = next_last_distance + 3 + } + } +} + +const literalByteScore = 135 + +const distanceBitPenalty = 30 + +/* Score must be positive after applying maximal penalty. */ +const scoreBase = (distanceBitPenalty * 8 * 8) + +/* Usually, we always choose the longest backward reference. This function + allows for the exception of that rule. + + If we choose a backward reference that is further away, it will + usually be coded with more bits. We approximate this by assuming + log2(distance). If the distance can be expressed in terms of the + last four distances, we use some heuristic constants to estimate + the bits cost. 
For the first up to four literals we use the bit + cost of the literals from the literal cost model, after that we + use the average bit cost of the cost model. + + This function is used to sometimes discard a longer backward reference + when it is not much longer and the bit cost for encoding it is more + than the saved literals. + + backward_reference_offset MUST be positive. */ +func backwardReferenceScore(copy_length uint, backward_reference_offset uint) uint { + return scoreBase + literalByteScore*uint(copy_length) - distanceBitPenalty*uint(log2FloorNonZero(backward_reference_offset)) +} + +func backwardReferenceScoreUsingLastDistance(copy_length uint) uint { + return literalByteScore*uint(copy_length) + scoreBase + 15 +} + +func backwardReferencePenaltyUsingLastDistance(distance_short_code uint) uint { + return uint(39) + ((0x1CA10 >> (distance_short_code & 0xE)) & 0xE) +} + +func testStaticDictionaryItem(dictionary *encoderDictionary, item uint, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult) bool { + var len uint + var word_idx uint + var offset uint + var matchlen uint + var backward uint + var score uint + len = item & 0x1F + word_idx = item >> 5 + offset = uint(dictionary.words.offsets_by_length[len]) + len*word_idx + if len > max_length { + return false + } + + matchlen = findMatchLengthWithLimit(data, dictionary.words.data[offset:], uint(len)) + if matchlen+uint(dictionary.cutoffTransformsCount) <= len || matchlen == 0 { + return false + } + { + var cut uint = len - matchlen + var transform_id uint = (cut << 2) + uint((dictionary.cutoffTransforms>>(cut*6))&0x3F) + backward = max_backward + 1 + word_idx + (transform_id << dictionary.words.size_bits_by_length[len]) + } + + if backward > max_distance { + return false + } + + score = backwardReferenceScore(matchlen, backward) + if score < out.score { + return false + } + + out.len = matchlen + out.len_code_delta = int(len) - int(matchlen) + out.distance = 
backward + out.score = score + return true +} + +func searchInStaticDictionary(dictionary *encoderDictionary, handle hasherHandle, data []byte, max_length uint, max_backward uint, max_distance uint, out *hasherSearchResult, shallow bool) { + var key uint + var i uint + var self *hasherCommon = handle.Common() + if self.dict_num_matches < self.dict_num_lookups>>7 { + return + } + + key = uint(hash14(data) << 1) + for i = 0; ; (func() { i++; key++ })() { + var tmp uint + if shallow { + tmp = 1 + } else { + tmp = 2 + } + if i >= tmp { + break + } + var item uint = uint(dictionary.hash_table[key]) + self.dict_num_lookups++ + if item != 0 { + var item_matches bool = testStaticDictionaryItem(dictionary, item, data, max_length, max_backward, max_distance, out) + if item_matches { + self.dict_num_matches++ + } + } + } +} + +type backwardMatch struct { + distance uint32 + length_and_code uint32 +} + +func initBackwardMatch(self *backwardMatch, dist uint, len uint) { + self.distance = uint32(dist) + self.length_and_code = uint32(len << 5) +} + +func initDictionaryBackwardMatch(self *backwardMatch, dist uint, len uint, len_code uint) { + self.distance = uint32(dist) + var tmp uint + if len == len_code { + tmp = 0 + } else { + tmp = len_code + } + self.length_and_code = uint32(len<<5 | tmp) +} + +func backwardMatchLength(self *backwardMatch) uint { + return uint(self.length_and_code >> 5) +} + +func backwardMatchLengthCode(self *backwardMatch) uint { + var code uint = uint(self.length_and_code) & 31 + if code != 0 { + return code + } else { + return backwardMatchLength(self) + } +} + +func hasherReset(handle hasherHandle) { + if handle == nil { + return + } + handle.Common().is_prepared_ = false +} + +func newHasher(typ int) hasherHandle { + switch typ { + case 2: + return &hashLongestMatchQuickly{ + bucketBits: 16, + bucketSweep: 1, + hashLen: 5, + useDictionary: true, + } + case 3: + return &hashLongestMatchQuickly{ + bucketBits: 16, + bucketSweep: 2, + hashLen: 5, + 
useDictionary: false, + } + case 4: + return &hashLongestMatchQuickly{ + bucketBits: 17, + bucketSweep: 4, + hashLen: 5, + useDictionary: true, + } + case 5: + return new(h5) + case 6: + return new(h6) + case 10: + return new(h10) + case 35: + return &hashComposite{ + ha: newHasher(3), + hb: &hashRolling{jump: 4}, + } + case 40: + return &hashForgetfulChain{ + bucketBits: 15, + numBanks: 1, + bankBits: 16, + numLastDistancesToCheck: 4, + } + case 41: + return &hashForgetfulChain{ + bucketBits: 15, + numBanks: 1, + bankBits: 16, + numLastDistancesToCheck: 10, + } + case 42: + return &hashForgetfulChain{ + bucketBits: 15, + numBanks: 512, + bankBits: 9, + numLastDistancesToCheck: 16, + } + case 54: + return &hashLongestMatchQuickly{ + bucketBits: 20, + bucketSweep: 4, + hashLen: 7, + useDictionary: false, + } + case 55: + return &hashComposite{ + ha: newHasher(54), + hb: &hashRolling{jump: 4}, + } + case 65: + return &hashComposite{ + ha: newHasher(6), + hb: &hashRolling{jump: 1}, + } + } + + panic(fmt.Sprintf("unknown hasher type: %d", typ)) +} + +func hasherSetup(handle *hasherHandle, params *encoderParams, data []byte, position uint, input_size uint, is_last bool) { + var self hasherHandle = nil + var common *hasherCommon = nil + var one_shot bool = (position == 0 && is_last) + if *handle == nil { + chooseHasher(params, ¶ms.hasher) + self = newHasher(params.hasher.type_) + + *handle = self + common = self.Common() + common.params = params.hasher + self.Initialize(params) + } + + self = *handle + common = self.Common() + if !common.is_prepared_ { + self.Prepare(one_shot, input_size, data) + + if position == 0 { + common.dict_num_lookups = 0 + common.dict_num_matches = 0 + } + + common.is_prepared_ = true + } +} + +func initOrStitchToPreviousBlock(handle *hasherHandle, data []byte, mask uint, params *encoderParams, position uint, input_size uint, is_last bool) { + var self hasherHandle + hasherSetup(handle, params, data, position, input_size, is_last) + self = 
*handle + self.StitchToPreviousBlock(input_size, position, data, mask) +} diff --git a/vendor/github.com/andybalholm/brotli/hash_composite.go b/vendor/github.com/andybalholm/brotli/hash_composite.go new file mode 100644 index 00000000000..a65fe2e6a9a --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_composite.go @@ -0,0 +1,93 @@ +package brotli + +/* Copyright 2018 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func (h *hashComposite) HashTypeLength() uint { + var a uint = h.ha.HashTypeLength() + var b uint = h.hb.HashTypeLength() + if a > b { + return a + } else { + return b + } +} + +func (h *hashComposite) StoreLookahead() uint { + var a uint = h.ha.StoreLookahead() + var b uint = h.hb.StoreLookahead() + if a > b { + return a + } else { + return b + } +} + +/* Composite hasher: This hasher allows to combine two other hashers, HASHER_A + and HASHER_B. */ +type hashComposite struct { + hasherCommon + ha hasherHandle + hb hasherHandle + params *encoderParams +} + +func (h *hashComposite) Initialize(params *encoderParams) { + h.params = params +} + +/* TODO: Initialize of the hashers is defered to Prepare (and params + remembered here) because we don't get the one_shot and input_size params + here that are needed to know the memory size of them. 
Instead provide + those params to all hashers InitializehashComposite */ +func (h *hashComposite) Prepare(one_shot bool, input_size uint, data []byte) { + if h.ha == nil { + var common_a *hasherCommon + var common_b *hasherCommon + + common_a = h.ha.Common() + common_a.params = h.params.hasher + common_a.is_prepared_ = false + common_a.dict_num_lookups = 0 + common_a.dict_num_matches = 0 + h.ha.Initialize(h.params) + + common_b = h.hb.Common() + common_b.params = h.params.hasher + common_b.is_prepared_ = false + common_b.dict_num_lookups = 0 + common_b.dict_num_matches = 0 + h.hb.Initialize(h.params) + } + + h.ha.Prepare(one_shot, input_size, data) + h.hb.Prepare(one_shot, input_size, data) +} + +func (h *hashComposite) Store(data []byte, mask uint, ix uint) { + h.ha.Store(data, mask, ix) + h.hb.Store(data, mask, ix) +} + +func (h *hashComposite) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + h.ha.StoreRange(data, mask, ix_start, ix_end) + h.hb.StoreRange(data, mask, ix_start, ix_end) +} + +func (h *hashComposite) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) { + h.ha.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask) + h.hb.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask) +} + +func (h *hashComposite) PrepareDistanceCache(distance_cache []int) { + h.ha.PrepareDistanceCache(distance_cache) + h.hb.PrepareDistanceCache(distance_cache) +} + +func (h *hashComposite) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + h.ha.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out) + h.hb.FindLongestMatch(dictionary, data, ring_buffer_mask, distance_cache, cur_ix, max_length, max_backward, gap, max_distance, out) +} diff --git 
a/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go b/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go new file mode 100644 index 00000000000..306e46d3dba --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go @@ -0,0 +1,252 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2016 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func (*hashForgetfulChain) HashTypeLength() uint { + return 4 +} + +func (*hashForgetfulChain) StoreLookahead() uint { + return 4 +} + +/* HashBytes is the function that chooses the bucket to place the address in.*/ +func (h *hashForgetfulChain) HashBytes(data []byte) uint { + var hash uint32 = binary.LittleEndian.Uint32(data) * kHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return uint(hash >> (32 - h.bucketBits)) +} + +type slot struct { + delta uint16 + next uint16 +} + +/* A (forgetful) hash table to the data seen by the compressor, to + help create backward references to previous data. + + Hashes are stored in chains which are bucketed to groups. Group of chains + share a storage "bank". When more than "bank size" chain nodes are added, + oldest nodes are replaced; this way several chains may share a tail. 
*/ +type hashForgetfulChain struct { + hasherCommon + + bucketBits uint + numBanks uint + bankBits uint + numLastDistancesToCheck int + + addr []uint32 + head []uint16 + tiny_hash [65536]byte + banks [][]slot + free_slot_idx []uint16 + max_hops uint +} + +func (h *hashForgetfulChain) Initialize(params *encoderParams) { + var q uint + if params.quality > 6 { + q = 7 + } else { + q = 8 + } + h.max_hops = q << uint(params.quality-4) + + bankSize := 1 << h.bankBits + bucketSize := 1 << h.bucketBits + + h.addr = make([]uint32, bucketSize) + h.head = make([]uint16, bucketSize) + h.banks = make([][]slot, h.numBanks) + for i := range h.banks { + h.banks[i] = make([]slot, bankSize) + } + h.free_slot_idx = make([]uint16, h.numBanks) +} + +func (h *hashForgetfulChain) Prepare(one_shot bool, input_size uint, data []byte) { + var partial_prepare_threshold uint = (1 << h.bucketBits) >> 6 + /* Partial preparation is 100 times slower (per socket). */ + if one_shot && input_size <= partial_prepare_threshold { + var i uint + for i = 0; i < input_size; i++ { + var bucket uint = h.HashBytes(data[i:]) + + /* See InitEmpty comment. */ + h.addr[bucket] = 0xCCCCCCCC + + h.head[bucket] = 0xCCCC + } + } else { + /* Fill |addr| array with 0xCCCCCCCC value. Because of wrapping, position + processed by hasher never reaches 3GB + 64M; this makes all new chains + to be terminated after the first node. */ + for i := range h.addr { + h.addr[i] = 0xCCCCCCCC + } + + for i := range h.head { + h.head[i] = 0 + } + } + + h.tiny_hash = [65536]byte{} + for i := range h.free_slot_idx { + h.free_slot_idx[i] = 0 + } +} + +/* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend + node to corresponding chain; also update tiny_hash for current position. 
*/ +func (h *hashForgetfulChain) Store(data []byte, mask uint, ix uint) { + var key uint = h.HashBytes(data[ix&mask:]) + var bank uint = key & (h.numBanks - 1) + idx := uint(h.free_slot_idx[bank]) & ((1 << h.bankBits) - 1) + h.free_slot_idx[bank]++ + var delta uint = ix - uint(h.addr[key]) + h.tiny_hash[uint16(ix)] = byte(key) + if delta > 0xFFFF { + delta = 0xFFFF + } + h.banks[bank][idx].delta = uint16(delta) + h.banks[bank][idx].next = h.head[key] + h.addr[key] = uint32(ix) + h.head[key] = uint16(idx) +} + +func (h *hashForgetfulChain) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + var i uint + for i = ix_start; i < ix_end; i++ { + h.Store(data, mask, i) + } +} + +func (h *hashForgetfulChain) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) { + if num_bytes >= h.HashTypeLength()-1 && position >= 3 { + /* Prepare the hashes for three last bytes of the last write. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + h.Store(ringbuffer, ring_buffer_mask, position-3) + h.Store(ringbuffer, ring_buffer_mask, position-2) + h.Store(ringbuffer, ring_buffer_mask, position-1) + } +} + +func (h *hashForgetfulChain) PrepareDistanceCache(distance_cache []int) { + prepareDistanceCache(distance_cache, h.numLastDistancesToCheck) +} + +/* Find a longest backward match of &data[cur_ix] up to the length of + max_length and stores the position cur_ix in the hash table. + + REQUIRES: PrepareDistanceCachehashForgetfulChain must be invoked for current distance cache + values; if this method is invoked repeatedly with the same distance + cache values, it is enough to invoke PrepareDistanceCachehashForgetfulChain once. + + Does not look for matches longer than max_length. + Does not look for matches further away than max_backward. + Writes the best match into |out|. + |out|->score is updated only if a better match is found. 
*/ +func (h *hashForgetfulChain) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = out.len + var key uint = h.HashBytes(data[cur_ix_masked:]) + var tiny_hash byte = byte(key) + /* Don't accept a short copy from far away. */ + out.len = 0 + + out.len_code_delta = 0 + + /* Try last distance first. */ + for i := 0; i < h.numLastDistancesToCheck; i++ { + var backward uint = uint(distance_cache[i]) + var prev_ix uint = (cur_ix - backward) + + /* For distance code 0 we want to consider 2-byte matches. */ + if i > 0 && h.tiny_hash[uint16(prev_ix)] != tiny_hash { + continue + } + if prev_ix >= cur_ix || backward > max_backward { + continue + } + + prev_ix &= ring_buffer_mask + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 2 { + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + if i != 0 { + score -= backwardReferencePenaltyUsingLastDistance(uint(i)) + } + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + } + { + var bank uint = key & (h.numBanks - 1) + var backward uint = 0 + var hops uint = h.max_hops + var delta uint = cur_ix - uint(h.addr[key]) + var slot uint = uint(h.head[key]) + for { + tmp6 := hops + hops-- + if tmp6 == 0 { + break + } + var prev_ix uint + var last uint = slot + backward += delta + if backward > max_backward { + break + } + prev_ix = (cur_ix - backward) & ring_buffer_mask + slot = uint(h.banks[bank][last].next) + delta = uint(h.banks[bank][last].delta) + if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || 
data[cur_ix_masked+best_len] != data[prev_ix+best_len] { + continue + } + { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + /* Comparing for >= 3 does not change the semantics, but just saves + for a few unnecessary binary logarithms in backward reference + score, since we are not interested in such short matches. */ + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = best_score + } + } + } + } + + h.Store(data, ring_buffer_mask, cur_ix) + } + + if out.score == min_score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false) + } +} diff --git a/vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go b/vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go new file mode 100644 index 00000000000..9375dc15539 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go @@ -0,0 +1,214 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* For BUCKET_SWEEP == 1, enabling the dictionary lookup makes compression + a little faster (0.5% - 1%) and it compresses 0.15% better on small text + and HTML inputs. */ + +func (*hashLongestMatchQuickly) HashTypeLength() uint { + return 8 +} + +func (*hashLongestMatchQuickly) StoreLookahead() uint { + return 8 +} + +/* HashBytes is the function that chooses the bucket to place + the address in. The HashLongestMatch and hashLongestMatchQuickly + classes have separate, different implementations of hashing. 
*/ +func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 { + var hash uint64 = ((binary.LittleEndian.Uint64(data) << (64 - 8*h.hashLen)) * kHashMul64) + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. */ + return uint32(hash >> (64 - h.bucketBits)) +} + +/* A (forgetful) hash table to the data seen by the compressor, to + help create backward references to previous data. + + This is a hash map of fixed size (1 << 16). Starting from the + given index, 1 buckets are used to store values of a key. */ +type hashLongestMatchQuickly struct { + hasherCommon + + bucketBits uint + bucketSweep int + hashLen uint + useDictionary bool + + buckets []uint32 +} + +func (h *hashLongestMatchQuickly) Initialize(params *encoderParams) { + h.buckets = make([]uint32, 1<> 7 + /* Partial preparation is 100 times slower (per socket). */ + if one_shot && input_size <= partial_prepare_threshold { + var i uint + for i = 0; i < input_size; i++ { + var key uint32 = h.HashBytes(data[i:]) + for j := 0; j < h.bucketSweep; j++ { + h.buckets[key+uint32(j)] = 0 + } + } + } else { + /* It is not strictly necessary to fill this buffer here, but + not filling will make the results of the compression stochastic + (but correct). This is because random data would cause the + system to find accidentally good backward references here and there. */ + for i := range h.buckets { + h.buckets[i] = 0 + } + } +} + +/* Look at 5 bytes at &data[ix & mask]. + Compute a hash from these, and store the value somewhere within + [ix .. ix+3]. */ +func (h *hashLongestMatchQuickly) Store(data []byte, mask uint, ix uint) { + var key uint32 = h.HashBytes(data[ix&mask:]) + var off uint32 = uint32(ix>>3) % uint32(h.bucketSweep) + /* Wiggle the value with the bucket sweep range. 
*/ + h.buckets[key+off] = uint32(ix) +} + +func (h *hashLongestMatchQuickly) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { + var i uint + for i = ix_start; i < ix_end; i++ { + h.Store(data, mask, i) + } +} + +func (h *hashLongestMatchQuickly) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) { + if num_bytes >= h.HashTypeLength()-1 && position >= 3 { + /* Prepare the hashes for three last bytes of the last write. + These could not be calculated before, since they require knowledge + of both the previous and the current block. */ + h.Store(ringbuffer, ringbuffer_mask, position-3) + h.Store(ringbuffer, ringbuffer_mask, position-2) + h.Store(ringbuffer, ringbuffer_mask, position-1) + } +} + +func (*hashLongestMatchQuickly) PrepareDistanceCache(distance_cache []int) { +} + +/* Find a longest backward match of &data[cur_ix & ring_buffer_mask] + up to the length of max_length and stores the position cur_ix in the + hash table. + + Does not look for matches longer than max_length. + Does not look for matches further away than max_backward. + Writes the best match into |out|. + |out|->score is updated only if a better match is found. 
*/ +func (h *hashLongestMatchQuickly) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var best_len_in uint = out.len + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var key uint32 = h.HashBytes(data[cur_ix_masked:]) + var compare_char int = int(data[cur_ix_masked+best_len_in]) + var min_score uint = out.score + var best_score uint = out.score + var best_len uint = best_len_in + var cached_backward uint = uint(distance_cache[0]) + var prev_ix uint = cur_ix - cached_backward + var bucket []uint32 + out.len_code_delta = 0 + if prev_ix < cur_ix { + prev_ix &= uint(uint32(ring_buffer_mask)) + if compare_char == int(data[prev_ix+best_len]) { + var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + var score uint = backwardReferenceScoreUsingLastDistance(uint(len)) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = uint(len) + out.distance = cached_backward + out.score = best_score + compare_char = int(data[cur_ix_masked+best_len]) + if h.bucketSweep == 1 { + h.buckets[key] = uint32(cur_ix) + return + } + } + } + } + } + + if h.bucketSweep == 1 { + var backward uint + var len uint + + /* Only one to look for, don't bother to prepare for a loop. 
*/ + prev_ix = uint(h.buckets[key]) + + h.buckets[key] = uint32(cur_ix) + backward = cur_ix - prev_ix + prev_ix &= uint(uint32(ring_buffer_mask)) + if compare_char != int(data[prev_ix+best_len_in]) { + return + } + + if backward == 0 || backward > max_backward { + return + } + + len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + out.len = uint(len) + out.distance = backward + out.score = score + return + } + } + } else { + bucket = h.buckets[key:] + var i int + prev_ix = uint(bucket[0]) + bucket = bucket[1:] + for i = 0; i < h.bucketSweep; (func() { i++; tmp3 := bucket; bucket = bucket[1:]; prev_ix = uint(tmp3[0]) })() { + var backward uint = cur_ix - prev_ix + var len uint + prev_ix &= uint(uint32(ring_buffer_mask)) + if compare_char != int(data[prev_ix+best_len]) { + continue + } + + if backward == 0 || backward > max_backward { + continue + } + + len = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length) + if len >= 4 { + var score uint = backwardReferenceScore(uint(len), backward) + if best_score < score { + best_score = score + best_len = uint(len) + out.len = best_len + out.distance = backward + out.score = score + compare_char = int(data[cur_ix_masked+best_len]) + } + } + } + } + + if h.useDictionary && min_score == out.score { + searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, true) + } + + h.buckets[key+uint32((cur_ix>>3)%uint(h.bucketSweep))] = uint32(cur_ix) +} diff --git a/vendor/github.com/andybalholm/brotli/hash_rolling.go b/vendor/github.com/andybalholm/brotli/hash_rolling.go new file mode 100644 index 00000000000..6630fc07e4b --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/hash_rolling.go @@ -0,0 +1,168 @@ +package brotli + +/* Copyright 2018 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* NOTE: this hasher does not search in the dictionary. It is used as + backup-hasher, the main hasher already searches in it. */ + +const kRollingHashMul32 uint32 = 69069 + +const kInvalidPosHashRolling uint32 = 0xffffffff + +/* This hasher uses a longer forward length, but returning a higher value here + will hurt compression by the main hasher when combined with a composite + hasher. The hasher tests for forward itself instead. */ +func (*hashRolling) HashTypeLength() uint { + return 4 +} + +func (*hashRolling) StoreLookahead() uint { + return 4 +} + +/* Computes a code from a single byte. A lookup table of 256 values could be + used, but simply adding 1 works about as good. */ +func (*hashRolling) HashByte(b byte) uint32 { + return uint32(b) + 1 +} + +func (h *hashRolling) HashRollingFunctionInitial(state uint32, add byte, factor uint32) uint32 { + return uint32(factor*state + h.HashByte(add)) +} + +func (h *hashRolling) HashRollingFunction(state uint32, add byte, rem byte, factor uint32, factor_remove uint32) uint32 { + return uint32(factor*state + h.HashByte(add) - factor_remove*h.HashByte(rem)) +} + +/* Rolling hash for long distance long string matches. Stores one position + per bucket, bucket key is computed over a long region. 
*/ +type hashRolling struct { + hasherCommon + + jump int + + state uint32 + table []uint32 + next_ix uint + factor uint32 + factor_remove uint32 +} + +func (h *hashRolling) Initialize(params *encoderParams) { + h.state = 0 + h.next_ix = 0 + + h.factor = kRollingHashMul32 + + /* Compute the factor of the oldest byte to remove: factor**steps modulo + 0xffffffff (the multiplications rely on 32-bit overflow) */ + h.factor_remove = 1 + + for i := 0; i < 32; i += h.jump { + h.factor_remove *= h.factor + } + + h.table = make([]uint32, 16777216) + for i := 0; i < 16777216; i++ { + h.table[i] = kInvalidPosHashRolling + } +} + +func (h *hashRolling) Prepare(one_shot bool, input_size uint, data []byte) { + /* Too small size, cannot use this hasher. */ + if input_size < 32 { + return + } + h.state = 0 + for i := 0; i < 32; i += h.jump { + h.state = h.HashRollingFunctionInitial(h.state, data[i], h.factor) + } +} + +func (*hashRolling) Store(data []byte, mask uint, ix uint) { +} + +func (*hashRolling) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) { +} + +func (h *hashRolling) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) { + var position_masked uint + /* In this case we must re-initialize the hasher from scratch from the + current position. */ + + var available uint = num_bytes + if position&uint(h.jump-1) != 0 { + var diff uint = uint(h.jump) - (position & uint(h.jump-1)) + if diff > available { + available = 0 + } else { + available = available - diff + } + position += diff + } + + position_masked = position & ring_buffer_mask + + /* wrapping around ringbuffer not handled. 
*/ + if available > ring_buffer_mask-position_masked { + available = ring_buffer_mask - position_masked + } + + h.Prepare(false, available, ringbuffer[position&ring_buffer_mask:]) + h.next_ix = position +} + +func (*hashRolling) PrepareDistanceCache(distance_cache []int) { +} + +func (h *hashRolling) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) { + var cur_ix_masked uint = cur_ix & ring_buffer_mask + var pos uint = h.next_ix + + if cur_ix&uint(h.jump-1) != 0 { + return + } + + /* Not enough lookahead */ + if max_length < 32 { + return + } + + for pos = h.next_ix; pos <= cur_ix; pos += uint(h.jump) { + var code uint32 = h.state & ((16777216 * 64) - 1) + var rem byte = data[pos&ring_buffer_mask] + var add byte = data[(pos+32)&ring_buffer_mask] + var found_ix uint = uint(kInvalidPosHashRolling) + + h.state = h.HashRollingFunction(h.state, add, rem, h.factor, h.factor_remove) + + if code < 16777216 { + found_ix = uint(h.table[code]) + h.table[code] = uint32(pos) + if pos == cur_ix && uint32(found_ix) != kInvalidPosHashRolling { + /* The cast to 32-bit makes backward distances up to 4GB work even + if cur_ix is above 4GB, despite using 32-bit values in the table. 
*/ + var backward uint = uint(uint32(cur_ix - found_ix)) + if backward <= max_backward { + var found_ix_masked uint = found_ix & ring_buffer_mask + var len uint = findMatchLengthWithLimit(data[found_ix_masked:], data[cur_ix_masked:], max_length) + if len >= 4 && len > out.len { + var score uint = backwardReferenceScore(uint(len), backward) + if score > out.score { + out.len = uint(len) + out.distance = backward + out.score = score + out.len_code_delta = 0 + } + } + } + } + } + } + + h.next_ix = cur_ix + uint(h.jump) +} diff --git a/vendor/github.com/andybalholm/brotli/histogram.go b/vendor/github.com/andybalholm/brotli/histogram.go new file mode 100644 index 00000000000..0346622beb3 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/histogram.go @@ -0,0 +1,226 @@ +package brotli + +import "math" + +/* The distance symbols effectively used by "Large Window Brotli" (32-bit). */ +const numHistogramDistanceSymbols = 544 + +type histogramLiteral struct { + data_ [numLiteralSymbols]uint32 + total_count_ uint + bit_cost_ float64 +} + +func histogramClearLiteral(self *histogramLiteral) { + self.data_ = [numLiteralSymbols]uint32{} + self.total_count_ = 0 + self.bit_cost_ = math.MaxFloat64 +} + +func clearHistogramsLiteral(array []histogramLiteral, length uint) { + var i uint + for i = 0; i < length; i++ { + histogramClearLiteral(&array[i:][0]) + } +} + +func histogramAddLiteral(self *histogramLiteral, val uint) { + self.data_[val]++ + self.total_count_++ +} + +func histogramAddVectorLiteral(self *histogramLiteral, p []byte, n uint) { + self.total_count_ += n + n += 1 + for { + n-- + if n == 0 { + break + } + self.data_[p[0]]++ + p = p[1:] + } +} + +func histogramAddHistogramLiteral(self *histogramLiteral, v *histogramLiteral) { + var i uint + self.total_count_ += v.total_count_ + for i = 0; i < numLiteralSymbols; i++ { + self.data_[i] += v.data_[i] + } +} + +func histogramDataSizeLiteral() uint { + return numLiteralSymbols +} + +type histogramCommand struct { + data_ 
[numCommandSymbols]uint32 + total_count_ uint + bit_cost_ float64 +} + +func histogramClearCommand(self *histogramCommand) { + self.data_ = [numCommandSymbols]uint32{} + self.total_count_ = 0 + self.bit_cost_ = math.MaxFloat64 +} + +func clearHistogramsCommand(array []histogramCommand, length uint) { + var i uint + for i = 0; i < length; i++ { + histogramClearCommand(&array[i:][0]) + } +} + +func histogramAddCommand(self *histogramCommand, val uint) { + self.data_[val]++ + self.total_count_++ +} + +func histogramAddVectorCommand(self *histogramCommand, p []uint16, n uint) { + self.total_count_ += n + n += 1 + for { + n-- + if n == 0 { + break + } + self.data_[p[0]]++ + p = p[1:] + } +} + +func histogramAddHistogramCommand(self *histogramCommand, v *histogramCommand) { + var i uint + self.total_count_ += v.total_count_ + for i = 0; i < numCommandSymbols; i++ { + self.data_[i] += v.data_[i] + } +} + +func histogramDataSizeCommand() uint { + return numCommandSymbols +} + +type histogramDistance struct { + data_ [numDistanceSymbols]uint32 + total_count_ uint + bit_cost_ float64 +} + +func histogramClearDistance(self *histogramDistance) { + self.data_ = [numDistanceSymbols]uint32{} + self.total_count_ = 0 + self.bit_cost_ = math.MaxFloat64 +} + +func clearHistogramsDistance(array []histogramDistance, length uint) { + var i uint + for i = 0; i < length; i++ { + histogramClearDistance(&array[i:][0]) + } +} + +func histogramAddDistance(self *histogramDistance, val uint) { + self.data_[val]++ + self.total_count_++ +} + +func histogramAddVectorDistance(self *histogramDistance, p []uint16, n uint) { + self.total_count_ += n + n += 1 + for { + n-- + if n == 0 { + break + } + self.data_[p[0]]++ + p = p[1:] + } +} + +func histogramAddHistogramDistance(self *histogramDistance, v *histogramDistance) { + var i uint + self.total_count_ += v.total_count_ + for i = 0; i < numDistanceSymbols; i++ { + self.data_[i] += v.data_[i] + } +} + +func histogramDataSizeDistance() uint { + return 
numDistanceSymbols +} + +type blockSplitIterator struct { + split_ *blockSplit + idx_ uint + type_ uint + length_ uint +} + +func initBlockSplitIterator(self *blockSplitIterator, split *blockSplit) { + self.split_ = split + self.idx_ = 0 + self.type_ = 0 + if len(split.lengths) > 0 { + self.length_ = uint(split.lengths[0]) + } else { + self.length_ = 0 + } +} + +func blockSplitIteratorNext(self *blockSplitIterator) { + if self.length_ == 0 { + self.idx_++ + self.type_ = uint(self.split_.types[self.idx_]) + self.length_ = uint(self.split_.lengths[self.idx_]) + } + + self.length_-- +} + +func buildHistogramsWithContext(cmds []command, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit, ringbuffer []byte, start_pos uint, mask uint, prev_byte byte, prev_byte2 byte, context_modes []int, literal_histograms []histogramLiteral, insert_and_copy_histograms []histogramCommand, copy_dist_histograms []histogramDistance) { + var pos uint = start_pos + var literal_it blockSplitIterator + var insert_and_copy_it blockSplitIterator + var dist_it blockSplitIterator + + initBlockSplitIterator(&literal_it, literal_split) + initBlockSplitIterator(&insert_and_copy_it, insert_and_copy_split) + initBlockSplitIterator(&dist_it, dist_split) + for i := range cmds { + var cmd *command = &cmds[i] + var j uint + blockSplitIteratorNext(&insert_and_copy_it) + histogramAddCommand(&insert_and_copy_histograms[insert_and_copy_it.type_], uint(cmd.cmd_prefix_)) + + /* TODO: unwrap iterator blocks. 
*/ + for j = uint(cmd.insert_len_); j != 0; j-- { + var context uint + blockSplitIteratorNext(&literal_it) + context = literal_it.type_ + if context_modes != nil { + var lut contextLUT = getContextLUT(context_modes[context]) + context = (context << literalContextBits) + uint(getContext(prev_byte, prev_byte2, lut)) + } + + histogramAddLiteral(&literal_histograms[context], uint(ringbuffer[pos&mask])) + prev_byte2 = prev_byte + prev_byte = ringbuffer[pos&mask] + pos++ + } + + pos += uint(commandCopyLen(cmd)) + if commandCopyLen(cmd) != 0 { + prev_byte2 = ringbuffer[(pos-2)&mask] + prev_byte = ringbuffer[(pos-1)&mask] + if cmd.cmd_prefix_ >= 128 { + var context uint + blockSplitIteratorNext(&dist_it) + context = uint(uint32(dist_it.type_< bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} + +// acceptSpec describes an Accept* header. +type acceptSpec struct { + Value string + Q float64 +} + +// parseAccept parses Accept* headers. 
+func parseAccept(header http.Header, key string) (specs []acceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec acceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} diff --git a/vendor/github.com/andybalholm/brotli/huffman.go b/vendor/github.com/andybalholm/brotli/huffman.go new file mode 100644 index 00000000000..182f3d2a552 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/huffman.go @@ -0,0 +1,653 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Utilities for building Huffman decoding tables. */ + +const huffmanMaxCodeLength = 15 + +/* Maximum possible Huffman table size for an alphabet size of (index * 32), + max code length 15 and root table bits 8. */ +var kMaxHuffmanTableSize = []uint16{ + 256, + 402, + 436, + 468, + 500, + 534, + 566, + 598, + 630, + 662, + 694, + 726, + 758, + 790, + 822, + 854, + 886, + 920, + 952, + 984, + 1016, + 1048, + 1080, + 1112, + 1144, + 1176, + 1208, + 1240, + 1272, + 1304, + 1336, + 1368, + 1400, + 1432, + 1464, + 1496, + 1528, +} + +/* BROTLI_NUM_BLOCK_LEN_SYMBOLS == 26 */ +const huffmanMaxSize26 = 396 + +/* BROTLI_MAX_BLOCK_TYPE_SYMBOLS == 258 */ +const huffmanMaxSize258 = 632 + +/* BROTLI_MAX_CONTEXT_MAP_SYMBOLS == 272 */ +const huffmanMaxSize272 = 646 + +const huffmanMaxCodeLengthCodeLength = 5 + +/* Do not create this struct directly - use the ConstructHuffmanCode + * constructor below! 
*/ +type huffmanCode struct { + bits byte + value uint16 +} + +func constructHuffmanCode(bits byte, value uint16) huffmanCode { + var h huffmanCode + h.bits = bits + h.value = value + return h +} + +/* Builds Huffman lookup table assuming code lengths are in symbol order. */ + +/* Builds Huffman lookup table assuming code lengths are in symbol order. + Returns size of resulting table. */ + +/* Builds a simple Huffman table. The |num_symbols| parameter is to be + interpreted as follows: 0 means 1 symbol, 1 means 2 symbols, + 2 means 3 symbols, 3 means 4 symbols with lengths [2, 2, 2, 2], + 4 means 4 symbols with lengths [1, 2, 3, 3]. */ + +/* Contains a collection of Huffman trees with the same alphabet size. */ +/* max_symbol is needed due to simple codes since log2(alphabet_size) could be + greater than log2(max_symbol). */ +type huffmanTreeGroup struct { + htrees [][]huffmanCode + codes []huffmanCode + alphabet_size uint16 + max_symbol uint16 + num_htrees uint16 +} + +const reverseBitsMax = 8 + +const reverseBitsBase = 0 + +var kReverseBits = [1 << reverseBitsMax]byte{ + 0x00, + 0x80, + 0x40, + 0xC0, + 0x20, + 0xA0, + 0x60, + 0xE0, + 0x10, + 0x90, + 0x50, + 0xD0, + 0x30, + 0xB0, + 0x70, + 0xF0, + 0x08, + 0x88, + 0x48, + 0xC8, + 0x28, + 0xA8, + 0x68, + 0xE8, + 0x18, + 0x98, + 0x58, + 0xD8, + 0x38, + 0xB8, + 0x78, + 0xF8, + 0x04, + 0x84, + 0x44, + 0xC4, + 0x24, + 0xA4, + 0x64, + 0xE4, + 0x14, + 0x94, + 0x54, + 0xD4, + 0x34, + 0xB4, + 0x74, + 0xF4, + 0x0C, + 0x8C, + 0x4C, + 0xCC, + 0x2C, + 0xAC, + 0x6C, + 0xEC, + 0x1C, + 0x9C, + 0x5C, + 0xDC, + 0x3C, + 0xBC, + 0x7C, + 0xFC, + 0x02, + 0x82, + 0x42, + 0xC2, + 0x22, + 0xA2, + 0x62, + 0xE2, + 0x12, + 0x92, + 0x52, + 0xD2, + 0x32, + 0xB2, + 0x72, + 0xF2, + 0x0A, + 0x8A, + 0x4A, + 0xCA, + 0x2A, + 0xAA, + 0x6A, + 0xEA, + 0x1A, + 0x9A, + 0x5A, + 0xDA, + 0x3A, + 0xBA, + 0x7A, + 0xFA, + 0x06, + 0x86, + 0x46, + 0xC6, + 0x26, + 0xA6, + 0x66, + 0xE6, + 0x16, + 0x96, + 0x56, + 0xD6, + 0x36, + 0xB6, + 0x76, + 0xF6, + 0x0E, + 0x8E, 
+ 0x4E, + 0xCE, + 0x2E, + 0xAE, + 0x6E, + 0xEE, + 0x1E, + 0x9E, + 0x5E, + 0xDE, + 0x3E, + 0xBE, + 0x7E, + 0xFE, + 0x01, + 0x81, + 0x41, + 0xC1, + 0x21, + 0xA1, + 0x61, + 0xE1, + 0x11, + 0x91, + 0x51, + 0xD1, + 0x31, + 0xB1, + 0x71, + 0xF1, + 0x09, + 0x89, + 0x49, + 0xC9, + 0x29, + 0xA9, + 0x69, + 0xE9, + 0x19, + 0x99, + 0x59, + 0xD9, + 0x39, + 0xB9, + 0x79, + 0xF9, + 0x05, + 0x85, + 0x45, + 0xC5, + 0x25, + 0xA5, + 0x65, + 0xE5, + 0x15, + 0x95, + 0x55, + 0xD5, + 0x35, + 0xB5, + 0x75, + 0xF5, + 0x0D, + 0x8D, + 0x4D, + 0xCD, + 0x2D, + 0xAD, + 0x6D, + 0xED, + 0x1D, + 0x9D, + 0x5D, + 0xDD, + 0x3D, + 0xBD, + 0x7D, + 0xFD, + 0x03, + 0x83, + 0x43, + 0xC3, + 0x23, + 0xA3, + 0x63, + 0xE3, + 0x13, + 0x93, + 0x53, + 0xD3, + 0x33, + 0xB3, + 0x73, + 0xF3, + 0x0B, + 0x8B, + 0x4B, + 0xCB, + 0x2B, + 0xAB, + 0x6B, + 0xEB, + 0x1B, + 0x9B, + 0x5B, + 0xDB, + 0x3B, + 0xBB, + 0x7B, + 0xFB, + 0x07, + 0x87, + 0x47, + 0xC7, + 0x27, + 0xA7, + 0x67, + 0xE7, + 0x17, + 0x97, + 0x57, + 0xD7, + 0x37, + 0xB7, + 0x77, + 0xF7, + 0x0F, + 0x8F, + 0x4F, + 0xCF, + 0x2F, + 0xAF, + 0x6F, + 0xEF, + 0x1F, + 0x9F, + 0x5F, + 0xDF, + 0x3F, + 0xBF, + 0x7F, + 0xFF, +} + +const reverseBitsLowest = (uint64(1) << (reverseBitsMax - 1 + reverseBitsBase)) + +/* Returns reverse(num >> BROTLI_REVERSE_BITS_BASE, BROTLI_REVERSE_BITS_MAX), + where reverse(value, len) is the bit-wise reversal of the len least + significant bits of value. */ +func reverseBits8(num uint64) uint64 { + return uint64(kReverseBits[num]) +} + +/* Stores code in table[0], table[step], table[2*step], ..., table[end] */ +/* Assumes that end is an integer multiple of step */ +func replicateValue(table []huffmanCode, step int, end int, code huffmanCode) { + for { + end -= step + table[end] = code + if end <= 0 { + break + } + } +} + +/* Returns the table width of the next 2nd level table. |count| is the histogram + of bit lengths for the remaining symbols, |len| is the code length of the + next processed symbol. 
*/ +func nextTableBitSize(count []uint16, len int, root_bits int) int { + var left int = 1 << uint(len-root_bits) + for len < huffmanMaxCodeLength { + left -= int(count[len]) + if left <= 0 { + break + } + len++ + left <<= 1 + } + + return len - root_bits +} + +func buildCodeLengthsHuffmanTable(table []huffmanCode, code_lengths []byte, count []uint16) { + var code huffmanCode /* current table entry */ /* symbol index in original or sorted table */ /* prefix code */ /* prefix code addend */ /* step size to replicate values in current table */ /* size of current table */ /* symbols sorted by code length */ + var symbol int + var key uint64 + var key_step uint64 + var step int + var table_size int + var sorted [codeLengthCodes]int + var offset [huffmanMaxCodeLengthCodeLength + 1]int + var bits int + var bits_count int + /* offsets in sorted table for each length */ + assert(huffmanMaxCodeLengthCodeLength <= reverseBitsMax) + + /* Generate offsets into sorted symbol table by code length. */ + symbol = -1 + + bits = 1 + var i int + for i = 0; i < huffmanMaxCodeLengthCodeLength; i++ { + symbol += int(count[bits]) + offset[bits] = symbol + bits++ + } + + /* Symbols with code length 0 are placed after all other symbols. */ + offset[0] = codeLengthCodes - 1 + + /* Sort symbols by length, by symbol order within each length. */ + symbol = codeLengthCodes + + for { + var i int + for i = 0; i < 6; i++ { + symbol-- + sorted[offset[code_lengths[symbol]]] = symbol + offset[code_lengths[symbol]]-- + } + if symbol == 0 { + break + } + } + + table_size = 1 << huffmanMaxCodeLengthCodeLength + + /* Special case: all symbols but one have 0 code length. */ + if offset[0] == 0 { + code = constructHuffmanCode(0, uint16(sorted[0])) + for key = 0; key < uint64(table_size); key++ { + table[key] = code + } + + return + } + + /* Fill in table. 
*/ + key = 0 + + key_step = reverseBitsLowest + symbol = 0 + bits = 1 + step = 2 + for { + for bits_count = int(count[bits]); bits_count != 0; bits_count-- { + code = constructHuffmanCode(byte(bits), uint16(sorted[symbol])) + symbol++ + replicateValue(table[reverseBits8(key):], step, table_size, code) + key += key_step + } + + step <<= 1 + key_step >>= 1 + bits++ + if bits > huffmanMaxCodeLengthCodeLength { + break + } + } +} + +func buildHuffmanTable(root_table []huffmanCode, root_bits int, symbol_lists symbolList, count []uint16) uint32 { + var code huffmanCode /* current table entry */ /* next available space in table */ /* current code length */ /* symbol index in original or sorted table */ /* prefix code */ /* prefix code addend */ /* 2nd level table prefix code */ /* 2nd level table prefix code addend */ /* step size to replicate values in current table */ /* key length of current table */ /* size of current table */ /* sum of root table size and 2nd level table sizes */ + var table []huffmanCode + var len int + var symbol int + var key uint64 + var key_step uint64 + var sub_key uint64 + var sub_key_step uint64 + var step int + var table_bits int + var table_size int + var total_size int + var max_length int = -1 + var bits int + var bits_count int + + assert(root_bits <= reverseBitsMax) + assert(huffmanMaxCodeLength-root_bits <= reverseBitsMax) + + for symbolListGet(symbol_lists, max_length) == 0xFFFF { + max_length-- + } + max_length += huffmanMaxCodeLength + 1 + + table = root_table + table_bits = root_bits + table_size = 1 << uint(table_bits) + total_size = table_size + + /* Fill in the root table. Reduce the table size to if possible, + and create the repetitions by memcpy. 
*/ + if table_bits > max_length { + table_bits = max_length + table_size = 1 << uint(table_bits) + } + + key = 0 + key_step = reverseBitsLowest + bits = 1 + step = 2 + for { + symbol = bits - (huffmanMaxCodeLength + 1) + for bits_count = int(count[bits]); bits_count != 0; bits_count-- { + symbol = int(symbolListGet(symbol_lists, symbol)) + code = constructHuffmanCode(byte(bits), uint16(symbol)) + replicateValue(table[reverseBits8(key):], step, table_size, code) + key += key_step + } + + step <<= 1 + key_step >>= 1 + bits++ + if bits > table_bits { + break + } + } + + /* If root_bits != table_bits then replicate to fill the remaining slots. */ + for total_size != table_size { + copy(table[table_size:], table[:uint(table_size)]) + table_size <<= 1 + } + + /* Fill in 2nd level tables and add pointers to root table. */ + key_step = reverseBitsLowest >> uint(root_bits-1) + + sub_key = reverseBitsLowest << 1 + sub_key_step = reverseBitsLowest + len = root_bits + 1 + step = 2 + for ; len <= max_length; len++ { + symbol = len - (huffmanMaxCodeLength + 1) + for ; count[len] != 0; count[len]-- { + if sub_key == reverseBitsLowest<<1 { + table = table[table_size:] + table_bits = nextTableBitSize(count, int(len), root_bits) + table_size = 1 << uint(table_bits) + total_size += table_size + sub_key = reverseBits8(key) + key += key_step + root_table[sub_key] = constructHuffmanCode(byte(table_bits+root_bits), uint16(uint64(uint(-cap(table)+cap(root_table)))-sub_key)) + sub_key = 0 + } + + symbol = int(symbolListGet(symbol_lists, symbol)) + code = constructHuffmanCode(byte(len-root_bits), uint16(symbol)) + replicateValue(table[reverseBits8(sub_key):], step, table_size, code) + sub_key += sub_key_step + } + + step <<= 1 + sub_key_step >>= 1 + } + + return uint32(total_size) +} + +func buildSimpleHuffmanTable(table []huffmanCode, root_bits int, val []uint16, num_symbols uint32) uint32 { + var table_size uint32 = 1 + var goal_size uint32 = 1 << uint(root_bits) + switch num_symbols { + 
case 0: + table[0] = constructHuffmanCode(0, val[0]) + + case 1: + if val[1] > val[0] { + table[0] = constructHuffmanCode(1, val[0]) + table[1] = constructHuffmanCode(1, val[1]) + } else { + table[0] = constructHuffmanCode(1, val[1]) + table[1] = constructHuffmanCode(1, val[0]) + } + + table_size = 2 + + case 2: + table[0] = constructHuffmanCode(1, val[0]) + table[2] = constructHuffmanCode(1, val[0]) + if val[2] > val[1] { + table[1] = constructHuffmanCode(2, val[1]) + table[3] = constructHuffmanCode(2, val[2]) + } else { + table[1] = constructHuffmanCode(2, val[2]) + table[3] = constructHuffmanCode(2, val[1]) + } + + table_size = 4 + + case 3: + var i int + var k int + for i = 0; i < 3; i++ { + for k = i + 1; k < 4; k++ { + if val[k] < val[i] { + var t uint16 = val[k] + val[k] = val[i] + val[i] = t + } + } + } + + table[0] = constructHuffmanCode(2, val[0]) + table[2] = constructHuffmanCode(2, val[1]) + table[1] = constructHuffmanCode(2, val[2]) + table[3] = constructHuffmanCode(2, val[3]) + table_size = 4 + + case 4: + if val[3] < val[2] { + var t uint16 = val[3] + val[3] = val[2] + val[2] = t + } + + table[0] = constructHuffmanCode(1, val[0]) + table[1] = constructHuffmanCode(2, val[1]) + table[2] = constructHuffmanCode(1, val[0]) + table[3] = constructHuffmanCode(3, val[2]) + table[4] = constructHuffmanCode(1, val[0]) + table[5] = constructHuffmanCode(2, val[1]) + table[6] = constructHuffmanCode(1, val[0]) + table[7] = constructHuffmanCode(3, val[3]) + table_size = 8 + } + + for table_size != goal_size { + copy(table[table_size:], table[:uint(table_size)]) + table_size <<= 1 + } + + return goal_size +} diff --git a/vendor/github.com/andybalholm/brotli/literal_cost.go b/vendor/github.com/andybalholm/brotli/literal_cost.go new file mode 100644 index 00000000000..5a9ace94ee0 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/literal_cost.go @@ -0,0 +1,182 @@ +package brotli + +func utf8Position(last uint, c uint, clamp uint) uint { + if c < 128 { + return 0 
/* Next one is the 'Byte 1' again. */ + } else if c >= 192 { /* Next one is the 'Byte 2' of utf-8 encoding. */ + return brotli_min_size_t(1, clamp) + } else { + /* Let's decide over the last byte if this ends the sequence. */ + if last < 0xE0 { + return 0 /* Completed two or three byte coding. */ /* Next one is the 'Byte 3' of utf-8 encoding. */ + } else { + return brotli_min_size_t(2, clamp) + } + } +} + +func decideMultiByteStatsLevel(pos uint, len uint, mask uint, data []byte) uint { + var counts = [3]uint{0} /* should be 2, but 1 compresses better. */ + var max_utf8 uint = 1 + var last_c uint = 0 + var i uint + for i = 0; i < len; i++ { + var c uint = uint(data[(pos+i)&mask]) + counts[utf8Position(last_c, c, 2)]++ + last_c = c + } + + if counts[2] < 500 { + max_utf8 = 1 + } + + if counts[1]+counts[2] < 25 { + max_utf8 = 0 + } + + return max_utf8 +} + +func estimateBitCostsForLiteralsUTF8(pos uint, len uint, mask uint, data []byte, cost []float32) { + var max_utf8 uint = decideMultiByteStatsLevel(pos, uint(len), mask, data) + /* Bootstrap histograms. */ + var histogram = [3][256]uint{[256]uint{0}} + var window_half uint = 495 + var in_window uint = brotli_min_size_t(window_half, uint(len)) + var in_window_utf8 = [3]uint{0} + /* max_utf8 is 0 (normal ASCII single byte modeling), + 1 (for 2-byte UTF-8 modeling), or 2 (for 3-byte UTF-8 modeling). */ + + var i uint + { + var last_c uint = 0 + var utf8_pos uint = 0 + for i = 0; i < in_window; i++ { + var c uint = uint(data[(pos+i)&mask]) + histogram[utf8_pos][c]++ + in_window_utf8[utf8_pos]++ + utf8_pos = utf8Position(last_c, c, max_utf8) + last_c = c + } + } + + /* Compute bit costs with sliding window. */ + for i = 0; i < len; i++ { + if i >= window_half { + var c uint + var last_c uint + if i < window_half+1 { + c = 0 + } else { + c = uint(data[(pos+i-window_half-1)&mask]) + } + if i < window_half+2 { + last_c = 0 + } else { + last_c = uint(data[(pos+i-window_half-2)&mask]) + } + /* Remove a byte in the past. 
*/ + + var utf8_pos2 uint = utf8Position(last_c, c, max_utf8) + histogram[utf8_pos2][data[(pos+i-window_half)&mask]]-- + in_window_utf8[utf8_pos2]-- + } + + if i+window_half < len { + var c uint = uint(data[(pos+i+window_half-1)&mask]) + var last_c uint = uint(data[(pos+i+window_half-2)&mask]) + /* Add a byte in the future. */ + + var utf8_pos2 uint = utf8Position(last_c, c, max_utf8) + histogram[utf8_pos2][data[(pos+i+window_half)&mask]]++ + in_window_utf8[utf8_pos2]++ + } + { + var c uint + var last_c uint + if i < 1 { + c = 0 + } else { + c = uint(data[(pos+i-1)&mask]) + } + if i < 2 { + last_c = 0 + } else { + last_c = uint(data[(pos+i-2)&mask]) + } + var utf8_pos uint = utf8Position(last_c, c, max_utf8) + var masked_pos uint = (pos + i) & mask + var histo uint = histogram[utf8_pos][data[masked_pos]] + var lit_cost float64 + if histo == 0 { + histo = 1 + } + + lit_cost = fastLog2(in_window_utf8[utf8_pos]) - fastLog2(histo) + lit_cost += 0.02905 + if lit_cost < 1.0 { + lit_cost *= 0.5 + lit_cost += 0.5 + } + + /* Make the first bytes more expensive -- seems to help, not sure why. + Perhaps because the entropy source is changing its properties + rapidly in the beginning of the file, perhaps because the beginning + of the data is a statistical "anomaly". */ + if i < 2000 { + lit_cost += 0.7 - (float64(2000-i) / 2000.0 * 0.35) + } + + cost[i] = float32(lit_cost) + } + } +} + +func estimateBitCostsForLiterals(pos uint, len uint, mask uint, data []byte, cost []float32) { + if isMostlyUTF8(data, pos, mask, uint(len), kMinUTF8Ratio) { + estimateBitCostsForLiteralsUTF8(pos, uint(len), mask, data, cost) + return + } else { + var histogram = [256]uint{0} + var window_half uint = 2000 + var in_window uint = brotli_min_size_t(window_half, uint(len)) + var i uint + /* Bootstrap histogram. */ + for i = 0; i < in_window; i++ { + histogram[data[(pos+i)&mask]]++ + } + + /* Compute bit costs with sliding window. 
*/ + for i = 0; i < len; i++ { + var histo uint + if i >= window_half { + /* Remove a byte in the past. */ + histogram[data[(pos+i-window_half)&mask]]-- + + in_window-- + } + + if i+window_half < len { + /* Add a byte in the future. */ + histogram[data[(pos+i+window_half)&mask]]++ + + in_window++ + } + + histo = histogram[data[(pos+i)&mask]] + if histo == 0 { + histo = 1 + } + { + var lit_cost float64 = fastLog2(in_window) - fastLog2(histo) + lit_cost += 0.029 + if lit_cost < 1.0 { + lit_cost *= 0.5 + lit_cost += 0.5 + } + + cost[i] = float32(lit_cost) + } + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go new file mode 100644 index 00000000000..507d1cae64c --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go @@ -0,0 +1,34 @@ +package matchfinder + +// An absoluteMatch is like a Match, but it stores indexes into the byte +// stream instead of lengths. +type absoluteMatch struct { + // Start is the index of the first byte. + Start int + + // End is the index of the byte after the last byte + // (so that End - Start = Length). + End int + + // Match is the index of the previous data that matches + // (Start - Match = Distance). + Match int +} + +// A matchEmitter manages the output of matches for a MatchFinder. +type matchEmitter struct { + // Dst is the destination slice that Matches are added to. + Dst []Match + + // NextEmit is the index of the next byte to emit. 
+ NextEmit int +} + +func (e *matchEmitter) emit(m absoluteMatch) { + e.Dst = append(e.Dst, Match{ + Unmatched: m.Start - e.NextEmit, + Length: m.End - m.Start, + Distance: m.Start - m.Match, + }) + e.NextEmit = m.End +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/m0.go b/vendor/github.com/andybalholm/brotli/matchfinder/m0.go new file mode 100644 index 00000000000..773b7c49f3f --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/m0.go @@ -0,0 +1,169 @@ +package matchfinder + +import ( + "encoding/binary" +) + +// M0 is an implementation of the MatchFinder interface based +// on the algorithm used by snappy, but modified to be more like the algorithm +// used by compression level 0 of the brotli reference implementation. +// +// It has a maximum block size of 65536 bytes. +type M0 struct { + // Lazy turns on "lazy matching," for higher compression but less speed. + Lazy bool + + MaxDistance int + MaxLength int +} + +func (M0) Reset() {} + +const ( + m0HashLen = 5 + + m0TableBits = 14 + m0TableSize = 1 << m0TableBits + m0Shift = 32 - m0TableBits + // m0TableMask is redundant, but helps the compiler eliminate bounds + // checks. + m0TableMask = m0TableSize - 1 +) + +func (m M0) hash(data uint64) uint64 { + hash := (data << (64 - 8*m0HashLen)) * hashMul64 + return hash >> (64 - m0TableBits) +} + +// FindMatches looks for matches in src, appends them to dst, and returns dst. +// src must not be longer than 65536 bytes. +func (m M0) FindMatches(dst []Match, src []byte) []Match { + const inputMargin = 16 - 1 + const minNonLiteralBlockSize = 1 + 1 + inputMargin + + if len(src) < minNonLiteralBlockSize { + dst = append(dst, Match{ + Unmatched: len(src), + }) + return dst + } + if len(src) > 65536 { + panic("block too long") + } + + var table [m0TableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. 
+ sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := m.hash(binary.LittleEndian.Uint64(src[s:])) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&m0TableMask]) + table[nextHash&m0TableMask] = uint16(s) + nextHash = m.hash(binary.LittleEndian.Uint64(src[nextS:])) + if m.MaxDistance != 0 && s-candidate > m.MaxDistance { + continue + } + if binary.LittleEndian.Uint32(src[s:]) == binary.LittleEndian.Uint32(src[candidate:]) { + break + } + } + + // Invariant: we have a 4-byte match at s. 
+ base := s + s = extendMatch(src, candidate+4, s+4) + + origBase := base + if m.Lazy && base+1 < sLimit { + newBase := base + 1 + h := m.hash(binary.LittleEndian.Uint64(src[newBase:])) + newCandidate := int(table[h&m0TableMask]) + table[h&m0TableMask] = uint16(newBase) + okDistance := true + if m.MaxDistance != 0 && newBase-newCandidate > m.MaxDistance { + okDistance = false + } + if okDistance && binary.LittleEndian.Uint32(src[newBase:]) == binary.LittleEndian.Uint32(src[newCandidate:]) { + newS := extendMatch(src, newCandidate+4, newBase+4) + if newS-newBase > s-base+1 { + s = newS + base = newBase + candidate = newCandidate + } + } + } + + if m.MaxLength != 0 && s-base > m.MaxLength { + s = base + m.MaxLength + } + dst = append(dst, Match{ + Unmatched: base - nextEmit, + Length: s - base, + Distance: base - candidate, + }) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if m.Lazy { + // If lazy matching is enabled, we update the hash table for + // every byte in the match. + for i := origBase + 2; i < s-1; i++ { + x := binary.LittleEndian.Uint64(src[i:]) + table[m.hash(x)&m0TableMask] = uint16(i) + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. 
+ x := binary.LittleEndian.Uint64(src[s-1:]) + prevHash := m.hash(x >> 0) + table[prevHash&m0TableMask] = uint16(s - 1) + nextHash = m.hash(x >> 8) + } + +emitRemainder: + if nextEmit < len(src) { + dst = append(dst, Match{ + Unmatched: len(src) - nextEmit, + }) + } + return dst +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/m4.go b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go new file mode 100644 index 00000000000..818947255df --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go @@ -0,0 +1,308 @@ +package matchfinder + +import ( + "encoding/binary" + "math/bits" + "runtime" +) + +// M4 is an implementation of the MatchFinder +// interface that uses a hash table to find matches, +// optional match chains, +// and the advanced parsing technique from +// https://fastcompression.blogspot.com/2011/12/advanced-parsing-strategies.html. +type M4 struct { + // MaxDistance is the maximum distance (in bytes) to look back for + // a match. The default is 65535. + MaxDistance int + + // MinLength is the length of the shortest match to return. + // The default is 4. + MinLength int + + // HashLen is the number of bytes to use to calculate the hashes. + // The maximum is 8 and the default is 6. + HashLen int + + // TableBits is the number of bits in the hash table indexes. + // The default is 17 (128K entries). + TableBits int + + // ChainLength is how many entries to search on the "match chain" of older + // locations with the same hash as the current location. + ChainLength int + + // DistanceBitCost is used when comparing two matches to see + // which is better. The comparison is primarily based on the length + // of the matches, but it can also take the distance into account, + // in terms of the number of bits needed to represent the distance. + // One byte of length is given a score of 256, so 32 (256/8) would + // be a reasonable first guess for the value of one bit. 
+ // (The default is 0, which bases the comparison solely on length.) + DistanceBitCost int + + table []uint32 + chain []uint16 + + history []byte +} + +func (q *M4) Reset() { + for i := range q.table { + q.table[i] = 0 + } + q.history = q.history[:0] + q.chain = q.chain[:0] +} + +func (q *M4) score(m absoluteMatch) int { + return (m.End-m.Start)*256 + (bits.LeadingZeros32(uint32(m.Start-m.Match))-32)*q.DistanceBitCost +} + +func (q *M4) FindMatches(dst []Match, src []byte) []Match { + if q.MaxDistance == 0 { + q.MaxDistance = 65535 + } + if q.MinLength == 0 { + q.MinLength = 4 + } + if q.HashLen == 0 { + q.HashLen = 6 + } + if q.TableBits == 0 { + q.TableBits = 17 + } + if len(q.table) < 1< q.MaxDistance*2 { + // Trim down the history buffer. + delta := len(q.history) - q.MaxDistance + copy(q.history, q.history[delta:]) + q.history = q.history[:q.MaxDistance] + if q.ChainLength > 0 { + q.chain = q.chain[:q.MaxDistance] + } + + for i, v := range q.table { + newV := int(v) - delta + if newV < 0 { + newV = 0 + } + q.table[i] = uint32(newV) + } + } + + // Append src to the history buffer. + e.NextEmit = len(q.history) + q.history = append(q.history, src...) + if q.ChainLength > 0 { + q.chain = append(q.chain, make([]uint16, len(src))...) + } + src = q.history + + // matches stores the matches that have been found but not emitted, + // in reverse order. (matches[0] is the most recent one.) + var matches [3]absoluteMatch + for i := e.NextEmit; i < len(src)-7; i++ { + if matches[0] != (absoluteMatch{}) && i >= matches[0].End { + // We have found some matches, and we're far enough along that we probably + // won't find overlapping matches, so we might as well emit them. 
+ if matches[1] != (absoluteMatch{}) { + if matches[1].End > matches[0].Start { + matches[1].End = matches[0].Start + } + if matches[1].End-matches[1].Start >= q.MinLength && q.score(matches[1]) > 0 { + e.emit(matches[1]) + } + } + e.emit(matches[0]) + matches = [3]absoluteMatch{} + } + + // Calculate and store the hash. + h := ((binary.LittleEndian.Uint64(src[i:]) & (1<<(8*q.HashLen) - 1)) * hashMul64) >> (64 - q.TableBits) + candidate := int(q.table[h]) + q.table[h] = uint32(i) + if q.ChainLength > 0 && candidate != 0 { + delta := i - candidate + if delta < 1<<16 { + q.chain[i] = uint16(delta) + } + } + + if i < matches[0].End && i != matches[0].End+2-q.HashLen { + continue + } + if candidate == 0 || i-candidate > q.MaxDistance { + continue + } + + // Look for a match. + var currentMatch absoluteMatch + + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength && q.score(m) > 0 { + currentMatch = m + } + } + + for j := 0; j < q.ChainLength; j++ { + delta := q.chain[candidate] + if delta == 0 { + break + } + candidate -= int(delta) + if candidate <= 0 || i-candidate > q.MaxDistance { + break + } + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength && q.score(m) > q.score(currentMatch) { + currentMatch = m + } + } + } + + if currentMatch.End-currentMatch.Start < q.MinLength { + continue + } + + overlapPenalty := 0 + if matches[0] != (absoluteMatch{}) { + overlapPenalty = 275 + if currentMatch.Start <= matches[1].End { + // This match would completely replace the previous match, + // so there is no penalty for overlap. 
+ overlapPenalty = 0 + } + } + + if q.score(currentMatch) <= q.score(matches[0])+overlapPenalty { + continue + } + + matches = [3]absoluteMatch{ + currentMatch, + matches[0], + matches[1], + } + + if matches[2] == (absoluteMatch{}) { + continue + } + + // We have three matches, so it's time to emit one and/or eliminate one. + switch { + case matches[0].Start < matches[2].End: + // The first and third matches overlap; discard the one in between. + matches = [3]absoluteMatch{ + matches[0], + matches[2], + absoluteMatch{}, + } + + case matches[0].Start < matches[2].End+q.MinLength: + // The first and third matches don't overlap, but there's no room for + // another match between them. Emit the first match and discard the second. + e.emit(matches[2]) + matches = [3]absoluteMatch{ + matches[0], + absoluteMatch{}, + absoluteMatch{}, + } + + default: + // Emit the first match, shortening it if necessary to avoid overlap with the second. + if matches[2].End > matches[1].Start { + matches[2].End = matches[1].Start + } + if matches[2].End-matches[2].Start >= q.MinLength && q.score(matches[2]) > 0 { + e.emit(matches[2]) + } + matches[2] = absoluteMatch{} + } + } + + // We've found all the matches now; emit the remaining ones. + if matches[1] != (absoluteMatch{}) { + if matches[1].End > matches[0].Start { + matches[1].End = matches[0].Start + } + if matches[1].End-matches[1].Start >= q.MinLength && q.score(matches[1]) > 0 { + e.emit(matches[1]) + } + } + if matches[0] != (absoluteMatch{}) { + e.emit(matches[0]) + } + + dst = e.Dst + if e.NextEmit < len(src) { + dst = append(dst, Match{ + Unmatched: len(src) - e.NextEmit, + }) + } + + return dst +} + +const hashMul64 = 0x1E35A7BD1E35A7BD + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. 
+// +// It assumes that: +// +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + switch runtime.GOARCH { + case "amd64": + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + for j+8 < len(src) { + iBytes := binary.LittleEndian.Uint64(src[i:]) + jBytes := binary.LittleEndian.Uint64(src[j:]) + if iBytes != jBytes { + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + return j + bits.TrailingZeros64(iBytes^jBytes)>>3 + } + i, j = i+8, j+8 + } + case "386": + // On a 32-bit CPU, we do it 4 bytes at a time. + for j+4 < len(src) { + iBytes := binary.LittleEndian.Uint32(src[i:]) + jBytes := binary.LittleEndian.Uint32(src[j:]) + if iBytes != jBytes { + return j + bits.TrailingZeros32(iBytes^jBytes)>>3 + } + i, j = i+4, j+4 + } + } + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +// Given a 4-byte match at src[start] and src[candidate], extendMatch2 extends it +// upward as far as possible, and downward no farther than to min. 
+func extendMatch2(src []byte, start, candidate, min int) absoluteMatch { + end := extendMatch(src, candidate+4, start+4) + for start > min && candidate > 0 && src[start-1] == src[candidate-1] { + start-- + candidate-- + } + return absoluteMatch{ + Start: start, + End: end, + Match: candidate, + } +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go b/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go new file mode 100644 index 00000000000..f6bcfdb39cd --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go @@ -0,0 +1,103 @@ +// The matchfinder package defines reusable components for data compression. +// +// Many compression libraries have two main parts: +// - Something that looks for repeated sequences of bytes +// - An encoder for the compressed data format (often an entropy coder) +// +// Although these are logically two separate steps, the implementations are +// usually closely tied together. You can't use flate's matcher with snappy's +// encoder, for example. This package defines interfaces and an intermediate +// representation to allow mixing and matching compression components. +package matchfinder + +import "io" + +// A Match is the basic unit of LZ77 compression. +type Match struct { + Unmatched int // the number of unmatched bytes since the previous match + Length int // the number of bytes in the matched string; it may be 0 at the end of the input + Distance int // how far back in the stream to copy from +} + +// A MatchFinder performs the LZ77 stage of compression, looking for matches. +type MatchFinder interface { + // FindMatches looks for matches in src, appends them to dst, and returns dst. + FindMatches(dst []Match, src []byte) []Match + + // Reset clears any internal state, preparing the MatchFinder to be used with + // a new stream. + Reset() +} + +// An Encoder encodes the data in its final format. 
+type Encoder interface { + // Encode appends the encoded format of src to dst, using the match + // information from matches. + Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte + + // Reset clears any internal state, preparing the Encoder to be used with + // a new stream. + Reset() +} + +// A Writer uses MatchFinder and Encoder to write compressed data to Dest. +type Writer struct { + Dest io.Writer + MatchFinder MatchFinder + Encoder Encoder + + // BlockSize is the number of bytes to compress at a time. If it is zero, + // each Write operation will be treated as one block. + BlockSize int + + err error + inBuf []byte + outBuf []byte + matches []Match +} + +func (w *Writer) Write(p []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + + if w.BlockSize == 0 { + return w.writeBlock(p, false) + } + + w.inBuf = append(w.inBuf, p...) + var pos int + for pos = 0; pos+w.BlockSize <= len(w.inBuf) && w.err == nil; pos += w.BlockSize { + w.writeBlock(w.inBuf[pos:pos+w.BlockSize], false) + } + if pos > 0 { + n := copy(w.inBuf, w.inBuf[pos:]) + w.inBuf = w.inBuf[:n] + } + + return len(p), w.err +} + +func (w *Writer) writeBlock(p []byte, lastBlock bool) (n int, err error) { + w.outBuf = w.outBuf[:0] + w.matches = w.MatchFinder.FindMatches(w.matches[:0], p) + w.outBuf = w.Encoder.Encode(w.outBuf, p, w.matches, lastBlock) + _, w.err = w.Dest.Write(w.outBuf) + return len(p), w.err +} + +func (w *Writer) Close() error { + w.writeBlock(w.inBuf, true) + w.inBuf = w.inBuf[:0] + return w.err +} + +func (w *Writer) Reset(newDest io.Writer) { + w.MatchFinder.Reset() + w.Encoder.Reset() + w.err = nil + w.inBuf = w.inBuf[:0] + w.outBuf = w.outBuf[:0] + w.matches = w.matches[:0] + w.Dest = newDest +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go b/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go new file mode 100644 index 00000000000..75ecc5908b9 --- /dev/null +++ 
b/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go @@ -0,0 +1,53 @@ +package matchfinder + +import "fmt" + +// A TextEncoder is an Encoder that produces a human-readable representation of +// the LZ77 compression. Matches are replaced with symbols. +type TextEncoder struct{} + +func (t TextEncoder) Reset() {} + +func (t TextEncoder) Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte { + pos := 0 + for _, m := range matches { + if m.Unmatched > 0 { + dst = append(dst, src[pos:pos+m.Unmatched]...) + pos += m.Unmatched + } + if m.Length > 0 { + dst = append(dst, []byte(fmt.Sprintf("<%d,%d>", m.Length, m.Distance))...) + pos += m.Length + } + } + if pos < len(src) { + dst = append(dst, src[pos:]...) + } + return dst +} + +// A NoMatchFinder implements MatchFinder, but doesn't find any matches. +// It can be used to implement the equivalent of the standard library flate package's +// HuffmanOnly setting. +type NoMatchFinder struct{} + +func (n NoMatchFinder) Reset() {} + +func (n NoMatchFinder) FindMatches(dst []Match, src []byte) []Match { + return append(dst, Match{ + Unmatched: len(src), + }) +} + +// AutoReset wraps a MatchFinder that can return references to data in previous +// blocks, and calls Reset before each block. It is useful for (e.g.) using a +// snappy Encoder with a MatchFinder designed for flate. (Snappy doesn't +// support references between blocks.) +type AutoReset struct { + MatchFinder +} + +func (a AutoReset) FindMatches(dst []Match, src []byte) []Match { + a.Reset() + return a.MatchFinder.FindMatches(dst, src) +} diff --git a/vendor/github.com/andybalholm/brotli/memory.go b/vendor/github.com/andybalholm/brotli/memory.go new file mode 100644 index 00000000000..a07c7050a07 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/memory.go @@ -0,0 +1,66 @@ +package brotli + +/* Copyright 2016 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* +Dynamically grows array capacity to at least the requested size +T: data type +A: array +C: capacity +R: requested size +*/ +func brotli_ensure_capacity_uint8_t(a *[]byte, c *uint, r uint) { + if *c < r { + var new_size uint = *c + if new_size == 0 { + new_size = r + } + + for new_size < r { + new_size *= 2 + } + + if cap(*a) < int(new_size) { + var new_array []byte = make([]byte, new_size) + if *c != 0 { + copy(new_array, (*a)[:*c]) + } + + *a = new_array + } else { + *a = (*a)[:new_size] + } + + *c = new_size + } +} + +func brotli_ensure_capacity_uint32_t(a *[]uint32, c *uint, r uint) { + var new_array []uint32 + if *c < r { + var new_size uint = *c + if new_size == 0 { + new_size = r + } + + for new_size < r { + new_size *= 2 + } + + if cap(*a) < int(new_size) { + new_array = make([]uint32, new_size) + if *c != 0 { + copy(new_array, (*a)[:*c]) + } + + *a = new_array + } else { + *a = (*a)[:new_size] + } + *c = new_size + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock.go b/vendor/github.com/andybalholm/brotli/metablock.go new file mode 100644 index 00000000000..3014df8cdf1 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock.go @@ -0,0 +1,574 @@ +package brotli + +import ( + "sync" +) + +/* Copyright 2014 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Algorithms for distributing the literals and commands of a metablock between + block types and contexts. 
*/ + +type metaBlockSplit struct { + literal_split blockSplit + command_split blockSplit + distance_split blockSplit + literal_context_map []uint32 + literal_context_map_size uint + distance_context_map []uint32 + distance_context_map_size uint + literal_histograms []histogramLiteral + literal_histograms_size uint + command_histograms []histogramCommand + command_histograms_size uint + distance_histograms []histogramDistance + distance_histograms_size uint +} + +var metaBlockPool sync.Pool + +func getMetaBlockSplit() *metaBlockSplit { + mb, _ := metaBlockPool.Get().(*metaBlockSplit) + + if mb == nil { + mb = &metaBlockSplit{} + } else { + initBlockSplit(&mb.literal_split) + initBlockSplit(&mb.command_split) + initBlockSplit(&mb.distance_split) + mb.literal_context_map = mb.literal_context_map[:0] + mb.literal_context_map_size = 0 + mb.distance_context_map = mb.distance_context_map[:0] + mb.distance_context_map_size = 0 + mb.literal_histograms = mb.literal_histograms[:0] + mb.command_histograms = mb.command_histograms[:0] + mb.distance_histograms = mb.distance_histograms[:0] + } + return mb +} + +func freeMetaBlockSplit(mb *metaBlockSplit) { + metaBlockPool.Put(mb) +} + +func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32) { + var dist_params *distanceParams = ¶ms.dist + var alphabet_size uint32 + var max_distance uint32 + + dist_params.distance_postfix_bits = npostfix + dist_params.num_direct_distance_codes = ndirect + + alphabet_size = uint32(distanceAlphabetSize(uint(npostfix), uint(ndirect), maxDistanceBits)) + max_distance = ndirect + (1 << (maxDistanceBits + npostfix + 2)) - (1 << (npostfix + 2)) + + if params.large_window { + var bound = [maxNpostfix + 1]uint32{0, 4, 12, 28} + var postfix uint32 = 1 << npostfix + alphabet_size = uint32(distanceAlphabetSize(uint(npostfix), uint(ndirect), largeMaxDistanceBits)) + + /* The maximum distance is set so that no distance symbol used can encode + a distance larger than 
BROTLI_MAX_ALLOWED_DISTANCE with all + its extra bits set. */ + if ndirect < bound[npostfix] { + max_distance = maxAllowedDistance - (bound[npostfix] - ndirect) + } else if ndirect >= bound[npostfix]+postfix { + max_distance = (3 << 29) - 4 + (ndirect - bound[npostfix]) + } else { + max_distance = maxAllowedDistance + } + } + + dist_params.alphabet_size = alphabet_size + dist_params.max_distance = uint(max_distance) +} + +func recomputeDistancePrefixes(cmds []command, orig_params *distanceParams, new_params *distanceParams) { + if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes { + return + } + + for i := range cmds { + var cmd *command = &cmds[i] + if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { + prefixEncodeCopyDistance(uint(commandRestoreDistanceCode(cmd, orig_params)), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_) + } + } +} + +func computeDistanceCost(cmds []command, orig_params *distanceParams, new_params *distanceParams, cost *float64) bool { + var equal_params bool = false + var dist_prefix uint16 + var dist_extra uint32 + var extra_bits float64 = 0.0 + var histo histogramDistance + histogramClearDistance(&histo) + + if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes { + equal_params = true + } + + for i := range cmds { + cmd := &cmds[i] + if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { + if equal_params { + dist_prefix = cmd.dist_prefix_ + } else { + var distance uint32 = commandRestoreDistanceCode(cmd, orig_params) + if distance > uint32(new_params.max_distance) { + return false + } + + prefixEncodeCopyDistance(uint(distance), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &dist_prefix, &dist_extra) + } + + 
histogramAddDistance(&histo, uint(dist_prefix)&0x3FF) + extra_bits += float64(dist_prefix >> 10) + } + } + + *cost = populationCostDistance(&histo) + extra_bits + return true +} + +var buildMetaBlock_kMaxNumberOfHistograms uint = 256 + +func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParams, prev_byte byte, prev_byte2 byte, cmds []command, literal_context_mode int, mb *metaBlockSplit) { + var distance_histograms []histogramDistance + var literal_histograms []histogramLiteral + var literal_context_modes []int = nil + var literal_histograms_size uint + var distance_histograms_size uint + var i uint + var literal_context_multiplier uint = 1 + var npostfix uint32 + var ndirect_msb uint32 = 0 + var check_orig bool = true + var best_dist_cost float64 = 1e99 + var orig_params encoderParams = *params + /* Histogram ids need to fit in one byte. */ + + var new_params encoderParams = *params + + for npostfix = 0; npostfix <= maxNpostfix; npostfix++ { + for ; ndirect_msb < 16; ndirect_msb++ { + var ndirect uint32 = ndirect_msb << npostfix + var skip bool + var dist_cost float64 + initDistanceParams(&new_params, npostfix, ndirect) + if npostfix == orig_params.dist.distance_postfix_bits && ndirect == orig_params.dist.num_direct_distance_codes { + check_orig = false + } + + skip = !computeDistanceCost(cmds, &orig_params.dist, &new_params.dist, &dist_cost) + if skip || (dist_cost > best_dist_cost) { + break + } + + best_dist_cost = dist_cost + params.dist = new_params.dist + } + + if ndirect_msb > 0 { + ndirect_msb-- + } + ndirect_msb /= 2 + } + + if check_orig { + var dist_cost float64 + computeDistanceCost(cmds, &orig_params.dist, &orig_params.dist, &dist_cost) + if dist_cost < best_dist_cost { + /* NB: currently unused; uncomment when more param tuning is added. 
*/ + /* best_dist_cost = dist_cost; */ + params.dist = orig_params.dist + } + } + + recomputeDistancePrefixes(cmds, &orig_params.dist, ¶ms.dist) + + splitBlock(cmds, ringbuffer, pos, mask, params, &mb.literal_split, &mb.command_split, &mb.distance_split) + + if !params.disable_literal_context_modeling { + literal_context_multiplier = 1 << literalContextBits + literal_context_modes = make([]int, (mb.literal_split.num_types)) + for i = 0; i < mb.literal_split.num_types; i++ { + literal_context_modes[i] = literal_context_mode + } + } + + literal_histograms_size = mb.literal_split.num_types * literal_context_multiplier + literal_histograms = make([]histogramLiteral, literal_histograms_size) + clearHistogramsLiteral(literal_histograms, literal_histograms_size) + + distance_histograms_size = mb.distance_split.num_types << distanceContextBits + distance_histograms = make([]histogramDistance, distance_histograms_size) + clearHistogramsDistance(distance_histograms, distance_histograms_size) + + mb.command_histograms_size = mb.command_split.num_types + if cap(mb.command_histograms) < int(mb.command_histograms_size) { + mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size)) + } else { + mb.command_histograms = mb.command_histograms[:mb.command_histograms_size] + } + clearHistogramsCommand(mb.command_histograms, mb.command_histograms_size) + + buildHistogramsWithContext(cmds, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms) + literal_context_modes = nil + + mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits + if cap(mb.literal_context_map) < int(mb.literal_context_map_size) { + mb.literal_context_map = make([]uint32, (mb.literal_context_map_size)) + } else { + mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size] + } + + mb.literal_histograms_size = 
mb.literal_context_map_size + if cap(mb.literal_histograms) < int(mb.literal_histograms_size) { + mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size)) + } else { + mb.literal_histograms = mb.literal_histograms[:mb.literal_histograms_size] + } + + clusterHistogramsLiteral(literal_histograms, literal_histograms_size, buildMetaBlock_kMaxNumberOfHistograms, mb.literal_histograms, &mb.literal_histograms_size, mb.literal_context_map) + literal_histograms = nil + + if params.disable_literal_context_modeling { + /* Distribute assignment to all contexts. */ + for i = mb.literal_split.num_types; i != 0; { + var j uint = 0 + i-- + for ; j < 1< 0 { + var entropy [maxStaticContexts]float64 + var combined_histo []histogramLiteral = make([]histogramLiteral, (2 * num_contexts)) + var combined_entropy [2 * maxStaticContexts]float64 + var diff = [2]float64{0.0} + /* Try merging the set of histograms for the current block type with the + respective set of histograms for the last and second last block types. + Decide over the split based on the total reduction of entropy across + all contexts. */ + + var i uint + for i = 0; i < num_contexts; i++ { + var curr_histo_ix uint = self.curr_histogram_ix_ + i + var j uint + entropy[i] = bitsEntropy(histograms[curr_histo_ix].data_[:], self.alphabet_size_) + for j = 0; j < 2; j++ { + var jx uint = j*num_contexts + i + var last_histogram_ix uint = self.last_histogram_ix_[j] + i + combined_histo[jx] = histograms[curr_histo_ix] + histogramAddHistogramLiteral(&combined_histo[jx], &histograms[last_histogram_ix]) + combined_entropy[jx] = bitsEntropy(combined_histo[jx].data_[0:], self.alphabet_size_) + diff[j] += combined_entropy[jx] - entropy[i] - last_entropy[jx] + } + } + + if split.num_types < self.max_block_types_ && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. 
*/ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = split.num_types * num_contexts + for i = 0; i < num_contexts; i++ { + last_entropy[num_contexts+i] = last_entropy[i] + last_entropy[i] = entropy[i] + } + + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_ += num_contexts + if self.curr_histogram_ix_ < *self.histograms_size_ { + clearHistogramsLiteral(self.histograms_[self.curr_histogram_ix_:], self.num_contexts_) + } + + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + for i = 0; i < num_contexts; i++ { + histograms[self.last_histogram_ix_[0]+i] = combined_histo[num_contexts+i] + last_entropy[num_contexts+i] = last_entropy[i] + last_entropy[i] = combined_entropy[num_contexts+i] + histogramClearLiteral(&histograms[self.curr_histogram_ix_+i]) + } + + self.num_blocks_++ + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. 
*/ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + for i = 0; i < num_contexts; i++ { + histograms[self.last_histogram_ix_[0]+i] = combined_histo[i] + last_entropy[i] = combined_entropy[i] + if split.num_types == 1 { + last_entropy[num_contexts+i] = last_entropy[i] + } + + histogramClearLiteral(&histograms[self.curr_histogram_ix_+i]) + } + + self.block_size_ = 0 + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + + combined_histo = nil + } + + if is_final { + *self.histograms_size_ = split.num_types * num_contexts + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current block type and context. When the + current block reaches the target size, decides on merging the block. */ +func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, context uint) { + histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_+context], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + contextBlockSplitterFinishBlock(self, false) /* is_final = */ + } +} + +func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaBlockSplit) { + var i uint + mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits + if cap(mb.literal_context_map) < int(mb.literal_context_map_size) { + mb.literal_context_map = make([]uint32, (mb.literal_context_map_size)) + } else { + mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size] + } + + for i = 0; i < mb.literal_split.num_types; i++ { + var offset uint32 = uint32(i * num_contexts) + var j uint + for j = 0; j < 1<= 128 { + blockSplitterAddSymbolDistance(&dist_blocks, uint(cmd.dist_prefix_)&0x3FF) + } + } + } + + if num_contexts == 1 { + blockSplitterFinishBlockLiteral(&lit_blocks.plain, true) /* is_final = */ + } else { + contextBlockSplitterFinishBlock(&lit_blocks.ctx, true) /* is_final = */ + } + + 
blockSplitterFinishBlockCommand(&cmd_blocks, true) /* is_final = */ + blockSplitterFinishBlockDistance(&dist_blocks, true) /* is_final = */ + + if num_contexts > 1 { + mapStaticContexts(num_contexts, static_context_map, mb) + } +} + +func buildMetaBlockGreedy(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) { + if num_contexts == 1 { + buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, 1, nil, commands, mb) + } else { + buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, num_contexts, static_context_map, commands, mb) + } +} + +func optimizeHistograms(num_distance_codes uint32, mb *metaBlockSplit) { + var good_for_rle [numCommandSymbols]byte + var i uint + for i = 0; i < mb.literal_histograms_size; i++ { + optimizeHuffmanCountsForRLE(256, mb.literal_histograms[i].data_[:], good_for_rle[:]) + } + + for i = 0; i < mb.command_histograms_size; i++ { + optimizeHuffmanCountsForRLE(numCommandSymbols, mb.command_histograms[i].data_[:], good_for_rle[:]) + } + + for i = 0; i < mb.distance_histograms_size; i++ { + optimizeHuffmanCountsForRLE(uint(num_distance_codes), mb.distance_histograms[i].data_[:], good_for_rle[:]) + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock_command.go b/vendor/github.com/andybalholm/brotli/metablock_command.go new file mode 100644 index 00000000000..14c7b77135d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock_command.go @@ -0,0 +1,165 @@ +package brotli + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Greedy block splitter for one block category (literal, command or distance). 
+ */ +type blockSplitterCommand struct { + alphabet_size_ uint + min_block_size_ uint + split_threshold_ float64 + num_blocks_ uint + split_ *blockSplit + histograms_ []histogramCommand + histograms_size_ *uint + target_block_size_ uint + block_size_ uint + curr_histogram_ix_ uint + last_histogram_ix_ [2]uint + last_entropy_ [2]float64 + merge_last_count_ uint +} + +func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramCommand, histograms_size *uint) { + var max_num_blocks uint = num_symbols/min_block_size + 1 + var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1) + /* We have to allocate one more histogram than the maximum number of block + types for the current histogram when the meta-block is too big. */ + self.alphabet_size_ = alphabet_size + + self.min_block_size_ = min_block_size + self.split_threshold_ = split_threshold + self.num_blocks_ = 0 + self.split_ = split + self.histograms_size_ = histograms_size + self.target_block_size_ = min_block_size + self.block_size_ = 0 + self.curr_histogram_ix_ = 0 + self.merge_last_count_ = 0 + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks) + self.split_.num_blocks = max_num_blocks + *histograms_size = max_num_types + if histograms == nil || cap(*histograms) < int(*histograms_size) { + *histograms = make([]histogramCommand, (*histograms_size)) + } else { + *histograms = (*histograms)[:*histograms_size] + } + self.histograms_ = *histograms + + /* Clear only current histogram. 
*/ + histogramClearCommand(&self.histograms_[0]) + + self.last_histogram_ix_[1] = 0 + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] +} + +/* Does either of three things: + (1) emits the current block with a new block type; + (2) emits the current block with the type of the second last block; + (3) merges the current block with the last block. */ +func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool) { + var split *blockSplit = self.split_ + var last_entropy []float64 = self.last_entropy_[:] + var histograms []histogramCommand = self.histograms_ + self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_) + if self.num_blocks_ == 0 { + /* Create first block. */ + split.lengths[0] = uint32(self.block_size_) + + split.types[0] = 0 + last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_) + last_entropy[1] = last_entropy[0] + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + } else if self.block_size_ > 0 { + var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_) + var combined_histo [2]histogramCommand + var combined_entropy [2]float64 + var diff [2]float64 + var j uint + for j = 0; j < 2; j++ { + var last_histogram_ix uint = self.last_histogram_ix_[j] + combined_histo[j] = histograms[self.curr_histogram_ix_] + histogramAddHistogramCommand(&combined_histo[j], &histograms[last_histogram_ix]) + combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_) + diff[j] = combined_entropy[j] - entropy - last_entropy[j] + } + + if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. 
*/ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = uint(byte(split.num_types)) + last_entropy[1] = last_entropy[0] + last_entropy[0] = entropy + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + histograms[self.last_histogram_ix_[0]] = combined_histo[1] + last_entropy[1] = last_entropy[0] + last_entropy[0] = combined_entropy[1] + self.num_blocks_++ + self.block_size_ = 0 + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + histograms[self.last_histogram_ix_[0]] = combined_histo[0] + last_entropy[0] = combined_entropy[0] + if split.num_types == 1 { + last_entropy[1] = last_entropy[0] + } + + self.block_size_ = 0 + histogramClearCommand(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + } + + if is_final { + *self.histograms_size_ = split.num_types + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current histogram. 
When the current histogram + reaches the target size, decides on merging the block. */ +func blockSplitterAddSymbolCommand(self *blockSplitterCommand, symbol uint) { + histogramAddCommand(&self.histograms_[self.curr_histogram_ix_], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + blockSplitterFinishBlockCommand(self, false) /* is_final = */ + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock_distance.go b/vendor/github.com/andybalholm/brotli/metablock_distance.go new file mode 100644 index 00000000000..5110a810e96 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock_distance.go @@ -0,0 +1,165 @@ +package brotli + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Greedy block splitter for one block category (literal, command or distance). + */ +type blockSplitterDistance struct { + alphabet_size_ uint + min_block_size_ uint + split_threshold_ float64 + num_blocks_ uint + split_ *blockSplit + histograms_ []histogramDistance + histograms_size_ *uint + target_block_size_ uint + block_size_ uint + curr_histogram_ix_ uint + last_histogram_ix_ [2]uint + last_entropy_ [2]float64 + merge_last_count_ uint +} + +func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramDistance, histograms_size *uint) { + var max_num_blocks uint = num_symbols/min_block_size + 1 + var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1) + /* We have to allocate one more histogram than the maximum number of block + types for the current histogram when the meta-block is too big. 
*/ + self.alphabet_size_ = alphabet_size + + self.min_block_size_ = min_block_size + self.split_threshold_ = split_threshold + self.num_blocks_ = 0 + self.split_ = split + self.histograms_size_ = histograms_size + self.target_block_size_ = min_block_size + self.block_size_ = 0 + self.curr_histogram_ix_ = 0 + self.merge_last_count_ = 0 + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks) + self.split_.num_blocks = max_num_blocks + *histograms_size = max_num_types + if histograms == nil || cap(*histograms) < int(*histograms_size) { + *histograms = make([]histogramDistance, *histograms_size) + } else { + *histograms = (*histograms)[:*histograms_size] + } + self.histograms_ = *histograms + + /* Clear only current histogram. */ + histogramClearDistance(&self.histograms_[0]) + + self.last_histogram_ix_[1] = 0 + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] +} + +/* Does either of three things: + (1) emits the current block with a new block type; + (2) emits the current block with the type of the second last block; + (3) merges the current block with the last block. */ +func blockSplitterFinishBlockDistance(self *blockSplitterDistance, is_final bool) { + var split *blockSplit = self.split_ + var last_entropy []float64 = self.last_entropy_[:] + var histograms []histogramDistance = self.histograms_ + self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_) + if self.num_blocks_ == 0 { + /* Create first block. 
*/ + split.lengths[0] = uint32(self.block_size_) + + split.types[0] = 0 + last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_) + last_entropy[1] = last_entropy[0] + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + } else if self.block_size_ > 0 { + var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_) + var combined_histo [2]histogramDistance + var combined_entropy [2]float64 + var diff [2]float64 + var j uint + for j = 0; j < 2; j++ { + var last_histogram_ix uint = self.last_histogram_ix_[j] + combined_histo[j] = histograms[self.curr_histogram_ix_] + histogramAddHistogramDistance(&combined_histo[j], &histograms[last_histogram_ix]) + combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_) + diff[j] = combined_entropy[j] - entropy - last_entropy[j] + } + + if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. */ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = uint(byte(split.num_types)) + last_entropy[1] = last_entropy[0] + last_entropy[0] = entropy + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. 
*/ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + histograms[self.last_histogram_ix_[0]] = combined_histo[1] + last_entropy[1] = last_entropy[0] + last_entropy[0] = combined_entropy[1] + self.num_blocks_++ + self.block_size_ = 0 + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + histograms[self.last_histogram_ix_[0]] = combined_histo[0] + last_entropy[0] = combined_entropy[0] + if split.num_types == 1 { + last_entropy[1] = last_entropy[0] + } + + self.block_size_ = 0 + histogramClearDistance(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + } + + if is_final { + *self.histograms_size_ = split.num_types + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current histogram. When the current histogram + reaches the target size, decides on merging the block. */ +func blockSplitterAddSymbolDistance(self *blockSplitterDistance, symbol uint) { + histogramAddDistance(&self.histograms_[self.curr_histogram_ix_], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + blockSplitterFinishBlockDistance(self, false) /* is_final = */ + } +} diff --git a/vendor/github.com/andybalholm/brotli/metablock_literal.go b/vendor/github.com/andybalholm/brotli/metablock_literal.go new file mode 100644 index 00000000000..307f8da88f4 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/metablock_literal.go @@ -0,0 +1,165 @@ +package brotli + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Greedy block splitter for one block category (literal, command or distance). + */ +type blockSplitterLiteral struct { + alphabet_size_ uint + min_block_size_ uint + split_threshold_ float64 + num_blocks_ uint + split_ *blockSplit + histograms_ []histogramLiteral + histograms_size_ *uint + target_block_size_ uint + block_size_ uint + curr_histogram_ix_ uint + last_histogram_ix_ [2]uint + last_entropy_ [2]float64 + merge_last_count_ uint +} + +func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, min_block_size uint, split_threshold float64, num_symbols uint, split *blockSplit, histograms *[]histogramLiteral, histograms_size *uint) { + var max_num_blocks uint = num_symbols/min_block_size + 1 + var max_num_types uint = brotli_min_size_t(max_num_blocks, maxNumberOfBlockTypes+1) + /* We have to allocate one more histogram than the maximum number of block + types for the current histogram when the meta-block is too big. */ + self.alphabet_size_ = alphabet_size + + self.min_block_size_ = min_block_size + self.split_threshold_ = split_threshold + self.num_blocks_ = 0 + self.split_ = split + self.histograms_size_ = histograms_size + self.target_block_size_ = min_block_size + self.block_size_ = 0 + self.curr_histogram_ix_ = 0 + self.merge_last_count_ = 0 + brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks) + brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks) + self.split_.num_blocks = max_num_blocks + *histograms_size = max_num_types + if histograms == nil || cap(*histograms) < int(*histograms_size) { + *histograms = make([]histogramLiteral, *histograms_size) + } else { + *histograms = (*histograms)[:*histograms_size] + } + self.histograms_ = *histograms + + /* Clear only current histogram. 
*/ + histogramClearLiteral(&self.histograms_[0]) + + self.last_histogram_ix_[1] = 0 + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] +} + +/* Does either of three things: + (1) emits the current block with a new block type; + (2) emits the current block with the type of the second last block; + (3) merges the current block with the last block. */ +func blockSplitterFinishBlockLiteral(self *blockSplitterLiteral, is_final bool) { + var split *blockSplit = self.split_ + var last_entropy []float64 = self.last_entropy_[:] + var histograms []histogramLiteral = self.histograms_ + self.block_size_ = brotli_max_size_t(self.block_size_, self.min_block_size_) + if self.num_blocks_ == 0 { + /* Create first block. */ + split.lengths[0] = uint32(self.block_size_) + + split.types[0] = 0 + last_entropy[0] = bitsEntropy(histograms[0].data_[:], self.alphabet_size_) + last_entropy[1] = last_entropy[0] + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + } else if self.block_size_ > 0 { + var entropy float64 = bitsEntropy(histograms[self.curr_histogram_ix_].data_[:], self.alphabet_size_) + var combined_histo [2]histogramLiteral + var combined_entropy [2]float64 + var diff [2]float64 + var j uint + for j = 0; j < 2; j++ { + var last_histogram_ix uint = self.last_histogram_ix_[j] + combined_histo[j] = histograms[self.curr_histogram_ix_] + histogramAddHistogramLiteral(&combined_histo[j], &histograms[last_histogram_ix]) + combined_entropy[j] = bitsEntropy(combined_histo[j].data_[0:], self.alphabet_size_) + diff[j] = combined_entropy[j] - entropy - last_entropy[j] + } + + if split.num_types < maxNumberOfBlockTypes && diff[0] > self.split_threshold_ && diff[1] > self.split_threshold_ { + /* Create new block. 
*/ + split.lengths[self.num_blocks_] = uint32(self.block_size_) + + split.types[self.num_blocks_] = byte(split.num_types) + self.last_histogram_ix_[1] = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = uint(byte(split.num_types)) + last_entropy[1] = last_entropy[0] + last_entropy[0] = entropy + self.num_blocks_++ + split.num_types++ + self.curr_histogram_ix_++ + if self.curr_histogram_ix_ < *self.histograms_size_ { + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + } + self.block_size_ = 0 + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else if diff[1] < diff[0]-20.0 { + split.lengths[self.num_blocks_] = uint32(self.block_size_) + split.types[self.num_blocks_] = split.types[self.num_blocks_-2] + /* Combine this block with second last block. */ + + var tmp uint = self.last_histogram_ix_[0] + self.last_histogram_ix_[0] = self.last_histogram_ix_[1] + self.last_histogram_ix_[1] = tmp + histograms[self.last_histogram_ix_[0]] = combined_histo[1] + last_entropy[1] = last_entropy[0] + last_entropy[0] = combined_entropy[1] + self.num_blocks_++ + self.block_size_ = 0 + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_ = 0 + self.target_block_size_ = self.min_block_size_ + } else { + /* Combine this block with last block. */ + split.lengths[self.num_blocks_-1] += uint32(self.block_size_) + + histograms[self.last_histogram_ix_[0]] = combined_histo[0] + last_entropy[0] = combined_entropy[0] + if split.num_types == 1 { + last_entropy[1] = last_entropy[0] + } + + self.block_size_ = 0 + histogramClearLiteral(&histograms[self.curr_histogram_ix_]) + self.merge_last_count_++ + if self.merge_last_count_ > 1 { + self.target_block_size_ += self.min_block_size_ + } + } + } + + if is_final { + *self.histograms_size_ = split.num_types + split.num_blocks = self.num_blocks_ + } +} + +/* Adds the next symbol to the current histogram. 
When the current histogram + reaches the target size, decides on merging the block. */ +func blockSplitterAddSymbolLiteral(self *blockSplitterLiteral, symbol uint) { + histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_], symbol) + self.block_size_++ + if self.block_size_ == self.target_block_size_ { + blockSplitterFinishBlockLiteral(self, false) /* is_final = */ + } +} diff --git a/vendor/github.com/andybalholm/brotli/params.go b/vendor/github.com/andybalholm/brotli/params.go new file mode 100644 index 00000000000..0a4c6875212 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/params.go @@ -0,0 +1,37 @@ +package brotli + +/* Copyright 2017 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Parameters for the Brotli encoder with chosen quality levels. */ +type hasherParams struct { + type_ int + bucket_bits int + block_bits int + hash_len int + num_last_distances_to_check int +} + +type distanceParams struct { + distance_postfix_bits uint32 + num_direct_distance_codes uint32 + alphabet_size uint32 + max_distance uint +} + +/* Encoding parameters */ +type encoderParams struct { + mode int + quality int + lgwin uint + lgblock int + size_hint uint + disable_literal_context_modeling bool + large_window bool + hasher hasherParams + dist distanceParams + dictionary encoderDictionary +} diff --git a/vendor/github.com/andybalholm/brotli/platform.go b/vendor/github.com/andybalholm/brotli/platform.go new file mode 100644 index 00000000000..4ebfb1528ba --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/platform.go @@ -0,0 +1,103 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +func brotli_min_double(a float64, b float64) float64 { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_double(a float64, b float64) float64 { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_float(a float32, b float32) float32 { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_float(a float32, b float32) float32 { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_int(a int, b int) int { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_int(a int, b int) int { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_size_t(a uint, b uint) uint { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_size_t(a uint, b uint) uint { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_uint32_t(a uint32, b uint32) uint32 { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_uint32_t(a uint32, b uint32) uint32 { + if a > b { + return a + } else { + return b + } +} + +func brotli_min_uint8_t(a byte, b byte) byte { + if a < b { + return a + } else { + return b + } +} + +func brotli_max_uint8_t(a byte, b byte) byte { + if a > b { + return a + } else { + return b + } +} diff --git a/vendor/github.com/andybalholm/brotli/prefix.go b/vendor/github.com/andybalholm/brotli/prefix.go new file mode 100644 index 00000000000..484df0d61ec --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/prefix.go @@ -0,0 +1,30 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Functions for encoding of integers into prefix codes the amount of extra + bits, and the actual values of the extra bits. */ + +/* Here distance_code is an intermediate code, i.e. 
one of the special codes or + the actual distance increased by BROTLI_NUM_DISTANCE_SHORT_CODES - 1. */ +func prefixEncodeCopyDistance(distance_code uint, num_direct_codes uint, postfix_bits uint, code *uint16, extra_bits *uint32) { + if distance_code < numDistanceShortCodes+num_direct_codes { + *code = uint16(distance_code) + *extra_bits = 0 + return + } else { + var dist uint = (uint(1) << (postfix_bits + 2)) + (distance_code - numDistanceShortCodes - num_direct_codes) + var bucket uint = uint(log2FloorNonZero(dist) - 1) + var postfix_mask uint = (1 << postfix_bits) - 1 + var postfix uint = dist & postfix_mask + var prefix uint = (dist >> bucket) & 1 + var offset uint = (2 + prefix) << bucket + var nbits uint = bucket - postfix_bits + *code = uint16(nbits<<10 | (numDistanceShortCodes + num_direct_codes + ((2*(nbits-1) + prefix) << postfix_bits) + postfix)) + *extra_bits = uint32((dist - offset) >> postfix_bits) + } +} diff --git a/vendor/github.com/andybalholm/brotli/prefix_dec.go b/vendor/github.com/andybalholm/brotli/prefix_dec.go new file mode 100644 index 00000000000..183f0d53fed --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/prefix_dec.go @@ -0,0 +1,723 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. 
+ See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +type cmdLutElement struct { + insert_len_extra_bits byte + copy_len_extra_bits byte + distance_code int8 + context byte + insert_len_offset uint16 + copy_len_offset uint16 +} + +var kCmdLut = [numCommandSymbols]cmdLutElement{ + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0000, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0000, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0000, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0000, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0001, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0001, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0001, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0001, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0002, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0002, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0002, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0002, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0003, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0003, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0003, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0008}, + 
cmdLutElement{0x00, 0x00, 0, 0x03, 0x0003, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0004, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0004, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0004, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0004, 0x0009}, + cmdLutElement{0x00, 0x00, 0, 0x00, 0x0005, 0x0002}, + cmdLutElement{0x00, 0x00, 0, 0x01, 0x0005, 0x0003}, + cmdLutElement{0x00, 0x00, 0, 0x02, 0x0005, 0x0004}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0005}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0006}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0007}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0008}, + cmdLutElement{0x00, 0x00, 0, 0x03, 0x0005, 0x0009}, + cmdLutElement{0x01, 0x00, 0, 0x00, 0x0006, 0x0002}, + cmdLutElement{0x01, 0x00, 0, 0x01, 0x0006, 0x0003}, + cmdLutElement{0x01, 0x00, 0, 0x02, 0x0006, 0x0004}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0005}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0006}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0007}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0008}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0006, 0x0009}, + cmdLutElement{0x01, 0x00, 0, 0x00, 0x0008, 0x0002}, + cmdLutElement{0x01, 0x00, 0, 0x01, 0x0008, 0x0003}, + cmdLutElement{0x01, 0x00, 0, 0x02, 0x0008, 0x0004}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0005}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0006}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0007}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0008}, + cmdLutElement{0x01, 0x00, 0, 0x03, 0x0008, 0x0009}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0000, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0000, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0000, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0000, 0x0012}, + 
cmdLutElement{0x00, 0x03, 0, 0x03, 0x0000, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0000, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0000, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0000, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0001, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0001, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0001, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0001, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0001, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0001, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0001, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0001, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0002, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0002, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0002, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0002, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0002, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0002, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0002, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0002, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0003, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0003, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0003, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0003, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0003, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0003, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0003, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0003, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0004, 0x000a}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0004, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0004, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0004, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0004, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0004, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0004, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0004, 0x0036}, + cmdLutElement{0x00, 0x01, 0, 0x03, 0x0005, 0x000a}, + 
cmdLutElement{0x00, 0x01, 0, 0x03, 0x0005, 0x000c}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0005, 0x000e}, + cmdLutElement{0x00, 0x02, 0, 0x03, 0x0005, 0x0012}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0005, 0x0016}, + cmdLutElement{0x00, 0x03, 0, 0x03, 0x0005, 0x001e}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0005, 0x0026}, + cmdLutElement{0x00, 0x04, 0, 0x03, 0x0005, 0x0036}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0006, 0x000a}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0006, 0x000c}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0006, 0x000e}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0006, 0x0012}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0006, 0x0016}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0006, 0x001e}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0006, 0x0026}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0006, 0x0036}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0008, 0x000a}, + cmdLutElement{0x01, 0x01, 0, 0x03, 0x0008, 0x000c}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0008, 0x000e}, + cmdLutElement{0x01, 0x02, 0, 0x03, 0x0008, 0x0012}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0008, 0x0016}, + cmdLutElement{0x01, 0x03, 0, 0x03, 0x0008, 0x001e}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0008, 0x0026}, + cmdLutElement{0x01, 0x04, 0, 0x03, 0x0008, 0x0036}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0000, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0000, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0000, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0000, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0001, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0001, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0001, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 
0x0001, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0001, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0002, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0002, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0002, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0002, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0003, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0003, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0003, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0003, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0004, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0004, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0004, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0004, 0x0009}, + cmdLutElement{0x00, 0x00, -1, 0x00, 0x0005, 0x0002}, + cmdLutElement{0x00, 0x00, -1, 0x01, 0x0005, 0x0003}, + cmdLutElement{0x00, 0x00, -1, 0x02, 0x0005, 0x0004}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0005}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0006}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0007}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0008}, + cmdLutElement{0x00, 0x00, -1, 0x03, 0x0005, 0x0009}, + cmdLutElement{0x01, 0x00, -1, 0x00, 0x0006, 0x0002}, + cmdLutElement{0x01, 0x00, -1, 0x01, 0x0006, 0x0003}, + 
cmdLutElement{0x01, 0x00, -1, 0x02, 0x0006, 0x0004}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0005}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0006}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0007}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0008}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0006, 0x0009}, + cmdLutElement{0x01, 0x00, -1, 0x00, 0x0008, 0x0002}, + cmdLutElement{0x01, 0x00, -1, 0x01, 0x0008, 0x0003}, + cmdLutElement{0x01, 0x00, -1, 0x02, 0x0008, 0x0004}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0005}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0006}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0007}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0008}, + cmdLutElement{0x01, 0x00, -1, 0x03, 0x0008, 0x0009}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0000, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0000, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0000, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0000, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0000, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0000, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0000, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0000, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0001, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0001, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0001, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0001, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0001, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0001, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0001, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0001, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0002, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0002, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0002, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0002, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0002, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0002, 0x001e}, + cmdLutElement{0x00, 
0x04, -1, 0x03, 0x0002, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0002, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0003, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0003, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0003, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0003, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0003, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0003, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0003, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0003, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0004, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0004, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0004, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0004, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0004, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0004, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0004, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0004, 0x0036}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0005, 0x000a}, + cmdLutElement{0x00, 0x01, -1, 0x03, 0x0005, 0x000c}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0005, 0x000e}, + cmdLutElement{0x00, 0x02, -1, 0x03, 0x0005, 0x0012}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0005, 0x0016}, + cmdLutElement{0x00, 0x03, -1, 0x03, 0x0005, 0x001e}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0005, 0x0026}, + cmdLutElement{0x00, 0x04, -1, 0x03, 0x0005, 0x0036}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0006, 0x000a}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0006, 0x000c}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0006, 0x000e}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0006, 0x0012}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0006, 0x0016}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0006, 0x001e}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0006, 0x0026}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0006, 0x0036}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0008, 0x000a}, + cmdLutElement{0x01, 0x01, -1, 0x03, 0x0008, 0x000c}, + cmdLutElement{0x01, 0x02, -1, 0x03, 
0x0008, 0x000e}, + cmdLutElement{0x01, 0x02, -1, 0x03, 0x0008, 0x0012}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0008, 0x0016}, + cmdLutElement{0x01, 0x03, -1, 0x03, 0x0008, 0x001e}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0008, 0x0026}, + cmdLutElement{0x01, 0x04, -1, 0x03, 0x0008, 0x0036}, + cmdLutElement{0x02, 0x00, -1, 0x00, 0x000a, 0x0002}, + cmdLutElement{0x02, 0x00, -1, 0x01, 0x000a, 0x0003}, + cmdLutElement{0x02, 0x00, -1, 0x02, 0x000a, 0x0004}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0005}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0006}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0007}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0008}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000a, 0x0009}, + cmdLutElement{0x02, 0x00, -1, 0x00, 0x000e, 0x0002}, + cmdLutElement{0x02, 0x00, -1, 0x01, 0x000e, 0x0003}, + cmdLutElement{0x02, 0x00, -1, 0x02, 0x000e, 0x0004}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0005}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0006}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0007}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0008}, + cmdLutElement{0x02, 0x00, -1, 0x03, 0x000e, 0x0009}, + cmdLutElement{0x03, 0x00, -1, 0x00, 0x0012, 0x0002}, + cmdLutElement{0x03, 0x00, -1, 0x01, 0x0012, 0x0003}, + cmdLutElement{0x03, 0x00, -1, 0x02, 0x0012, 0x0004}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0005}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0006}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0007}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0008}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x0012, 0x0009}, + cmdLutElement{0x03, 0x00, -1, 0x00, 0x001a, 0x0002}, + cmdLutElement{0x03, 0x00, -1, 0x01, 0x001a, 0x0003}, + cmdLutElement{0x03, 0x00, -1, 0x02, 0x001a, 0x0004}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0005}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0006}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0007}, + cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0008}, + 
cmdLutElement{0x03, 0x00, -1, 0x03, 0x001a, 0x0009}, + cmdLutElement{0x04, 0x00, -1, 0x00, 0x0022, 0x0002}, + cmdLutElement{0x04, 0x00, -1, 0x01, 0x0022, 0x0003}, + cmdLutElement{0x04, 0x00, -1, 0x02, 0x0022, 0x0004}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0005}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0006}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0007}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0008}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0022, 0x0009}, + cmdLutElement{0x04, 0x00, -1, 0x00, 0x0032, 0x0002}, + cmdLutElement{0x04, 0x00, -1, 0x01, 0x0032, 0x0003}, + cmdLutElement{0x04, 0x00, -1, 0x02, 0x0032, 0x0004}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0005}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0006}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0007}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0008}, + cmdLutElement{0x04, 0x00, -1, 0x03, 0x0032, 0x0009}, + cmdLutElement{0x05, 0x00, -1, 0x00, 0x0042, 0x0002}, + cmdLutElement{0x05, 0x00, -1, 0x01, 0x0042, 0x0003}, + cmdLutElement{0x05, 0x00, -1, 0x02, 0x0042, 0x0004}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0005}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0006}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0007}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0008}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0042, 0x0009}, + cmdLutElement{0x05, 0x00, -1, 0x00, 0x0062, 0x0002}, + cmdLutElement{0x05, 0x00, -1, 0x01, 0x0062, 0x0003}, + cmdLutElement{0x05, 0x00, -1, 0x02, 0x0062, 0x0004}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0005}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0006}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0007}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0008}, + cmdLutElement{0x05, 0x00, -1, 0x03, 0x0062, 0x0009}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000a, 0x000a}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000a, 0x000c}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000a, 0x000e}, + cmdLutElement{0x02, 
0x02, -1, 0x03, 0x000a, 0x0012}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000a, 0x0016}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000a, 0x001e}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000a, 0x0026}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000a, 0x0036}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000e, 0x000a}, + cmdLutElement{0x02, 0x01, -1, 0x03, 0x000e, 0x000c}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000e, 0x000e}, + cmdLutElement{0x02, 0x02, -1, 0x03, 0x000e, 0x0012}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000e, 0x0016}, + cmdLutElement{0x02, 0x03, -1, 0x03, 0x000e, 0x001e}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000e, 0x0026}, + cmdLutElement{0x02, 0x04, -1, 0x03, 0x000e, 0x0036}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x0012, 0x000a}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x0012, 0x000c}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x0012, 0x000e}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x0012, 0x0012}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x0012, 0x0016}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x0012, 0x001e}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x0012, 0x0026}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x0012, 0x0036}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x001a, 0x000a}, + cmdLutElement{0x03, 0x01, -1, 0x03, 0x001a, 0x000c}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x001a, 0x000e}, + cmdLutElement{0x03, 0x02, -1, 0x03, 0x001a, 0x0012}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x001a, 0x0016}, + cmdLutElement{0x03, 0x03, -1, 0x03, 0x001a, 0x001e}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x001a, 0x0026}, + cmdLutElement{0x03, 0x04, -1, 0x03, 0x001a, 0x0036}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0022, 0x000a}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0022, 0x000c}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0022, 0x000e}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0022, 0x0012}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0022, 0x0016}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0022, 0x001e}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0022, 0x0026}, + cmdLutElement{0x04, 0x04, -1, 0x03, 
0x0022, 0x0036}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0032, 0x000a}, + cmdLutElement{0x04, 0x01, -1, 0x03, 0x0032, 0x000c}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0032, 0x000e}, + cmdLutElement{0x04, 0x02, -1, 0x03, 0x0032, 0x0012}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0032, 0x0016}, + cmdLutElement{0x04, 0x03, -1, 0x03, 0x0032, 0x001e}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0032, 0x0026}, + cmdLutElement{0x04, 0x04, -1, 0x03, 0x0032, 0x0036}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0042, 0x000a}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0042, 0x000c}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0042, 0x000e}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0042, 0x0012}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0042, 0x0016}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0042, 0x001e}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0042, 0x0026}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0042, 0x0036}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0062, 0x000a}, + cmdLutElement{0x05, 0x01, -1, 0x03, 0x0062, 0x000c}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0062, 0x000e}, + cmdLutElement{0x05, 0x02, -1, 0x03, 0x0062, 0x0012}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0062, 0x0016}, + cmdLutElement{0x05, 0x03, -1, 0x03, 0x0062, 0x001e}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0062, 0x0026}, + cmdLutElement{0x05, 0x04, -1, 0x03, 0x0062, 0x0036}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0000, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0000, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0000, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0000, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0000, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0000, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0000, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0000, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0001, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0001, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0001, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0001, 0x00c6}, + 
cmdLutElement{0x00, 0x08, -1, 0x03, 0x0001, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0001, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0001, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0001, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0002, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0002, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0002, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0002, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0002, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0002, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0002, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0002, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0003, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0003, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0003, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0003, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0003, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0003, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0003, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0003, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0004, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0004, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0004, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0004, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0004, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0004, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0004, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0004, 0x0846}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0005, 0x0046}, + cmdLutElement{0x00, 0x05, -1, 0x03, 0x0005, 0x0066}, + cmdLutElement{0x00, 0x06, -1, 0x03, 0x0005, 0x0086}, + cmdLutElement{0x00, 0x07, -1, 0x03, 0x0005, 0x00c6}, + cmdLutElement{0x00, 0x08, -1, 0x03, 0x0005, 0x0146}, + cmdLutElement{0x00, 0x09, -1, 0x03, 0x0005, 0x0246}, + cmdLutElement{0x00, 0x0a, -1, 0x03, 0x0005, 0x0446}, + cmdLutElement{0x00, 0x18, -1, 0x03, 0x0005, 0x0846}, + cmdLutElement{0x01, 
0x05, -1, 0x03, 0x0006, 0x0046}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0006, 0x0066}, + cmdLutElement{0x01, 0x06, -1, 0x03, 0x0006, 0x0086}, + cmdLutElement{0x01, 0x07, -1, 0x03, 0x0006, 0x00c6}, + cmdLutElement{0x01, 0x08, -1, 0x03, 0x0006, 0x0146}, + cmdLutElement{0x01, 0x09, -1, 0x03, 0x0006, 0x0246}, + cmdLutElement{0x01, 0x0a, -1, 0x03, 0x0006, 0x0446}, + cmdLutElement{0x01, 0x18, -1, 0x03, 0x0006, 0x0846}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0008, 0x0046}, + cmdLutElement{0x01, 0x05, -1, 0x03, 0x0008, 0x0066}, + cmdLutElement{0x01, 0x06, -1, 0x03, 0x0008, 0x0086}, + cmdLutElement{0x01, 0x07, -1, 0x03, 0x0008, 0x00c6}, + cmdLutElement{0x01, 0x08, -1, 0x03, 0x0008, 0x0146}, + cmdLutElement{0x01, 0x09, -1, 0x03, 0x0008, 0x0246}, + cmdLutElement{0x01, 0x0a, -1, 0x03, 0x0008, 0x0446}, + cmdLutElement{0x01, 0x18, -1, 0x03, 0x0008, 0x0846}, + cmdLutElement{0x06, 0x00, -1, 0x00, 0x0082, 0x0002}, + cmdLutElement{0x06, 0x00, -1, 0x01, 0x0082, 0x0003}, + cmdLutElement{0x06, 0x00, -1, 0x02, 0x0082, 0x0004}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0005}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0006}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0007}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0008}, + cmdLutElement{0x06, 0x00, -1, 0x03, 0x0082, 0x0009}, + cmdLutElement{0x07, 0x00, -1, 0x00, 0x00c2, 0x0002}, + cmdLutElement{0x07, 0x00, -1, 0x01, 0x00c2, 0x0003}, + cmdLutElement{0x07, 0x00, -1, 0x02, 0x00c2, 0x0004}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0005}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0006}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0007}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0008}, + cmdLutElement{0x07, 0x00, -1, 0x03, 0x00c2, 0x0009}, + cmdLutElement{0x08, 0x00, -1, 0x00, 0x0142, 0x0002}, + cmdLutElement{0x08, 0x00, -1, 0x01, 0x0142, 0x0003}, + cmdLutElement{0x08, 0x00, -1, 0x02, 0x0142, 0x0004}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0005}, + cmdLutElement{0x08, 0x00, -1, 0x03, 
0x0142, 0x0006}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0007}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0008}, + cmdLutElement{0x08, 0x00, -1, 0x03, 0x0142, 0x0009}, + cmdLutElement{0x09, 0x00, -1, 0x00, 0x0242, 0x0002}, + cmdLutElement{0x09, 0x00, -1, 0x01, 0x0242, 0x0003}, + cmdLutElement{0x09, 0x00, -1, 0x02, 0x0242, 0x0004}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0005}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0006}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0007}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0008}, + cmdLutElement{0x09, 0x00, -1, 0x03, 0x0242, 0x0009}, + cmdLutElement{0x0a, 0x00, -1, 0x00, 0x0442, 0x0002}, + cmdLutElement{0x0a, 0x00, -1, 0x01, 0x0442, 0x0003}, + cmdLutElement{0x0a, 0x00, -1, 0x02, 0x0442, 0x0004}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0005}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0006}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0007}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0008}, + cmdLutElement{0x0a, 0x00, -1, 0x03, 0x0442, 0x0009}, + cmdLutElement{0x0c, 0x00, -1, 0x00, 0x0842, 0x0002}, + cmdLutElement{0x0c, 0x00, -1, 0x01, 0x0842, 0x0003}, + cmdLutElement{0x0c, 0x00, -1, 0x02, 0x0842, 0x0004}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0005}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0006}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0007}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0008}, + cmdLutElement{0x0c, 0x00, -1, 0x03, 0x0842, 0x0009}, + cmdLutElement{0x0e, 0x00, -1, 0x00, 0x1842, 0x0002}, + cmdLutElement{0x0e, 0x00, -1, 0x01, 0x1842, 0x0003}, + cmdLutElement{0x0e, 0x00, -1, 0x02, 0x1842, 0x0004}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0005}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0006}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0007}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0008}, + cmdLutElement{0x0e, 0x00, -1, 0x03, 0x1842, 0x0009}, + cmdLutElement{0x18, 0x00, -1, 0x00, 0x5842, 0x0002}, + 
cmdLutElement{0x18, 0x00, -1, 0x01, 0x5842, 0x0003}, + cmdLutElement{0x18, 0x00, -1, 0x02, 0x5842, 0x0004}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0005}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0006}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0007}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0008}, + cmdLutElement{0x18, 0x00, -1, 0x03, 0x5842, 0x0009}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000a, 0x0046}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000a, 0x0066}, + cmdLutElement{0x02, 0x06, -1, 0x03, 0x000a, 0x0086}, + cmdLutElement{0x02, 0x07, -1, 0x03, 0x000a, 0x00c6}, + cmdLutElement{0x02, 0x08, -1, 0x03, 0x000a, 0x0146}, + cmdLutElement{0x02, 0x09, -1, 0x03, 0x000a, 0x0246}, + cmdLutElement{0x02, 0x0a, -1, 0x03, 0x000a, 0x0446}, + cmdLutElement{0x02, 0x18, -1, 0x03, 0x000a, 0x0846}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000e, 0x0046}, + cmdLutElement{0x02, 0x05, -1, 0x03, 0x000e, 0x0066}, + cmdLutElement{0x02, 0x06, -1, 0x03, 0x000e, 0x0086}, + cmdLutElement{0x02, 0x07, -1, 0x03, 0x000e, 0x00c6}, + cmdLutElement{0x02, 0x08, -1, 0x03, 0x000e, 0x0146}, + cmdLutElement{0x02, 0x09, -1, 0x03, 0x000e, 0x0246}, + cmdLutElement{0x02, 0x0a, -1, 0x03, 0x000e, 0x0446}, + cmdLutElement{0x02, 0x18, -1, 0x03, 0x000e, 0x0846}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x0012, 0x0046}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x0012, 0x0066}, + cmdLutElement{0x03, 0x06, -1, 0x03, 0x0012, 0x0086}, + cmdLutElement{0x03, 0x07, -1, 0x03, 0x0012, 0x00c6}, + cmdLutElement{0x03, 0x08, -1, 0x03, 0x0012, 0x0146}, + cmdLutElement{0x03, 0x09, -1, 0x03, 0x0012, 0x0246}, + cmdLutElement{0x03, 0x0a, -1, 0x03, 0x0012, 0x0446}, + cmdLutElement{0x03, 0x18, -1, 0x03, 0x0012, 0x0846}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x001a, 0x0046}, + cmdLutElement{0x03, 0x05, -1, 0x03, 0x001a, 0x0066}, + cmdLutElement{0x03, 0x06, -1, 0x03, 0x001a, 0x0086}, + cmdLutElement{0x03, 0x07, -1, 0x03, 0x001a, 0x00c6}, + cmdLutElement{0x03, 0x08, -1, 0x03, 0x001a, 0x0146}, + cmdLutElement{0x03, 
0x09, -1, 0x03, 0x001a, 0x0246}, + cmdLutElement{0x03, 0x0a, -1, 0x03, 0x001a, 0x0446}, + cmdLutElement{0x03, 0x18, -1, 0x03, 0x001a, 0x0846}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0022, 0x0046}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0022, 0x0066}, + cmdLutElement{0x04, 0x06, -1, 0x03, 0x0022, 0x0086}, + cmdLutElement{0x04, 0x07, -1, 0x03, 0x0022, 0x00c6}, + cmdLutElement{0x04, 0x08, -1, 0x03, 0x0022, 0x0146}, + cmdLutElement{0x04, 0x09, -1, 0x03, 0x0022, 0x0246}, + cmdLutElement{0x04, 0x0a, -1, 0x03, 0x0022, 0x0446}, + cmdLutElement{0x04, 0x18, -1, 0x03, 0x0022, 0x0846}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0032, 0x0046}, + cmdLutElement{0x04, 0x05, -1, 0x03, 0x0032, 0x0066}, + cmdLutElement{0x04, 0x06, -1, 0x03, 0x0032, 0x0086}, + cmdLutElement{0x04, 0x07, -1, 0x03, 0x0032, 0x00c6}, + cmdLutElement{0x04, 0x08, -1, 0x03, 0x0032, 0x0146}, + cmdLutElement{0x04, 0x09, -1, 0x03, 0x0032, 0x0246}, + cmdLutElement{0x04, 0x0a, -1, 0x03, 0x0032, 0x0446}, + cmdLutElement{0x04, 0x18, -1, 0x03, 0x0032, 0x0846}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0042, 0x0046}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0042, 0x0066}, + cmdLutElement{0x05, 0x06, -1, 0x03, 0x0042, 0x0086}, + cmdLutElement{0x05, 0x07, -1, 0x03, 0x0042, 0x00c6}, + cmdLutElement{0x05, 0x08, -1, 0x03, 0x0042, 0x0146}, + cmdLutElement{0x05, 0x09, -1, 0x03, 0x0042, 0x0246}, + cmdLutElement{0x05, 0x0a, -1, 0x03, 0x0042, 0x0446}, + cmdLutElement{0x05, 0x18, -1, 0x03, 0x0042, 0x0846}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0062, 0x0046}, + cmdLutElement{0x05, 0x05, -1, 0x03, 0x0062, 0x0066}, + cmdLutElement{0x05, 0x06, -1, 0x03, 0x0062, 0x0086}, + cmdLutElement{0x05, 0x07, -1, 0x03, 0x0062, 0x00c6}, + cmdLutElement{0x05, 0x08, -1, 0x03, 0x0062, 0x0146}, + cmdLutElement{0x05, 0x09, -1, 0x03, 0x0062, 0x0246}, + cmdLutElement{0x05, 0x0a, -1, 0x03, 0x0062, 0x0446}, + cmdLutElement{0x05, 0x18, -1, 0x03, 0x0062, 0x0846}, + cmdLutElement{0x06, 0x01, -1, 0x03, 0x0082, 0x000a}, + cmdLutElement{0x06, 0x01, -1, 0x03, 
0x0082, 0x000c}, + cmdLutElement{0x06, 0x02, -1, 0x03, 0x0082, 0x000e}, + cmdLutElement{0x06, 0x02, -1, 0x03, 0x0082, 0x0012}, + cmdLutElement{0x06, 0x03, -1, 0x03, 0x0082, 0x0016}, + cmdLutElement{0x06, 0x03, -1, 0x03, 0x0082, 0x001e}, + cmdLutElement{0x06, 0x04, -1, 0x03, 0x0082, 0x0026}, + cmdLutElement{0x06, 0x04, -1, 0x03, 0x0082, 0x0036}, + cmdLutElement{0x07, 0x01, -1, 0x03, 0x00c2, 0x000a}, + cmdLutElement{0x07, 0x01, -1, 0x03, 0x00c2, 0x000c}, + cmdLutElement{0x07, 0x02, -1, 0x03, 0x00c2, 0x000e}, + cmdLutElement{0x07, 0x02, -1, 0x03, 0x00c2, 0x0012}, + cmdLutElement{0x07, 0x03, -1, 0x03, 0x00c2, 0x0016}, + cmdLutElement{0x07, 0x03, -1, 0x03, 0x00c2, 0x001e}, + cmdLutElement{0x07, 0x04, -1, 0x03, 0x00c2, 0x0026}, + cmdLutElement{0x07, 0x04, -1, 0x03, 0x00c2, 0x0036}, + cmdLutElement{0x08, 0x01, -1, 0x03, 0x0142, 0x000a}, + cmdLutElement{0x08, 0x01, -1, 0x03, 0x0142, 0x000c}, + cmdLutElement{0x08, 0x02, -1, 0x03, 0x0142, 0x000e}, + cmdLutElement{0x08, 0x02, -1, 0x03, 0x0142, 0x0012}, + cmdLutElement{0x08, 0x03, -1, 0x03, 0x0142, 0x0016}, + cmdLutElement{0x08, 0x03, -1, 0x03, 0x0142, 0x001e}, + cmdLutElement{0x08, 0x04, -1, 0x03, 0x0142, 0x0026}, + cmdLutElement{0x08, 0x04, -1, 0x03, 0x0142, 0x0036}, + cmdLutElement{0x09, 0x01, -1, 0x03, 0x0242, 0x000a}, + cmdLutElement{0x09, 0x01, -1, 0x03, 0x0242, 0x000c}, + cmdLutElement{0x09, 0x02, -1, 0x03, 0x0242, 0x000e}, + cmdLutElement{0x09, 0x02, -1, 0x03, 0x0242, 0x0012}, + cmdLutElement{0x09, 0x03, -1, 0x03, 0x0242, 0x0016}, + cmdLutElement{0x09, 0x03, -1, 0x03, 0x0242, 0x001e}, + cmdLutElement{0x09, 0x04, -1, 0x03, 0x0242, 0x0026}, + cmdLutElement{0x09, 0x04, -1, 0x03, 0x0242, 0x0036}, + cmdLutElement{0x0a, 0x01, -1, 0x03, 0x0442, 0x000a}, + cmdLutElement{0x0a, 0x01, -1, 0x03, 0x0442, 0x000c}, + cmdLutElement{0x0a, 0x02, -1, 0x03, 0x0442, 0x000e}, + cmdLutElement{0x0a, 0x02, -1, 0x03, 0x0442, 0x0012}, + cmdLutElement{0x0a, 0x03, -1, 0x03, 0x0442, 0x0016}, + cmdLutElement{0x0a, 0x03, -1, 0x03, 0x0442, 0x001e}, + 
cmdLutElement{0x0a, 0x04, -1, 0x03, 0x0442, 0x0026}, + cmdLutElement{0x0a, 0x04, -1, 0x03, 0x0442, 0x0036}, + cmdLutElement{0x0c, 0x01, -1, 0x03, 0x0842, 0x000a}, + cmdLutElement{0x0c, 0x01, -1, 0x03, 0x0842, 0x000c}, + cmdLutElement{0x0c, 0x02, -1, 0x03, 0x0842, 0x000e}, + cmdLutElement{0x0c, 0x02, -1, 0x03, 0x0842, 0x0012}, + cmdLutElement{0x0c, 0x03, -1, 0x03, 0x0842, 0x0016}, + cmdLutElement{0x0c, 0x03, -1, 0x03, 0x0842, 0x001e}, + cmdLutElement{0x0c, 0x04, -1, 0x03, 0x0842, 0x0026}, + cmdLutElement{0x0c, 0x04, -1, 0x03, 0x0842, 0x0036}, + cmdLutElement{0x0e, 0x01, -1, 0x03, 0x1842, 0x000a}, + cmdLutElement{0x0e, 0x01, -1, 0x03, 0x1842, 0x000c}, + cmdLutElement{0x0e, 0x02, -1, 0x03, 0x1842, 0x000e}, + cmdLutElement{0x0e, 0x02, -1, 0x03, 0x1842, 0x0012}, + cmdLutElement{0x0e, 0x03, -1, 0x03, 0x1842, 0x0016}, + cmdLutElement{0x0e, 0x03, -1, 0x03, 0x1842, 0x001e}, + cmdLutElement{0x0e, 0x04, -1, 0x03, 0x1842, 0x0026}, + cmdLutElement{0x0e, 0x04, -1, 0x03, 0x1842, 0x0036}, + cmdLutElement{0x18, 0x01, -1, 0x03, 0x5842, 0x000a}, + cmdLutElement{0x18, 0x01, -1, 0x03, 0x5842, 0x000c}, + cmdLutElement{0x18, 0x02, -1, 0x03, 0x5842, 0x000e}, + cmdLutElement{0x18, 0x02, -1, 0x03, 0x5842, 0x0012}, + cmdLutElement{0x18, 0x03, -1, 0x03, 0x5842, 0x0016}, + cmdLutElement{0x18, 0x03, -1, 0x03, 0x5842, 0x001e}, + cmdLutElement{0x18, 0x04, -1, 0x03, 0x5842, 0x0026}, + cmdLutElement{0x18, 0x04, -1, 0x03, 0x5842, 0x0036}, + cmdLutElement{0x06, 0x05, -1, 0x03, 0x0082, 0x0046}, + cmdLutElement{0x06, 0x05, -1, 0x03, 0x0082, 0x0066}, + cmdLutElement{0x06, 0x06, -1, 0x03, 0x0082, 0x0086}, + cmdLutElement{0x06, 0x07, -1, 0x03, 0x0082, 0x00c6}, + cmdLutElement{0x06, 0x08, -1, 0x03, 0x0082, 0x0146}, + cmdLutElement{0x06, 0x09, -1, 0x03, 0x0082, 0x0246}, + cmdLutElement{0x06, 0x0a, -1, 0x03, 0x0082, 0x0446}, + cmdLutElement{0x06, 0x18, -1, 0x03, 0x0082, 0x0846}, + cmdLutElement{0x07, 0x05, -1, 0x03, 0x00c2, 0x0046}, + cmdLutElement{0x07, 0x05, -1, 0x03, 0x00c2, 0x0066}, + cmdLutElement{0x07, 
0x06, -1, 0x03, 0x00c2, 0x0086}, + cmdLutElement{0x07, 0x07, -1, 0x03, 0x00c2, 0x00c6}, + cmdLutElement{0x07, 0x08, -1, 0x03, 0x00c2, 0x0146}, + cmdLutElement{0x07, 0x09, -1, 0x03, 0x00c2, 0x0246}, + cmdLutElement{0x07, 0x0a, -1, 0x03, 0x00c2, 0x0446}, + cmdLutElement{0x07, 0x18, -1, 0x03, 0x00c2, 0x0846}, + cmdLutElement{0x08, 0x05, -1, 0x03, 0x0142, 0x0046}, + cmdLutElement{0x08, 0x05, -1, 0x03, 0x0142, 0x0066}, + cmdLutElement{0x08, 0x06, -1, 0x03, 0x0142, 0x0086}, + cmdLutElement{0x08, 0x07, -1, 0x03, 0x0142, 0x00c6}, + cmdLutElement{0x08, 0x08, -1, 0x03, 0x0142, 0x0146}, + cmdLutElement{0x08, 0x09, -1, 0x03, 0x0142, 0x0246}, + cmdLutElement{0x08, 0x0a, -1, 0x03, 0x0142, 0x0446}, + cmdLutElement{0x08, 0x18, -1, 0x03, 0x0142, 0x0846}, + cmdLutElement{0x09, 0x05, -1, 0x03, 0x0242, 0x0046}, + cmdLutElement{0x09, 0x05, -1, 0x03, 0x0242, 0x0066}, + cmdLutElement{0x09, 0x06, -1, 0x03, 0x0242, 0x0086}, + cmdLutElement{0x09, 0x07, -1, 0x03, 0x0242, 0x00c6}, + cmdLutElement{0x09, 0x08, -1, 0x03, 0x0242, 0x0146}, + cmdLutElement{0x09, 0x09, -1, 0x03, 0x0242, 0x0246}, + cmdLutElement{0x09, 0x0a, -1, 0x03, 0x0242, 0x0446}, + cmdLutElement{0x09, 0x18, -1, 0x03, 0x0242, 0x0846}, + cmdLutElement{0x0a, 0x05, -1, 0x03, 0x0442, 0x0046}, + cmdLutElement{0x0a, 0x05, -1, 0x03, 0x0442, 0x0066}, + cmdLutElement{0x0a, 0x06, -1, 0x03, 0x0442, 0x0086}, + cmdLutElement{0x0a, 0x07, -1, 0x03, 0x0442, 0x00c6}, + cmdLutElement{0x0a, 0x08, -1, 0x03, 0x0442, 0x0146}, + cmdLutElement{0x0a, 0x09, -1, 0x03, 0x0442, 0x0246}, + cmdLutElement{0x0a, 0x0a, -1, 0x03, 0x0442, 0x0446}, + cmdLutElement{0x0a, 0x18, -1, 0x03, 0x0442, 0x0846}, + cmdLutElement{0x0c, 0x05, -1, 0x03, 0x0842, 0x0046}, + cmdLutElement{0x0c, 0x05, -1, 0x03, 0x0842, 0x0066}, + cmdLutElement{0x0c, 0x06, -1, 0x03, 0x0842, 0x0086}, + cmdLutElement{0x0c, 0x07, -1, 0x03, 0x0842, 0x00c6}, + cmdLutElement{0x0c, 0x08, -1, 0x03, 0x0842, 0x0146}, + cmdLutElement{0x0c, 0x09, -1, 0x03, 0x0842, 0x0246}, + cmdLutElement{0x0c, 0x0a, -1, 0x03, 
0x0842, 0x0446}, + cmdLutElement{0x0c, 0x18, -1, 0x03, 0x0842, 0x0846}, + cmdLutElement{0x0e, 0x05, -1, 0x03, 0x1842, 0x0046}, + cmdLutElement{0x0e, 0x05, -1, 0x03, 0x1842, 0x0066}, + cmdLutElement{0x0e, 0x06, -1, 0x03, 0x1842, 0x0086}, + cmdLutElement{0x0e, 0x07, -1, 0x03, 0x1842, 0x00c6}, + cmdLutElement{0x0e, 0x08, -1, 0x03, 0x1842, 0x0146}, + cmdLutElement{0x0e, 0x09, -1, 0x03, 0x1842, 0x0246}, + cmdLutElement{0x0e, 0x0a, -1, 0x03, 0x1842, 0x0446}, + cmdLutElement{0x0e, 0x18, -1, 0x03, 0x1842, 0x0846}, + cmdLutElement{0x18, 0x05, -1, 0x03, 0x5842, 0x0046}, + cmdLutElement{0x18, 0x05, -1, 0x03, 0x5842, 0x0066}, + cmdLutElement{0x18, 0x06, -1, 0x03, 0x5842, 0x0086}, + cmdLutElement{0x18, 0x07, -1, 0x03, 0x5842, 0x00c6}, + cmdLutElement{0x18, 0x08, -1, 0x03, 0x5842, 0x0146}, + cmdLutElement{0x18, 0x09, -1, 0x03, 0x5842, 0x0246}, + cmdLutElement{0x18, 0x0a, -1, 0x03, 0x5842, 0x0446}, + cmdLutElement{0x18, 0x18, -1, 0x03, 0x5842, 0x0846}, +} diff --git a/vendor/github.com/andybalholm/brotli/quality.go b/vendor/github.com/andybalholm/brotli/quality.go new file mode 100644 index 00000000000..49709a38239 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/quality.go @@ -0,0 +1,196 @@ +package brotli + +const fastOnePassCompressionQuality = 0 + +const fastTwoPassCompressionQuality = 1 + +const zopflificationQuality = 10 + +const hqZopflificationQuality = 11 + +const maxQualityForStaticEntropyCodes = 2 + +const minQualityForBlockSplit = 4 + +const minQualityForNonzeroDistanceParams = 4 + +const minQualityForOptimizeHistograms = 4 + +const minQualityForExtensiveReferenceSearch = 5 + +const minQualityForContextModeling = 5 + +const minQualityForHqContextModeling = 7 + +const minQualityForHqBlockSplitting = 10 + +/* For quality below MIN_QUALITY_FOR_BLOCK_SPLIT there is no block splitting, + so we buffer at most this much literals and commands. */ +const maxNumDelayedSymbols = 0x2FFF + +/* Returns hash-table size for quality levels 0 and 1. 
*/ +func maxHashTableSize(quality int) uint { + if quality == fastOnePassCompressionQuality { + return 1 << 15 + } else { + return 1 << 17 + } +} + +/* The maximum length for which the zopflification uses distinct distances. */ +const maxZopfliLenQuality10 = 150 + +const maxZopfliLenQuality11 = 325 + +/* Do not thoroughly search when a long copy is found. */ +const longCopyQuickStep = 16384 + +func maxZopfliLen(params *encoderParams) uint { + if params.quality <= 10 { + return maxZopfliLenQuality10 + } else { + return maxZopfliLenQuality11 + } +} + +/* Number of best candidates to evaluate to expand Zopfli chain. */ +func maxZopfliCandidates(params *encoderParams) uint { + if params.quality <= 10 { + return 1 + } else { + return 5 + } +} + +func sanitizeParams(params *encoderParams) { + params.quality = brotli_min_int(maxQuality, brotli_max_int(minQuality, params.quality)) + if params.quality <= maxQualityForStaticEntropyCodes { + params.large_window = false + } + + if params.lgwin < minWindowBits { + params.lgwin = minWindowBits + } else { + var max_lgwin int + if params.large_window { + max_lgwin = largeMaxWindowBits + } else { + max_lgwin = maxWindowBits + } + if params.lgwin > uint(max_lgwin) { + params.lgwin = uint(max_lgwin) + } + } +} + +/* Returns optimized lg_block value. */ +func computeLgBlock(params *encoderParams) int { + var lgblock int = params.lgblock + if params.quality == fastOnePassCompressionQuality || params.quality == fastTwoPassCompressionQuality { + lgblock = int(params.lgwin) + } else if params.quality < minQualityForBlockSplit { + lgblock = 14 + } else if lgblock == 0 { + lgblock = 16 + if params.quality >= 9 && params.lgwin > uint(lgblock) { + lgblock = brotli_min_int(18, int(params.lgwin)) + } + } else { + lgblock = brotli_min_int(maxInputBlockBits, brotli_max_int(minInputBlockBits, lgblock)) + } + + return lgblock +} + +/* Returns log2 of the size of main ring buffer area. 
+ Allocate at least lgwin + 1 bits for the ring buffer so that the newly + added block fits there completely and we still get lgwin bits and at least + read_block_size_bits + 1 bits because the copy tail length needs to be + smaller than ring-buffer size. */ +func computeRbBits(params *encoderParams) int { + return 1 + brotli_max_int(int(params.lgwin), params.lgblock) +} + +func maxMetablockSize(params *encoderParams) uint { + var bits int = brotli_min_int(computeRbBits(params), maxInputBlockBits) + return uint(1) << uint(bits) +} + +/* When searching for backward references and have not seen matches for a long + time, we can skip some match lookups. Unsuccessful match lookups are very + expensive and this kind of a heuristic speeds up compression quite a lot. + At first 8 byte strides are taken and every second byte is put to hasher. + After 4x more literals stride by 16 bytes, every put 4-th byte to hasher. + Applied only to qualities 2 to 9. */ +func literalSpreeLengthForSparseSearch(params *encoderParams) uint { + if params.quality < 9 { + return 64 + } else { + return 512 + } +} + +func chooseHasher(params *encoderParams, hparams *hasherParams) { + if params.quality > 9 { + hparams.type_ = 10 + } else if params.quality == 4 && params.size_hint >= 1<<20 { + hparams.type_ = 54 + } else if params.quality < 5 { + hparams.type_ = params.quality + } else if params.lgwin <= 16 { + if params.quality < 7 { + hparams.type_ = 40 + } else if params.quality < 9 { + hparams.type_ = 41 + } else { + hparams.type_ = 42 + } + } else if params.size_hint >= 1<<20 && params.lgwin >= 19 { + hparams.type_ = 6 + hparams.block_bits = params.quality - 1 + hparams.bucket_bits = 15 + hparams.hash_len = 5 + if params.quality < 7 { + hparams.num_last_distances_to_check = 4 + } else if params.quality < 9 { + hparams.num_last_distances_to_check = 10 + } else { + hparams.num_last_distances_to_check = 16 + } + } else { + hparams.type_ = 5 + hparams.block_bits = params.quality - 1 + if 
params.quality < 7 { + hparams.bucket_bits = 14 + } else { + hparams.bucket_bits = 15 + } + if params.quality < 7 { + hparams.num_last_distances_to_check = 4 + } else if params.quality < 9 { + hparams.num_last_distances_to_check = 10 + } else { + hparams.num_last_distances_to_check = 16 + } + } + + if params.lgwin > 24 { + /* Different hashers for large window brotli: not for qualities <= 2, + these are too fast for large window. Not for qualities >= 10: their + hasher already works well with large window. So the changes are: + H3 --> H35: for quality 3. + H54 --> H55: for quality 4 with size hint > 1MB + H6 --> H65: for qualities 5, 6, 7, 8, 9. */ + if hparams.type_ == 3 { + hparams.type_ = 35 + } + + if hparams.type_ == 54 { + hparams.type_ = 55 + } + + if hparams.type_ == 6 { + hparams.type_ = 65 + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/reader.go b/vendor/github.com/andybalholm/brotli/reader.go new file mode 100644 index 00000000000..9419c79c17a --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/reader.go @@ -0,0 +1,108 @@ +package brotli + +import ( + "errors" + "io" +) + +type decodeError int + +func (err decodeError) Error() string { + return "brotli: " + string(decoderErrorString(int(err))) +} + +var errExcessiveInput = errors.New("brotli: excessive input") +var errInvalidState = errors.New("brotli: invalid state") + +// readBufSize is a "good" buffer size that avoids excessive round-trips +// between C and Go but doesn't waste too much memory on buffering. +// It is arbitrarily chosen to be equal to the constant used in io.Copy. +const readBufSize = 32 * 1024 + +// NewReader creates a new Reader reading the given reader. +func NewReader(src io.Reader) *Reader { + r := new(Reader) + r.Reset(src) + return r +} + +// Reset discards the Reader's state and makes it equivalent to the result of +// its original state from NewReader, but reading from src instead. +// This permits reusing a Reader rather than allocating a new one. 
+// Error is always nil +func (r *Reader) Reset(src io.Reader) error { + if r.error_code < 0 { + // There was an unrecoverable error, leaving the Reader's state + // undefined. Clear out everything but the buffer. + *r = Reader{buf: r.buf} + } + + decoderStateInit(r) + r.src = src + if r.buf == nil { + r.buf = make([]byte, readBufSize) + } + return nil +} + +func (r *Reader) Read(p []byte) (n int, err error) { + if !decoderHasMoreOutput(r) && len(r.in) == 0 { + m, readErr := r.src.Read(r.buf) + if m == 0 { + // If readErr is `nil`, we just proxy underlying stream behavior. + return 0, readErr + } + r.in = r.buf[:m] + } + + if len(p) == 0 { + return 0, nil + } + + for { + var written uint + in_len := uint(len(r.in)) + out_len := uint(len(p)) + in_remaining := in_len + out_remaining := out_len + result := decoderDecompressStream(r, &in_remaining, &r.in, &out_remaining, &p) + written = out_len - out_remaining + n = int(written) + + switch result { + case decoderResultSuccess: + if len(r.in) > 0 { + return n, errExcessiveInput + } + return n, nil + case decoderResultError: + return n, decodeError(decoderGetErrorCode(r)) + case decoderResultNeedsMoreOutput: + if n == 0 { + return 0, io.ErrShortBuffer + } + return n, nil + case decoderNeedsMoreInput: + } + + if len(r.in) != 0 { + return 0, errInvalidState + } + + // Calling r.src.Read may block. Don't block if we have data to return. + if n > 0 { + return n, nil + } + + // Top off the buffer. + encN, err := r.src.Read(r.buf) + if encN == 0 { + // Not enough data to complete decoding. + if err == io.EOF { + return 0, io.ErrUnexpectedEOF + } + return 0, err + } + r.in = r.buf[:encN] + } +} diff --git a/vendor/github.com/andybalholm/brotli/ringbuffer.go b/vendor/github.com/andybalholm/brotli/ringbuffer.go new file mode 100644 index 00000000000..1c8f86feece --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/ringbuffer.go @@ -0,0 +1,134 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. 
+ + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of + data in a circular manner: writing a byte writes it to: + `position() % (1 << window_bits)'. + For convenience, the ringBuffer array contains another copy of the + first `1 << tail_bits' bytes: + buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits), + and another copy of the last two bytes: + buffer_[-1] == buffer_[(1 << window_bits) - 1] and + buffer_[-2] == buffer_[(1 << window_bits) - 2]. */ +type ringBuffer struct { + size_ uint32 + mask_ uint32 + tail_size_ uint32 + total_size_ uint32 + cur_size_ uint32 + pos_ uint32 + data_ []byte + buffer_ []byte +} + +func ringBufferInit(rb *ringBuffer) { + rb.pos_ = 0 +} + +func ringBufferSetup(params *encoderParams, rb *ringBuffer) { + var window_bits int = computeRbBits(params) + var tail_bits int = params.lgblock + *(*uint32)(&rb.size_) = 1 << uint(window_bits) + *(*uint32)(&rb.mask_) = (1 << uint(window_bits)) - 1 + *(*uint32)(&rb.tail_size_) = 1 << uint(tail_bits) + *(*uint32)(&rb.total_size_) = rb.size_ + rb.tail_size_ +} + +const kSlackForEightByteHashingEverywhere uint = 7 + +/* Allocates or re-allocates data_ to the given length + plus some slack + region before and after. Fills the slack regions with zeros. 
*/ +func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) { + var new_data []byte + var i uint + size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere) + if cap(rb.data_) < size { + new_data = make([]byte, size) + } else { + new_data = rb.data_[:size] + } + if rb.data_ != nil { + copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)]) + } + + rb.data_ = new_data + rb.cur_size_ = buflen + rb.buffer_ = rb.data_[2:] + rb.data_[1] = 0 + rb.data_[0] = rb.data_[1] + for i = 0; i < kSlackForEightByteHashingEverywhere; i++ { + rb.buffer_[rb.cur_size_+uint32(i)] = 0 + } +} + +func ringBufferWriteTail(bytes []byte, n uint, rb *ringBuffer) { + var masked_pos uint = uint(rb.pos_ & rb.mask_) + if uint32(masked_pos) < rb.tail_size_ { + /* Just fill the tail buffer with the beginning data. */ + var p uint = uint(rb.size_ + uint32(masked_pos)) + copy(rb.buffer_[p:], bytes[:brotli_min_size_t(n, uint(rb.tail_size_-uint32(masked_pos)))]) + } +} + +/* Push bytes into the ring buffer. */ +func ringBufferWrite(bytes []byte, n uint, rb *ringBuffer) { + if rb.pos_ == 0 && uint32(n) < rb.tail_size_ { + /* Special case for the first write: to process the first block, we don't + need to allocate the whole ring-buffer and we don't need the tail + either. However, we do this memory usage optimization only if the + first write is less than the tail size, which is also the input block + size, otherwise it is likely that other blocks will follow and we + will need to reallocate to the full size anyway. */ + rb.pos_ = uint32(n) + + ringBufferInitBuffer(rb.pos_, rb) + copy(rb.buffer_, bytes[:n]) + return + } + + if rb.cur_size_ < rb.total_size_ { + /* Lazily allocate the full buffer. */ + ringBufferInitBuffer(rb.total_size_, rb) + + /* Initialize the last two bytes to zero, so that we don't have to worry + later when we copy the last two bytes to the first two positions. 
*/ + rb.buffer_[rb.size_-2] = 0 + + rb.buffer_[rb.size_-1] = 0 + } + { + var masked_pos uint = uint(rb.pos_ & rb.mask_) + + /* The length of the writes is limited so that we do not need to worry + about a write */ + ringBufferWriteTail(bytes, n, rb) + + if uint32(masked_pos+n) <= rb.size_ { + /* A single write fits. */ + copy(rb.buffer_[masked_pos:], bytes[:n]) + } else { + /* Split into two writes. + Copy into the end of the buffer, including the tail buffer. */ + copy(rb.buffer_[masked_pos:], bytes[:brotli_min_size_t(n, uint(rb.total_size_-uint32(masked_pos)))]) + + /* Copy into the beginning of the buffer */ + copy(rb.buffer_, bytes[rb.size_-uint32(masked_pos):][:uint32(n)-(rb.size_-uint32(masked_pos))]) + } + } + { + var not_first_lap bool = rb.pos_&(1<<31) != 0 + var rb_pos_mask uint32 = (1 << 31) - 1 + rb.data_[0] = rb.buffer_[rb.size_-2] + rb.data_[1] = rb.buffer_[rb.size_-1] + rb.pos_ = (rb.pos_ & rb_pos_mask) + uint32(uint32(n)&rb_pos_mask) + if not_first_lap { + /* Wrap, but preserve not-a-first-lap feature. */ + rb.pos_ |= 1 << 31 + } + } +} diff --git a/vendor/github.com/andybalholm/brotli/state.go b/vendor/github.com/andybalholm/brotli/state.go new file mode 100644 index 00000000000..38d753ebe4d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/state.go @@ -0,0 +1,294 @@ +package brotli + +import "io" + +/* Copyright 2015 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Brotli state for partial streaming decoding. 
*/ +const ( + stateUninited = iota + stateLargeWindowBits + stateInitialize + stateMetablockBegin + stateMetablockHeader + stateMetablockHeader2 + stateContextModes + stateCommandBegin + stateCommandInner + stateCommandPostDecodeLiterals + stateCommandPostWrapCopy + stateUncompressed + stateMetadata + stateCommandInnerWrite + stateMetablockDone + stateCommandPostWrite1 + stateCommandPostWrite2 + stateHuffmanCode0 + stateHuffmanCode1 + stateHuffmanCode2 + stateHuffmanCode3 + stateContextMap1 + stateContextMap2 + stateTreeGroup + stateDone +) + +const ( + stateMetablockHeaderNone = iota + stateMetablockHeaderEmpty + stateMetablockHeaderNibbles + stateMetablockHeaderSize + stateMetablockHeaderUncompressed + stateMetablockHeaderReserved + stateMetablockHeaderBytes + stateMetablockHeaderMetadata +) + +const ( + stateUncompressedNone = iota + stateUncompressedWrite +) + +const ( + stateTreeGroupNone = iota + stateTreeGroupLoop +) + +const ( + stateContextMapNone = iota + stateContextMapReadPrefix + stateContextMapHuffman + stateContextMapDecode + stateContextMapTransform +) + +const ( + stateHuffmanNone = iota + stateHuffmanSimpleSize + stateHuffmanSimpleRead + stateHuffmanSimpleBuild + stateHuffmanComplex + stateHuffmanLengthSymbols +) + +const ( + stateDecodeUint8None = iota + stateDecodeUint8Short + stateDecodeUint8Long +) + +const ( + stateReadBlockLengthNone = iota + stateReadBlockLengthSuffix +) + +type Reader struct { + src io.Reader + buf []byte // scratch space for reading from src + in []byte // current chunk to decode; usually aliases buf + + state int + loop_counter int + br bitReader + buffer struct { + u64 uint64 + u8 [8]byte + } + buffer_length uint32 + pos int + max_backward_distance int + max_distance int + ringbuffer_size int + ringbuffer_mask int + dist_rb_idx int + dist_rb [4]int + error_code int + sub_loop_counter uint32 + ringbuffer []byte + ringbuffer_end []byte + htree_command []huffmanCode + context_lookup []byte + context_map_slice []byte + 
dist_context_map_slice []byte + literal_hgroup huffmanTreeGroup + insert_copy_hgroup huffmanTreeGroup + distance_hgroup huffmanTreeGroup + block_type_trees []huffmanCode + block_len_trees []huffmanCode + trivial_literal_context int + distance_context int + meta_block_remaining_len int + block_length_index uint32 + block_length [3]uint32 + num_block_types [3]uint32 + block_type_rb [6]uint32 + distance_postfix_bits uint32 + num_direct_distance_codes uint32 + distance_postfix_mask int + num_dist_htrees uint32 + dist_context_map []byte + literal_htree []huffmanCode + dist_htree_index byte + repeat_code_len uint32 + prev_code_len uint32 + copy_length int + distance_code int + rb_roundtrips uint + partial_pos_out uint + symbol uint32 + repeat uint32 + space uint32 + table [32]huffmanCode + symbol_lists symbolList + symbols_lists_array [huffmanMaxCodeLength + 1 + numCommandSymbols]uint16 + next_symbol [32]int + code_length_code_lengths [codeLengthCodes]byte + code_length_histo [16]uint16 + htree_index int + next []huffmanCode + context_index uint32 + max_run_length_prefix uint32 + code uint32 + context_map_table [huffmanMaxSize272]huffmanCode + substate_metablock_header int + substate_tree_group int + substate_context_map int + substate_uncompressed int + substate_huffman int + substate_decode_uint8 int + substate_read_block_length int + is_last_metablock uint + is_uncompressed uint + is_metadata uint + should_wrap_ringbuffer uint + canny_ringbuffer_allocation uint + large_window bool + size_nibbles uint + window_bits uint32 + new_ringbuffer_size int + num_literal_htrees uint32 + context_map []byte + context_modes []byte + dictionary *dictionary + transforms *transforms + trivial_literal_contexts [8]uint32 +} + +func decoderStateInit(s *Reader) bool { + s.error_code = 0 /* BROTLI_DECODER_NO_ERROR */ + + initBitReader(&s.br) + s.state = stateUninited + s.large_window = false + s.substate_metablock_header = stateMetablockHeaderNone + s.substate_tree_group = 
stateTreeGroupNone + s.substate_context_map = stateContextMapNone + s.substate_uncompressed = stateUncompressedNone + s.substate_huffman = stateHuffmanNone + s.substate_decode_uint8 = stateDecodeUint8None + s.substate_read_block_length = stateReadBlockLengthNone + + s.buffer_length = 0 + s.loop_counter = 0 + s.pos = 0 + s.rb_roundtrips = 0 + s.partial_pos_out = 0 + + s.block_type_trees = nil + s.block_len_trees = nil + s.ringbuffer_size = 0 + s.new_ringbuffer_size = 0 + s.ringbuffer_mask = 0 + + s.context_map = nil + s.context_modes = nil + s.dist_context_map = nil + s.context_map_slice = nil + s.dist_context_map_slice = nil + + s.sub_loop_counter = 0 + + s.literal_hgroup.codes = nil + s.literal_hgroup.htrees = nil + s.insert_copy_hgroup.codes = nil + s.insert_copy_hgroup.htrees = nil + s.distance_hgroup.codes = nil + s.distance_hgroup.htrees = nil + + s.is_last_metablock = 0 + s.is_uncompressed = 0 + s.is_metadata = 0 + s.should_wrap_ringbuffer = 0 + s.canny_ringbuffer_allocation = 1 + + s.window_bits = 0 + s.max_distance = 0 + s.dist_rb[0] = 16 + s.dist_rb[1] = 15 + s.dist_rb[2] = 11 + s.dist_rb[3] = 4 + s.dist_rb_idx = 0 + s.block_type_trees = nil + s.block_len_trees = nil + + s.symbol_lists.storage = s.symbols_lists_array[:] + s.symbol_lists.offset = huffmanMaxCodeLength + 1 + + s.dictionary = getDictionary() + s.transforms = getTransforms() + + return true +} + +func decoderStateMetablockBegin(s *Reader) { + s.meta_block_remaining_len = 0 + s.block_length[0] = 1 << 24 + s.block_length[1] = 1 << 24 + s.block_length[2] = 1 << 24 + s.num_block_types[0] = 1 + s.num_block_types[1] = 1 + s.num_block_types[2] = 1 + s.block_type_rb[0] = 1 + s.block_type_rb[1] = 0 + s.block_type_rb[2] = 1 + s.block_type_rb[3] = 0 + s.block_type_rb[4] = 1 + s.block_type_rb[5] = 0 + s.context_map = nil + s.context_modes = nil + s.dist_context_map = nil + s.context_map_slice = nil + s.literal_htree = nil + s.dist_context_map_slice = nil + s.dist_htree_index = 0 + s.context_lookup = nil + 
s.literal_hgroup.codes = nil + s.literal_hgroup.htrees = nil + s.insert_copy_hgroup.codes = nil + s.insert_copy_hgroup.htrees = nil + s.distance_hgroup.codes = nil + s.distance_hgroup.htrees = nil +} + +func decoderStateCleanupAfterMetablock(s *Reader) { + s.context_modes = nil + s.context_map = nil + s.dist_context_map = nil + s.literal_hgroup.htrees = nil + s.insert_copy_hgroup.htrees = nil + s.distance_hgroup.htrees = nil +} + +func decoderHuffmanTreeGroupInit(s *Reader, group *huffmanTreeGroup, alphabet_size uint32, max_symbol uint32, ntrees uint32) bool { + var max_table_size uint = uint(kMaxHuffmanTableSize[(alphabet_size+31)>>5]) + group.alphabet_size = uint16(alphabet_size) + group.max_symbol = uint16(max_symbol) + group.num_htrees = uint16(ntrees) + group.htrees = make([][]huffmanCode, ntrees) + group.codes = make([]huffmanCode, (uint(ntrees) * max_table_size)) + return !(group.codes == nil) +} diff --git a/vendor/github.com/andybalholm/brotli/static_dict.go b/vendor/github.com/andybalholm/brotli/static_dict.go new file mode 100644 index 00000000000..bc05566d6f8 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/static_dict.go @@ -0,0 +1,662 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Class to model the static dictionary. */ + +const maxStaticDictionaryMatchLen = 37 + +const kInvalidMatch uint32 = 0xFFFFFFF + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ +func hash(data []byte) uint32 { + var h uint32 = binary.LittleEndian.Uint32(data) * kDictHashMul32 + + /* The higher bits contain more mixture from the multiplication, + so we take our results from there. 
*/ + return h >> uint(32-kDictNumBits) +} + +func addMatch(distance uint, len uint, len_code uint, matches []uint32) { + var match uint32 = uint32((distance << 5) + len_code) + matches[len] = brotli_min_uint32_t(matches[len], match) +} + +func dictMatchLength(dict *dictionary, data []byte, id uint, len uint, maxlen uint) uint { + var offset uint = uint(dict.offsets_by_length[len]) + len*id + return findMatchLengthWithLimit(dict.data[offset:], data, brotli_min_size_t(uint(len), maxlen)) +} + +func isMatch(d *dictionary, w dictWord, data []byte, max_length uint) bool { + if uint(w.len) > max_length { + return false + } else { + var offset uint = uint(d.offsets_by_length[w.len]) + uint(w.len)*uint(w.idx) + var dict []byte = d.data[offset:] + if w.transform == 0 { + /* Match against base dictionary word. */ + return findMatchLengthWithLimit(dict, data, uint(w.len)) == uint(w.len) + } else if w.transform == 10 { + /* Match against uppercase first transform. + Note that there are only ASCII uppercase words in the lookup table. */ + return dict[0] >= 'a' && dict[0] <= 'z' && (dict[0]^32) == data[0] && findMatchLengthWithLimit(dict[1:], data[1:], uint(w.len)-1) == uint(w.len-1) + } else { + /* Match against uppercase all transform. + Note that there are only ASCII uppercase words in the lookup table. 
*/ + var i uint + for i = 0; i < uint(w.len); i++ { + if dict[i] >= 'a' && dict[i] <= 'z' { + if (dict[i] ^ 32) != data[i] { + return false + } + } else { + if dict[i] != data[i] { + return false + } + } + } + + return true + } + } +} + +func findAllStaticDictionaryMatches(dict *encoderDictionary, data []byte, min_length uint, max_length uint, matches []uint32) bool { + var has_found_match bool = false + { + var offset uint = uint(dict.buckets[hash(data)]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 { + var matchlen uint = dictMatchLength(dict.words, data, id, l, max_length) + var s []byte + var minlen uint + var maxlen uint + var len uint + + /* Transform "" + BROTLI_TRANSFORM_IDENTITY + "" */ + if matchlen == l { + addMatch(id, l, l, matches) + has_found_match = true + } + + /* Transforms "" + BROTLI_TRANSFORM_OMIT_LAST_1 + "" and + "" + BROTLI_TRANSFORM_OMIT_LAST_1 + "ing " */ + if matchlen >= l-1 { + addMatch(id+12*n, l-1, l, matches) + if l+2 < max_length && data[l-1] == 'i' && data[l] == 'n' && data[l+1] == 'g' && data[l+2] == ' ' { + addMatch(id+49*n, l+3, l, matches) + } + + has_found_match = true + } + + /* Transform "" + BROTLI_TRANSFORM_OMIT_LAST_# + "" (# = 2 .. 
9) */ + minlen = min_length + + if l > 9 { + minlen = brotli_max_size_t(minlen, l-9) + } + maxlen = brotli_min_size_t(matchlen, l-2) + for len = minlen; len <= maxlen; len++ { + var cut uint = l - len + var transform_id uint = (cut << 2) + uint((dict.cutoffTransforms>>(cut*6))&0x3F) + addMatch(id+transform_id*n, uint(len), l, matches) + has_found_match = true + } + + if matchlen < l || l+6 >= max_length { + continue + } + + s = data[l:] + + /* Transforms "" + BROTLI_TRANSFORM_IDENTITY + */ + if s[0] == ' ' { + addMatch(id+n, l+1, l, matches) + if s[1] == 'a' { + if s[2] == ' ' { + addMatch(id+28*n, l+3, l, matches) + } else if s[2] == 's' { + if s[3] == ' ' { + addMatch(id+46*n, l+4, l, matches) + } + } else if s[2] == 't' { + if s[3] == ' ' { + addMatch(id+60*n, l+4, l, matches) + } + } else if s[2] == 'n' { + if s[3] == 'd' && s[4] == ' ' { + addMatch(id+10*n, l+5, l, matches) + } + } + } else if s[1] == 'b' { + if s[2] == 'y' && s[3] == ' ' { + addMatch(id+38*n, l+4, l, matches) + } + } else if s[1] == 'i' { + if s[2] == 'n' { + if s[3] == ' ' { + addMatch(id+16*n, l+4, l, matches) + } + } else if s[2] == 's' { + if s[3] == ' ' { + addMatch(id+47*n, l+4, l, matches) + } + } + } else if s[1] == 'f' { + if s[2] == 'o' { + if s[3] == 'r' && s[4] == ' ' { + addMatch(id+25*n, l+5, l, matches) + } + } else if s[2] == 'r' { + if s[3] == 'o' && s[4] == 'm' && s[5] == ' ' { + addMatch(id+37*n, l+6, l, matches) + } + } + } else if s[1] == 'o' { + if s[2] == 'f' { + if s[3] == ' ' { + addMatch(id+8*n, l+4, l, matches) + } + } else if s[2] == 'n' { + if s[3] == ' ' { + addMatch(id+45*n, l+4, l, matches) + } + } + } else if s[1] == 'n' { + if s[2] == 'o' && s[3] == 't' && s[4] == ' ' { + addMatch(id+80*n, l+5, l, matches) + } + } else if s[1] == 't' { + if s[2] == 'h' { + if s[3] == 'e' { + if s[4] == ' ' { + addMatch(id+5*n, l+5, l, matches) + } + } else if s[3] == 'a' { + if s[4] == 't' && s[5] == ' ' { + addMatch(id+29*n, l+6, l, matches) + } + } + } else if s[2] == 'o' { 
+ if s[3] == ' ' { + addMatch(id+17*n, l+4, l, matches) + } + } + } else if s[1] == 'w' { + if s[2] == 'i' && s[3] == 't' && s[4] == 'h' && s[5] == ' ' { + addMatch(id+35*n, l+6, l, matches) + } + } + } else if s[0] == '"' { + addMatch(id+19*n, l+1, l, matches) + if s[1] == '>' { + addMatch(id+21*n, l+2, l, matches) + } + } else if s[0] == '.' { + addMatch(id+20*n, l+1, l, matches) + if s[1] == ' ' { + addMatch(id+31*n, l+2, l, matches) + if s[2] == 'T' && s[3] == 'h' { + if s[4] == 'e' { + if s[5] == ' ' { + addMatch(id+43*n, l+6, l, matches) + } + } else if s[4] == 'i' { + if s[5] == 's' && s[6] == ' ' { + addMatch(id+75*n, l+7, l, matches) + } + } + } + } + } else if s[0] == ',' { + addMatch(id+76*n, l+1, l, matches) + if s[1] == ' ' { + addMatch(id+14*n, l+2, l, matches) + } + } else if s[0] == '\n' { + addMatch(id+22*n, l+1, l, matches) + if s[1] == '\t' { + addMatch(id+50*n, l+2, l, matches) + } + } else if s[0] == ']' { + addMatch(id+24*n, l+1, l, matches) + } else if s[0] == '\'' { + addMatch(id+36*n, l+1, l, matches) + } else if s[0] == ':' { + addMatch(id+51*n, l+1, l, matches) + } else if s[0] == '(' { + addMatch(id+57*n, l+1, l, matches) + } else if s[0] == '=' { + if s[1] == '"' { + addMatch(id+70*n, l+2, l, matches) + } else if s[1] == '\'' { + addMatch(id+86*n, l+2, l, matches) + } + } else if s[0] == 'a' { + if s[1] == 'l' && s[2] == ' ' { + addMatch(id+84*n, l+3, l, matches) + } + } else if s[0] == 'e' { + if s[1] == 'd' { + if s[2] == ' ' { + addMatch(id+53*n, l+3, l, matches) + } + } else if s[1] == 'r' { + if s[2] == ' ' { + addMatch(id+82*n, l+3, l, matches) + } + } else if s[1] == 's' { + if s[2] == 't' && s[3] == ' ' { + addMatch(id+95*n, l+4, l, matches) + } + } + } else if s[0] == 'f' { + if s[1] == 'u' && s[2] == 'l' && s[3] == ' ' { + addMatch(id+90*n, l+4, l, matches) + } + } else if s[0] == 'i' { + if s[1] == 'v' { + if s[2] == 'e' && s[3] == ' ' { + addMatch(id+92*n, l+4, l, matches) + } + } else if s[1] == 'z' { + if s[2] == 'e' && 
s[3] == ' ' { + addMatch(id+100*n, l+4, l, matches) + } + } + } else if s[0] == 'l' { + if s[1] == 'e' { + if s[2] == 's' && s[3] == 's' && s[4] == ' ' { + addMatch(id+93*n, l+5, l, matches) + } + } else if s[1] == 'y' { + if s[2] == ' ' { + addMatch(id+61*n, l+3, l, matches) + } + } + } else if s[0] == 'o' { + if s[1] == 'u' && s[2] == 's' && s[3] == ' ' { + addMatch(id+106*n, l+4, l, matches) + } + } + } else { + var is_all_caps bool = (w.transform != transformUppercaseFirst) + /* Set is_all_caps=0 for BROTLI_TRANSFORM_UPPERCASE_FIRST and + is_all_caps=1 otherwise (BROTLI_TRANSFORM_UPPERCASE_ALL) + transform. */ + + var s []byte + if !isMatch(dict.words, w, data, max_length) { + continue + } + + /* Transform "" + kUppercase{First,All} + "" */ + var tmp int + if is_all_caps { + tmp = 44 + } else { + tmp = 9 + } + addMatch(id+uint(tmp)*n, l, l, matches) + + has_found_match = true + if l+1 >= max_length { + continue + } + + /* Transforms "" + kUppercase{First,All} + */ + s = data[l:] + + if s[0] == ' ' { + var tmp int + if is_all_caps { + tmp = 68 + } else { + tmp = 4 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + } else if s[0] == '"' { + var tmp int + if is_all_caps { + tmp = 87 + } else { + tmp = 66 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + if s[1] == '>' { + var tmp int + if is_all_caps { + tmp = 97 + } else { + tmp = 69 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } else if s[0] == '.' 
{ + var tmp int + if is_all_caps { + tmp = 101 + } else { + tmp = 79 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 114 + } else { + tmp = 88 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } else if s[0] == ',' { + var tmp int + if is_all_caps { + tmp = 112 + } else { + tmp = 99 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 107 + } else { + tmp = 58 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } else if s[0] == '\'' { + var tmp int + if is_all_caps { + tmp = 94 + } else { + tmp = 74 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + } else if s[0] == '(' { + var tmp int + if is_all_caps { + tmp = 113 + } else { + tmp = 78 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + } else if s[0] == '=' { + if s[1] == '"' { + var tmp int + if is_all_caps { + tmp = 105 + } else { + tmp = 104 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if s[1] == '\'' { + var tmp int + if is_all_caps { + tmp = 116 + } else { + tmp = 108 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } + } + } + } + } + + /* Transforms with prefixes " " and "." */ + if max_length >= 5 && (data[0] == ' ' || data[0] == '.') { + var is_space bool = (data[0] == ' ') + var offset uint = uint(dict.buckets[hash(data[1:])]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 { + var s []byte + if !isMatch(dict.words, w, data[1:], max_length-1) { + continue + } + + /* Transforms " " + BROTLI_TRANSFORM_IDENTITY + "" and + "." 
+ BROTLI_TRANSFORM_IDENTITY + "" */ + var tmp int + if is_space { + tmp = 6 + } else { + tmp = 32 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + + has_found_match = true + if l+2 >= max_length { + continue + } + + /* Transforms " " + BROTLI_TRANSFORM_IDENTITY + and + "." + BROTLI_TRANSFORM_IDENTITY + + */ + s = data[l+1:] + + if s[0] == ' ' { + var tmp int + if is_space { + tmp = 2 + } else { + tmp = 77 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if s[0] == '(' { + var tmp int + if is_space { + tmp = 89 + } else { + tmp = 67 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if is_space { + if s[0] == ',' { + addMatch(id+103*n, l+2, l, matches) + if s[1] == ' ' { + addMatch(id+33*n, l+3, l, matches) + } + } else if s[0] == '.' { + addMatch(id+71*n, l+2, l, matches) + if s[1] == ' ' { + addMatch(id+52*n, l+3, l, matches) + } + } else if s[0] == '=' { + if s[1] == '"' { + addMatch(id+81*n, l+3, l, matches) + } else if s[1] == '\'' { + addMatch(id+98*n, l+3, l, matches) + } + } + } + } else if is_space { + var is_all_caps bool = (w.transform != transformUppercaseFirst) + /* Set is_all_caps=0 for BROTLI_TRANSFORM_UPPERCASE_FIRST and + is_all_caps=1 otherwise (BROTLI_TRANSFORM_UPPERCASE_ALL) + transform. 
*/ + + var s []byte + if !isMatch(dict.words, w, data[1:], max_length-1) { + continue + } + + /* Transforms " " + kUppercase{First,All} + "" */ + var tmp int + if is_all_caps { + tmp = 85 + } else { + tmp = 30 + } + addMatch(id+uint(tmp)*n, l+1, l, matches) + + has_found_match = true + if l+2 >= max_length { + continue + } + + /* Transforms " " + kUppercase{First,All} + */ + s = data[l+1:] + + if s[0] == ' ' { + var tmp int + if is_all_caps { + tmp = 83 + } else { + tmp = 15 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + } else if s[0] == ',' { + if !is_all_caps { + addMatch(id+109*n, l+2, l, matches) + } + + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 111 + } else { + tmp = 65 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } + } else if s[0] == '.' { + var tmp int + if is_all_caps { + tmp = 115 + } else { + tmp = 96 + } + addMatch(id+uint(tmp)*n, l+2, l, matches) + if s[1] == ' ' { + var tmp int + if is_all_caps { + tmp = 117 + } else { + tmp = 91 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } + } else if s[0] == '=' { + if s[1] == '"' { + var tmp int + if is_all_caps { + tmp = 110 + } else { + tmp = 118 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } else if s[1] == '\'' { + var tmp int + if is_all_caps { + tmp = 119 + } else { + tmp = 120 + } + addMatch(id+uint(tmp)*n, l+3, l, matches) + } + } + } + } + } + + if max_length >= 6 { + /* Transforms with prefixes "e ", "s ", ", " and "\xC2\xA0" */ + if (data[1] == ' ' && (data[0] == 'e' || data[0] == 's' || data[0] == ',')) || (data[0] == 0xC2 && data[1] == 0xA0) { + var offset uint = uint(dict.buckets[hash(data[2:])]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 && isMatch(dict.words, w, data[2:], max_length-2) { + if data[0] == 0xC2 { + addMatch(id+102*n, l+2, 
l, matches) + has_found_match = true + } else if l+2 < max_length && data[l+2] == ' ' { + var t uint = 13 + if data[0] == 'e' { + t = 18 + } else if data[0] == 's' { + t = 7 + } + addMatch(id+t*n, l+3, l, matches) + has_found_match = true + } + } + } + } + } + + if max_length >= 9 { + /* Transforms with prefixes " the " and ".com/" */ + if (data[0] == ' ' && data[1] == 't' && data[2] == 'h' && data[3] == 'e' && data[4] == ' ') || (data[0] == '.' && data[1] == 'c' && data[2] == 'o' && data[3] == 'm' && data[4] == '/') { + var offset uint = uint(dict.buckets[hash(data[5:])]) + var end bool = offset == 0 + for !end { + w := dict.dict_words[offset] + offset++ + var l uint = uint(w.len) & 0x1F + var n uint = uint(1) << dict.words.size_bits_by_length[l] + var id uint = uint(w.idx) + end = !(w.len&0x80 == 0) + w.len = byte(l) + if w.transform == 0 && isMatch(dict.words, w, data[5:], max_length-5) { + var tmp int + if data[0] == ' ' { + tmp = 41 + } else { + tmp = 72 + } + addMatch(id+uint(tmp)*n, l+5, l, matches) + has_found_match = true + if l+5 < max_length { + var s []byte = data[l+5:] + if data[0] == ' ' { + if l+8 < max_length && s[0] == ' ' && s[1] == 'o' && s[2] == 'f' && s[3] == ' ' { + addMatch(id+62*n, l+9, l, matches) + if l+12 < max_length && s[4] == 't' && s[5] == 'h' && s[6] == 'e' && s[7] == ' ' { + addMatch(id+73*n, l+13, l, matches) + } + } + } + } + } + } + } + } + + return has_found_match +} diff --git a/vendor/github.com/andybalholm/brotli/static_dict_lut.go b/vendor/github.com/andybalholm/brotli/static_dict_lut.go new file mode 100644 index 00000000000..b33963e967a --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/static_dict_lut.go @@ -0,0 +1,75094 @@ +package brotli + +/* Copyright 2017 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Lookup table for static dictionary and transforms. 
*/ + +type dictWord struct { + len byte + transform byte + idx uint16 +} + +const kDictNumBits int = 15 + +const kDictHashMul32 uint32 = 0x1E35A7BD + +var kStaticDictionaryBuckets = [32768]uint16{ + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3, + 6, + 0, + 0, + 0, + 0, + 0, + 20, + 0, + 0, + 0, + 21, + 0, + 22, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23, + 0, + 0, + 25, + 0, + 29, + 0, + 53, + 0, + 0, + 0, + 0, + 0, + 0, + 55, + 0, + 0, + 0, + 0, + 0, + 0, + 61, + 76, + 0, + 0, + 0, + 94, + 0, + 0, + 0, + 0, + 0, + 0, + 96, + 0, + 97, + 0, + 98, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 99, + 101, + 106, + 108, + 0, + 0, + 0, + 0, + 0, + 110, + 0, + 111, + 112, + 0, + 113, + 118, + 124, + 0, + 0, + 0, + 0, + 0, + 125, + 128, + 0, + 0, + 0, + 0, + 129, + 0, + 0, + 131, + 0, + 0, + 0, + 0, + 0, + 0, + 132, + 0, + 0, + 135, + 0, + 0, + 0, + 137, + 0, + 0, + 0, + 0, + 0, + 138, + 139, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 142, + 143, + 144, + 0, + 0, + 0, + 0, + 0, + 145, + 0, + 0, + 0, + 146, + 149, + 151, + 152, + 0, + 0, + 153, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 154, + 0, + 0, + 0, + 0, + 0, + 0, + 155, + 0, + 0, + 0, + 0, + 160, + 182, + 0, + 0, + 0, + 0, + 0, + 0, + 183, + 0, + 0, + 0, + 188, + 189, + 0, + 0, + 192, + 0, + 0, + 0, + 0, + 0, + 0, + 194, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 197, + 202, + 209, + 0, + 0, + 210, + 0, + 224, + 0, + 0, + 0, + 225, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 231, + 0, + 0, + 0, + 232, + 0, + 240, + 0, + 0, + 242, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 244, + 0, + 0, + 0, + 246, + 0, + 0, + 249, + 251, + 253, + 0, + 0, + 0, + 0, + 0, + 258, + 0, + 0, + 261, + 263, + 0, + 0, + 0, + 267, + 0, + 0, + 268, + 0, + 269, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 271, + 0, + 0, + 0, + 0, + 0, + 0, + 272, + 0, + 273, + 0, + 277, + 0, + 278, + 286, + 0, + 0, + 0, + 0, + 287, + 0, + 289, + 290, + 291, + 0, + 0, + 0, + 295, + 0, + 0, + 296, + 297, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 298, + 0, + 0, + 0, + 299, + 0, + 0, + 305, + 0, + 324, + 0, + 0, + 0, + 0, + 0, + 327, + 0, + 328, + 329, + 0, + 0, + 0, + 0, + 336, + 0, + 0, + 340, + 0, + 341, + 342, + 343, + 0, + 0, + 346, + 0, + 348, + 0, + 0, + 0, + 0, + 0, + 0, + 349, + 351, + 0, + 0, + 355, + 0, + 363, + 0, + 364, + 0, + 368, + 369, + 0, + 370, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 372, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 373, + 0, + 375, + 0, + 0, + 0, + 0, + 376, + 377, + 0, + 0, + 394, + 395, + 396, + 0, + 0, + 398, + 0, + 0, + 0, + 0, + 400, + 0, + 0, + 408, + 0, + 0, + 0, + 0, + 420, + 0, + 0, + 0, + 0, + 0, + 0, + 421, + 0, + 0, + 422, + 423, + 0, + 0, + 429, + 435, + 436, + 442, + 0, + 0, + 443, + 0, + 444, + 445, + 453, + 456, + 0, + 457, + 0, + 0, + 0, + 0, + 0, + 458, + 0, + 0, + 0, + 459, + 0, + 0, + 0, + 460, + 0, + 462, + 463, + 465, + 0, + 0, + 0, + 0, + 0, + 0, + 466, + 469, + 0, + 0, + 0, + 0, + 0, + 0, + 470, + 0, + 0, + 0, + 474, + 0, + 476, + 0, + 0, + 0, + 0, + 483, + 0, + 485, + 0, + 0, + 0, + 486, + 0, + 0, + 488, + 491, + 492, + 0, + 0, + 497, + 499, + 500, + 0, + 501, + 0, + 0, + 0, + 505, + 0, + 0, + 506, + 0, + 0, + 0, + 507, + 0, + 0, + 0, + 509, + 0, + 0, + 0, + 0, + 511, + 512, + 519, + 0, + 0, + 0, + 0, + 0, + 0, + 529, + 530, + 0, + 0, + 0, + 534, + 0, + 0, + 0, + 0, + 543, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 553, + 0, + 0, + 0, + 0, + 557, + 560, + 0, + 0, + 0, + 0, + 0, + 0, + 561, + 0, + 564, + 0, + 0, + 0, + 0, + 0, + 0, + 565, + 566, + 0, + 575, + 0, + 619, + 0, + 620, + 0, + 0, + 623, + 624, + 0, + 0, + 0, + 625, + 0, + 0, + 626, + 627, + 0, + 0, + 628, + 0, + 0, + 0, + 0, + 630, + 0, + 631, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 641, + 0, + 0, + 0, + 0, + 643, + 656, + 668, + 0, + 0, + 0, + 673, + 0, + 0, + 0, + 674, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 682, + 0, + 687, + 0, + 690, + 0, + 693, + 699, + 700, + 0, + 0, + 0, + 0, + 0, + 0, + 704, + 705, + 0, + 0, + 0, + 0, 
+ 707, + 710, + 0, + 711, + 0, + 0, + 0, + 0, + 726, + 0, + 0, + 729, + 0, + 0, + 0, + 730, + 731, + 0, + 0, + 0, + 0, + 0, + 752, + 0, + 0, + 0, + 762, + 0, + 763, + 0, + 0, + 767, + 0, + 0, + 0, + 770, + 774, + 0, + 0, + 775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 776, + 0, + 0, + 0, + 777, + 783, + 0, + 0, + 0, + 785, + 788, + 0, + 0, + 0, + 0, + 790, + 0, + 0, + 0, + 793, + 0, + 0, + 0, + 0, + 794, + 0, + 0, + 804, + 819, + 821, + 0, + 827, + 0, + 0, + 0, + 834, + 0, + 0, + 835, + 0, + 0, + 0, + 841, + 0, + 844, + 0, + 850, + 851, + 859, + 0, + 860, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 874, + 0, + 876, + 0, + 877, + 890, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 893, + 894, + 898, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 899, + 0, + 0, + 0, + 900, + 904, + 906, + 0, + 0, + 0, + 907, + 0, + 908, + 909, + 0, + 910, + 0, + 0, + 0, + 0, + 911, + 0, + 0, + 0, + 0, + 0, + 916, + 0, + 0, + 0, + 922, + 925, + 0, + 930, + 0, + 934, + 0, + 0, + 0, + 0, + 0, + 943, + 0, + 0, + 944, + 0, + 953, + 954, + 0, + 0, + 0, + 0, + 0, + 0, + 955, + 0, + 962, + 963, + 0, + 0, + 976, + 0, + 0, + 977, + 978, + 979, + 980, + 0, + 981, + 0, + 0, + 0, + 0, + 984, + 0, + 0, + 985, + 0, + 0, + 987, + 989, + 991, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 992, + 0, + 0, + 0, + 993, + 0, + 0, + 0, + 0, + 0, + 0, + 996, + 0, + 0, + 0, + 1000, + 0, + 0, + 0, + 0, + 0, + 1002, + 0, + 0, + 0, + 0, + 1005, + 1007, + 0, + 0, + 0, + 1009, + 0, + 0, + 0, + 1010, + 0, + 0, + 0, + 0, + 0, + 0, + 1011, + 0, + 1012, + 0, + 0, + 0, + 0, + 1014, + 1016, + 0, + 0, + 0, + 1020, + 0, + 1021, + 0, + 0, + 0, + 0, + 1022, + 0, + 0, + 0, + 1024, + 0, + 0, + 0, + 0, + 0, + 0, + 1025, + 0, + 0, + 1026, + 1027, + 0, + 0, + 0, + 0, + 0, + 1031, + 0, + 1033, + 0, + 0, + 0, + 0, + 1034, + 0, + 0, + 0, + 1037, + 1040, + 0, + 0, + 0, + 1042, + 1043, + 0, + 0, + 1053, + 0, + 1054, + 0, + 0, + 1057, + 0, + 0, + 0, + 1058, + 0, + 0, + 1060, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 1061, + 0, + 0, + 1062, + 0, + 0, + 0, + 0, + 1063, + 0, + 0, + 0, + 0, + 1064, + 0, + 0, + 0, + 0, + 0, + 1065, + 0, + 0, + 0, + 0, + 1066, + 1067, + 0, + 0, + 0, + 1069, + 1070, + 1072, + 0, + 0, + 0, + 0, + 0, + 0, + 1073, + 0, + 1075, + 0, + 0, + 0, + 0, + 0, + 0, + 1080, + 1084, + 0, + 0, + 0, + 0, + 1088, + 0, + 0, + 0, + 0, + 0, + 0, + 1094, + 0, + 1095, + 0, + 1107, + 0, + 0, + 0, + 1112, + 1114, + 0, + 1119, + 0, + 1122, + 0, + 0, + 1126, + 0, + 1129, + 0, + 1130, + 0, + 0, + 0, + 0, + 0, + 1132, + 0, + 0, + 0, + 0, + 0, + 0, + 1144, + 0, + 0, + 1145, + 1146, + 0, + 1148, + 1149, + 0, + 0, + 1150, + 1151, + 0, + 0, + 0, + 0, + 1152, + 0, + 1153, + 0, + 0, + 0, + 0, + 0, + 1154, + 0, + 1163, + 0, + 0, + 0, + 1164, + 0, + 0, + 0, + 0, + 0, + 1165, + 0, + 1167, + 0, + 1170, + 0, + 0, + 0, + 0, + 0, + 1171, + 1172, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1173, + 1175, + 1177, + 0, + 1186, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1195, + 0, + 0, + 1221, + 0, + 0, + 1224, + 0, + 0, + 1227, + 0, + 0, + 0, + 0, + 0, + 1228, + 1229, + 0, + 0, + 1230, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1231, + 0, + 0, + 0, + 1233, + 0, + 0, + 1243, + 1244, + 1246, + 1248, + 0, + 0, + 0, + 0, + 1254, + 1255, + 1258, + 1259, + 0, + 0, + 0, + 1260, + 0, + 0, + 1261, + 0, + 0, + 0, + 1262, + 1264, + 0, + 0, + 1265, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1266, + 0, + 1267, + 0, + 0, + 0, + 0, + 1273, + 1274, + 1276, + 1289, + 0, + 0, + 1291, + 1292, + 1293, + 0, + 0, + 1294, + 1295, + 1296, + 0, + 0, + 0, + 0, + 1302, + 0, + 1304, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1311, + 1312, + 0, + 1314, + 0, + 1316, + 1320, + 1321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1322, + 1323, + 1324, + 0, + 1335, + 0, + 1336, + 0, + 0, + 0, + 0, + 1341, + 1342, + 0, + 1346, + 0, + 1357, + 0, + 0, + 0, + 1358, + 1360, + 0, + 0, + 0, + 0, + 0, + 0, + 1361, + 0, + 0, + 0, + 1362, + 1365, + 0, + 1366, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1379, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1386, + 0, + 1388, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1395, + 0, + 0, + 0, + 0, + 1403, + 0, + 1405, + 0, + 0, + 1407, + 0, + 0, + 0, + 0, + 0, + 1408, + 1409, + 0, + 1410, + 0, + 0, + 0, + 1412, + 1413, + 1416, + 0, + 0, + 1429, + 1451, + 0, + 0, + 1454, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1455, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1456, + 0, + 0, + 0, + 0, + 1459, + 1460, + 1461, + 1475, + 0, + 0, + 0, + 0, + 0, + 0, + 1477, + 0, + 1480, + 0, + 1481, + 0, + 0, + 1486, + 0, + 0, + 1495, + 0, + 0, + 0, + 1496, + 0, + 0, + 1498, + 1499, + 1501, + 1520, + 1521, + 0, + 0, + 0, + 1526, + 0, + 0, + 0, + 0, + 1528, + 1529, + 0, + 1533, + 1536, + 0, + 0, + 0, + 1537, + 1538, + 1549, + 0, + 1550, + 1558, + 1559, + 1572, + 0, + 1573, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1575, + 0, + 0, + 0, + 0, + 0, + 1579, + 0, + 1599, + 0, + 1603, + 0, + 1604, + 0, + 1605, + 0, + 0, + 0, + 0, + 0, + 1608, + 1610, + 0, + 0, + 0, + 0, + 1611, + 0, + 1615, + 0, + 1616, + 1618, + 0, + 1619, + 0, + 0, + 1622, + 0, + 0, + 0, + 0, + 1634, + 0, + 0, + 0, + 1635, + 0, + 0, + 0, + 1641, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1643, + 0, + 0, + 0, + 1650, + 0, + 0, + 1652, + 0, + 0, + 0, + 0, + 0, + 1653, + 0, + 0, + 0, + 1654, + 0, + 0, + 0, + 0, + 1655, + 0, + 1662, + 0, + 0, + 1663, + 1664, + 0, + 0, + 1668, + 0, + 0, + 1669, + 1670, + 0, + 1672, + 1673, + 0, + 0, + 0, + 0, + 0, + 1674, + 0, + 0, + 0, + 1675, + 1676, + 1680, + 0, + 1682, + 0, + 0, + 1687, + 0, + 0, + 0, + 0, + 0, + 1704, + 0, + 0, + 1705, + 0, + 0, + 1721, + 0, + 0, + 0, + 0, + 1734, + 1735, + 0, + 0, + 0, + 0, + 1737, + 0, + 0, + 0, + 0, + 1739, + 0, + 0, + 1740, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1741, + 1743, + 0, + 0, + 0, + 0, + 1745, + 0, + 0, + 0, + 1749, + 0, + 0, + 0, + 1751, + 0, + 0, + 0, + 0, + 0, + 0, + 1760, + 0, + 0, + 0, + 0, + 1765, + 0, + 0, + 0, + 0, + 0, + 1784, + 0, + 1785, + 1787, 
+ 0, + 0, + 0, + 0, + 1788, + 1789, + 0, + 0, + 0, + 0, + 1790, + 1791, + 1793, + 0, + 1798, + 1799, + 0, + 0, + 0, + 0, + 1801, + 0, + 1803, + 1805, + 0, + 0, + 0, + 1806, + 1811, + 0, + 1812, + 1814, + 0, + 1821, + 0, + 0, + 0, + 0, + 0, + 1822, + 1833, + 0, + 0, + 0, + 0, + 0, + 0, + 1848, + 0, + 0, + 0, + 0, + 0, + 0, + 1857, + 0, + 0, + 0, + 1859, + 0, + 0, + 0, + 0, + 1861, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1866, + 0, + 1921, + 1925, + 0, + 0, + 0, + 1929, + 1930, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1931, + 0, + 0, + 0, + 0, + 1932, + 0, + 0, + 0, + 1934, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1946, + 0, + 0, + 1948, + 0, + 0, + 0, + 0, + 1950, + 0, + 1957, + 0, + 1958, + 0, + 0, + 0, + 0, + 0, + 1965, + 1967, + 0, + 0, + 0, + 0, + 1968, + 0, + 1969, + 0, + 1971, + 1972, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1973, + 0, + 0, + 0, + 0, + 1975, + 0, + 0, + 0, + 0, + 1976, + 1979, + 0, + 1982, + 0, + 0, + 0, + 0, + 1984, + 1988, + 0, + 0, + 0, + 0, + 1990, + 2004, + 2008, + 0, + 0, + 0, + 2012, + 2013, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2015, + 0, + 2016, + 2017, + 0, + 0, + 0, + 0, + 2021, + 0, + 0, + 2025, + 0, + 0, + 0, + 0, + 0, + 2029, + 2036, + 2040, + 0, + 2042, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2043, + 0, + 0, + 0, + 0, + 0, + 2045, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2046, + 2047, + 0, + 2048, + 2049, + 0, + 2059, + 0, + 0, + 2063, + 0, + 2064, + 2065, + 0, + 0, + 2066, + 0, + 0, + 0, + 0, + 0, + 0, + 2069, + 0, + 0, + 0, + 0, + 2070, + 0, + 2071, + 0, + 2072, + 0, + 0, + 0, + 0, + 2080, + 2082, + 2083, + 0, + 0, + 0, + 0, + 0, + 2085, + 0, + 2086, + 2088, + 2089, + 2105, + 0, + 0, + 0, + 0, + 2107, + 0, + 0, + 2116, + 2117, + 0, + 2120, + 0, + 0, + 2122, + 0, + 0, + 0, + 0, + 0, + 2123, + 0, + 0, + 2125, + 2127, + 2128, + 0, + 0, + 0, + 2130, + 0, + 0, + 0, + 2137, + 2139, + 2140, + 2141, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2144, + 
2145, + 0, + 0, + 2146, + 2149, + 0, + 0, + 0, + 0, + 2150, + 0, + 0, + 2151, + 2158, + 0, + 2159, + 0, + 2160, + 0, + 0, + 0, + 0, + 0, + 0, + 2161, + 2162, + 0, + 0, + 2194, + 2202, + 0, + 0, + 0, + 0, + 0, + 0, + 2205, + 2217, + 0, + 2220, + 0, + 2221, + 0, + 2222, + 2224, + 0, + 0, + 0, + 0, + 2237, + 0, + 0, + 0, + 0, + 0, + 2238, + 0, + 2239, + 2241, + 0, + 0, + 2242, + 0, + 0, + 0, + 0, + 0, + 2243, + 0, + 0, + 0, + 0, + 0, + 0, + 2252, + 0, + 0, + 2253, + 0, + 0, + 0, + 2257, + 2258, + 0, + 0, + 0, + 2260, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2262, + 0, + 2264, + 0, + 0, + 0, + 0, + 0, + 2269, + 2270, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2271, + 0, + 2273, + 0, + 0, + 0, + 0, + 2277, + 0, + 0, + 0, + 0, + 2278, + 0, + 0, + 0, + 0, + 2279, + 0, + 2280, + 0, + 2283, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2287, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2289, + 2290, + 0, + 0, + 0, + 0, + 2291, + 0, + 2292, + 0, + 0, + 0, + 2293, + 2295, + 2296, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2298, + 0, + 0, + 0, + 0, + 0, + 2303, + 0, + 2305, + 0, + 0, + 2306, + 0, + 2307, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2313, + 2314, + 2315, + 2316, + 0, + 0, + 2318, + 0, + 2319, + 0, + 2322, + 0, + 0, + 2323, + 0, + 2324, + 0, + 2326, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2335, + 0, + 2336, + 2338, + 2339, + 0, + 2340, + 0, + 0, + 0, + 2355, + 0, + 2375, + 0, + 2382, + 2386, + 0, + 2387, + 0, + 0, + 2394, + 0, + 0, + 0, + 0, + 2395, + 0, + 2397, + 0, + 0, + 0, + 0, + 0, + 2398, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2399, + 2402, + 2404, + 2408, + 2411, + 0, + 0, + 0, + 2413, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2415, + 0, + 0, + 2416, + 2417, + 2419, + 0, + 2420, + 0, + 0, + 0, + 0, + 0, + 2425, + 0, + 0, + 0, + 2426, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2427, + 2428, + 0, + 2429, + 0, + 0, + 2430, + 2434, + 0, + 2436, + 0, + 0, + 0, + 0, + 
0, + 0, + 2441, + 2442, + 0, + 2445, + 0, + 0, + 2446, + 2457, + 0, + 2459, + 0, + 0, + 2462, + 0, + 2464, + 0, + 2477, + 0, + 2478, + 2486, + 0, + 0, + 0, + 2491, + 0, + 0, + 2493, + 0, + 0, + 2494, + 0, + 2495, + 0, + 2513, + 2523, + 0, + 0, + 0, + 0, + 2524, + 0, + 0, + 0, + 0, + 0, + 0, + 2528, + 2529, + 2530, + 0, + 0, + 2531, + 0, + 2533, + 0, + 0, + 2534, + 2535, + 0, + 2536, + 2537, + 0, + 2538, + 0, + 2539, + 2540, + 0, + 0, + 0, + 2545, + 2546, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2548, + 0, + 0, + 2549, + 0, + 2550, + 2555, + 0, + 0, + 0, + 0, + 0, + 2557, + 0, + 2560, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2561, + 0, + 2576, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2577, + 2578, + 0, + 0, + 0, + 2579, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2580, + 0, + 0, + 0, + 0, + 2581, + 0, + 0, + 0, + 0, + 2583, + 0, + 2584, + 0, + 2588, + 2590, + 0, + 0, + 0, + 2591, + 0, + 0, + 0, + 0, + 2593, + 2594, + 0, + 2595, + 0, + 2601, + 2602, + 0, + 0, + 2603, + 0, + 2605, + 0, + 0, + 0, + 2606, + 2607, + 2611, + 0, + 2615, + 0, + 0, + 0, + 2617, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2619, + 0, + 0, + 2620, + 0, + 0, + 0, + 2621, + 0, + 2623, + 0, + 2625, + 0, + 0, + 2628, + 2629, + 0, + 0, + 2635, + 2636, + 2637, + 0, + 0, + 2639, + 0, + 0, + 0, + 2642, + 0, + 0, + 0, + 0, + 2643, + 0, + 2644, + 0, + 2649, + 0, + 0, + 0, + 0, + 0, + 0, + 2655, + 2656, + 0, + 0, + 2657, + 0, + 0, + 0, + 0, + 0, + 2658, + 0, + 0, + 0, + 0, + 0, + 2659, + 0, + 0, + 0, + 0, + 2664, + 2685, + 0, + 2687, + 0, + 2688, + 0, + 0, + 2689, + 0, + 0, + 2694, + 0, + 2695, + 0, + 0, + 2698, + 0, + 2701, + 2706, + 0, + 0, + 0, + 2707, + 0, + 2709, + 2710, + 2711, + 0, + 0, + 0, + 2720, + 2730, + 2735, + 0, + 0, + 0, + 0, + 2738, + 2740, + 0, + 0, + 0, + 0, + 2747, + 0, + 0, + 0, + 0, + 0, + 0, + 2748, + 0, + 0, + 2749, + 0, + 0, + 0, + 0, + 0, + 2750, + 0, + 0, + 2752, + 2754, + 0, + 0, + 0, + 0, + 0, + 2758, + 0, + 0, + 0, + 0, + 2762, + 0, + 0, + 0, 
+ 0, + 2763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2764, + 2767, + 0, + 0, + 0, + 0, + 2768, + 0, + 0, + 2770, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2772, + 0, + 0, + 0, + 0, + 0, + 2773, + 2776, + 0, + 0, + 2783, + 0, + 0, + 2784, + 0, + 2789, + 0, + 2790, + 0, + 0, + 0, + 2792, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2793, + 2795, + 0, + 0, + 0, + 0, + 0, + 0, + 2796, + 0, + 0, + 0, + 0, + 0, + 0, + 2797, + 2799, + 0, + 0, + 0, + 0, + 2803, + 0, + 0, + 0, + 0, + 2806, + 0, + 2807, + 2808, + 2817, + 2819, + 0, + 0, + 0, + 0, + 0, + 2821, + 0, + 0, + 0, + 0, + 2822, + 2823, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2824, + 0, + 0, + 2828, + 0, + 2834, + 0, + 0, + 0, + 0, + 0, + 0, + 2836, + 0, + 2838, + 0, + 0, + 2839, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2841, + 0, + 0, + 0, + 2842, + 0, + 0, + 0, + 0, + 0, + 2843, + 2844, + 0, + 0, + 0, + 0, + 2846, + 0, + 0, + 2847, + 0, + 2849, + 0, + 2853, + 0, + 0, + 0, + 0, + 0, + 2857, + 0, + 0, + 0, + 0, + 2858, + 0, + 2859, + 0, + 0, + 2860, + 0, + 2862, + 2868, + 0, + 0, + 0, + 0, + 2875, + 0, + 2876, + 0, + 0, + 2877, + 2878, + 2884, + 2889, + 2890, + 0, + 0, + 2891, + 0, + 0, + 2892, + 0, + 0, + 0, + 2906, + 2912, + 0, + 2913, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2916, + 0, + 2934, + 0, + 0, + 0, + 0, + 0, + 2935, + 0, + 0, + 0, + 0, + 2939, + 0, + 2940, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2941, + 0, + 0, + 0, + 2946, + 0, + 2949, + 0, + 0, + 2950, + 2954, + 2955, + 0, + 0, + 0, + 2959, + 2961, + 0, + 0, + 2962, + 0, + 2963, + 0, + 0, + 0, + 0, + 0, + 0, + 2964, + 2965, + 2966, + 2967, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2969, + 0, + 0, + 0, + 0, + 0, + 2970, + 2975, + 0, + 2982, + 2983, + 2984, + 0, + 0, + 0, + 0, + 0, + 2989, + 0, + 0, + 2990, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2991, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2998, + 0, + 3000, + 3001, + 0, + 0, + 3002, + 0, + 0, + 0, + 3003, + 0, + 0, 
+ 3012, + 0, + 0, + 3022, + 0, + 0, + 3024, + 0, + 0, + 3025, + 3027, + 0, + 0, + 0, + 3030, + 0, + 0, + 0, + 0, + 3034, + 3035, + 0, + 0, + 3036, + 0, + 3039, + 0, + 3049, + 0, + 0, + 3050, + 0, + 0, + 0, + 0, + 0, + 0, + 3051, + 0, + 3053, + 0, + 0, + 0, + 0, + 3057, + 0, + 3058, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3063, + 0, + 0, + 3073, + 3074, + 3078, + 3079, + 0, + 3080, + 3086, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3087, + 0, + 3092, + 0, + 3095, + 0, + 3099, + 0, + 0, + 0, + 3100, + 0, + 3101, + 3102, + 0, + 3122, + 0, + 0, + 0, + 3124, + 0, + 3125, + 0, + 0, + 0, + 0, + 0, + 0, + 3132, + 3134, + 0, + 0, + 3136, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3147, + 0, + 0, + 3149, + 0, + 0, + 0, + 0, + 0, + 3150, + 3151, + 3152, + 0, + 0, + 0, + 0, + 3158, + 0, + 0, + 3160, + 0, + 0, + 3161, + 0, + 0, + 3162, + 0, + 3163, + 3166, + 3168, + 0, + 0, + 3169, + 3170, + 0, + 0, + 3171, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3182, + 0, + 3184, + 0, + 0, + 3188, + 0, + 0, + 3194, + 0, + 0, + 0, + 0, + 0, + 0, + 3204, + 0, + 0, + 0, + 0, + 3209, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3216, + 3217, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3219, + 0, + 0, + 3220, + 3222, + 0, + 3223, + 0, + 0, + 0, + 0, + 3224, + 0, + 3225, + 3226, + 0, + 3228, + 3233, + 0, + 3239, + 3241, + 3242, + 0, + 0, + 3251, + 3252, + 3253, + 3255, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3260, + 0, + 0, + 3261, + 0, + 0, + 0, + 3267, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3271, + 0, + 0, + 0, + 3278, + 0, + 3282, + 0, + 0, + 0, + 3284, + 0, + 0, + 0, + 3285, + 3286, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3287, + 3292, + 0, + 0, + 0, + 0, + 3294, + 3296, + 0, + 0, + 3299, + 3300, + 3301, + 0, + 3302, + 0, + 0, + 0, + 0, + 0, + 3304, + 3306, + 0, + 0, + 0, + 0, + 0, + 0, + 3308, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3311, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3312, + 3314, + 3315, + 0, + 3318, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3319, + 0, + 0, + 0, + 0, + 0, + 
3321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3322, + 0, + 0, + 3324, + 3325, + 0, + 0, + 3326, + 0, + 0, + 3328, + 3329, + 3331, + 0, + 0, + 3335, + 0, + 0, + 3337, + 0, + 3338, + 0, + 0, + 0, + 0, + 3343, + 3347, + 0, + 0, + 0, + 3348, + 0, + 0, + 3351, + 0, + 0, + 0, + 0, + 0, + 0, + 3354, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3355, + 0, + 0, + 3365, + 3366, + 3367, + 0, + 0, + 0, + 0, + 0, + 0, + 3368, + 3369, + 0, + 3370, + 0, + 0, + 3373, + 0, + 0, + 3376, + 0, + 0, + 3377, + 0, + 3379, + 3387, + 0, + 0, + 0, + 0, + 0, + 3390, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3402, + 0, + 3403, + 3436, + 3437, + 3439, + 0, + 0, + 3441, + 0, + 0, + 0, + 3442, + 0, + 0, + 3449, + 0, + 0, + 0, + 3450, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3451, + 0, + 0, + 3452, + 0, + 3453, + 3456, + 0, + 3457, + 0, + 0, + 3458, + 0, + 3459, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3460, + 0, + 0, + 3469, + 3470, + 0, + 0, + 3475, + 0, + 0, + 0, + 3480, + 3487, + 3489, + 0, + 3490, + 0, + 0, + 3491, + 3499, + 0, + 3500, + 0, + 0, + 3501, + 0, + 0, + 0, + 3502, + 0, + 3514, + 0, + 0, + 0, + 3516, + 3517, + 0, + 0, + 0, + 3518, + 0, + 0, + 0, + 0, + 3520, + 3521, + 3522, + 0, + 0, + 3526, + 3530, + 0, + 0, + 0, + 0, + 3531, + 0, + 0, + 0, + 0, + 3536, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3539, + 3541, + 0, + 0, + 3542, + 3544, + 0, + 3547, + 3548, + 0, + 0, + 3550, + 0, + 3553, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3554, + 0, + 3555, + 0, + 3558, + 0, + 3559, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3563, + 0, + 3581, + 0, + 0, + 0, + 3599, + 0, + 0, + 0, + 3600, + 0, + 3601, + 0, + 3602, + 3603, + 0, + 0, + 3606, + 3608, + 0, + 3610, + 3611, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3612, + 3616, + 3619, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3624, + 3628, + 0, + 3629, + 3634, + 3635, + 0, + 0, + 0, + 0, + 0, + 0, + 3636, + 0, + 3637, + 0, + 0, + 3638, + 3651, + 0, + 0, + 0, + 0, + 0, + 0, + 3652, + 3653, + 0, + 0, + 0, + 0, + 3656, 
+ 3657, + 0, + 0, + 0, + 0, + 0, + 3658, + 0, + 0, + 0, + 0, + 3659, + 0, + 3661, + 3663, + 3664, + 0, + 3665, + 0, + 3692, + 0, + 0, + 0, + 3694, + 3696, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3698, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3700, + 0, + 0, + 3701, + 0, + 0, + 0, + 3708, + 3709, + 0, + 0, + 0, + 3711, + 3712, + 0, + 0, + 0, + 0, + 0, + 3723, + 0, + 3724, + 3725, + 0, + 0, + 3726, + 0, + 0, + 0, + 0, + 0, + 0, + 3728, + 3729, + 0, + 3734, + 3735, + 3737, + 0, + 0, + 0, + 3743, + 0, + 3745, + 0, + 0, + 3746, + 0, + 0, + 3747, + 3748, + 0, + 3757, + 0, + 3759, + 3766, + 3767, + 0, + 3768, + 0, + 0, + 0, + 0, + 3769, + 0, + 0, + 3771, + 0, + 3774, + 0, + 0, + 0, + 0, + 0, + 0, + 3775, + 0, + 0, + 0, + 0, + 0, + 0, + 3776, + 0, + 3777, + 3786, + 0, + 3788, + 3789, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3791, + 0, + 3811, + 0, + 0, + 0, + 0, + 0, + 3814, + 3815, + 3816, + 3820, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3821, + 0, + 0, + 3825, + 0, + 0, + 0, + 0, + 3835, + 0, + 0, + 3848, + 3849, + 0, + 0, + 0, + 0, + 3850, + 3851, + 3853, + 0, + 0, + 0, + 0, + 3859, + 0, + 3860, + 3862, + 0, + 0, + 0, + 0, + 0, + 3863, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3873, + 0, + 3874, + 0, + 3875, + 3886, + 0, + 3887, + 0, + 0, + 0, + 0, + 3892, + 3913, + 0, + 3914, + 0, + 0, + 0, + 3925, + 3931, + 0, + 0, + 0, + 0, + 3934, + 3941, + 3942, + 0, + 0, + 0, + 0, + 3943, + 0, + 0, + 0, + 3944, + 0, + 0, + 0, + 0, + 0, + 3945, + 0, + 3947, + 0, + 0, + 0, + 3956, + 3957, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3958, + 0, + 3959, + 3965, + 0, + 0, + 0, + 0, + 3966, + 0, + 0, + 0, + 3967, + 0, + 0, + 0, + 3968, + 3974, + 0, + 0, + 0, + 0, + 0, + 3975, + 3977, + 3978, + 0, + 0, + 0, + 0, + 3980, + 0, + 3985, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3986, + 4011, + 0, + 0, + 4017, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4018, + 0, + 0, + 0, + 0, + 4019, + 0, + 4023, + 0, + 0, + 0, + 
4027, + 4028, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4031, + 4034, + 0, + 0, + 4035, + 4037, + 4039, + 4040, + 0, + 0, + 0, + 0, + 0, + 4059, + 0, + 4060, + 4061, + 0, + 4062, + 4063, + 4066, + 0, + 0, + 4072, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4088, + 0, + 0, + 0, + 0, + 0, + 4091, + 0, + 0, + 0, + 0, + 4094, + 4095, + 0, + 0, + 4096, + 0, + 0, + 0, + 0, + 0, + 4098, + 4099, + 0, + 0, + 0, + 4101, + 0, + 4104, + 0, + 0, + 0, + 4105, + 4108, + 0, + 4113, + 0, + 0, + 4115, + 4116, + 0, + 4126, + 0, + 0, + 4127, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4128, + 4132, + 4133, + 0, + 4134, + 0, + 0, + 0, + 4137, + 0, + 0, + 4141, + 0, + 0, + 0, + 0, + 4144, + 4146, + 4147, + 0, + 0, + 0, + 0, + 4148, + 0, + 0, + 4311, + 0, + 0, + 0, + 4314, + 4329, + 0, + 4331, + 4332, + 0, + 4333, + 0, + 4334, + 0, + 0, + 0, + 4335, + 0, + 4336, + 0, + 0, + 0, + 4337, + 0, + 0, + 0, + 4342, + 4345, + 4346, + 4350, + 0, + 4351, + 4352, + 0, + 4354, + 4355, + 0, + 0, + 4364, + 0, + 0, + 0, + 0, + 4369, + 0, + 0, + 0, + 4373, + 0, + 4374, + 0, + 0, + 0, + 0, + 4377, + 0, + 0, + 0, + 0, + 4378, + 0, + 0, + 0, + 4380, + 0, + 0, + 0, + 4381, + 4382, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4384, + 0, + 0, + 0, + 0, + 4385, + 0, + 0, + 0, + 4386, + 0, + 0, + 0, + 4391, + 4398, + 0, + 0, + 0, + 0, + 4407, + 4409, + 0, + 0, + 0, + 0, + 4410, + 0, + 0, + 4411, + 0, + 4414, + 4415, + 4418, + 0, + 4427, + 4428, + 4430, + 0, + 4431, + 0, + 4448, + 0, + 0, + 0, + 0, + 0, + 4449, + 0, + 0, + 0, + 4451, + 4452, + 0, + 4453, + 4454, + 0, + 4456, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4459, + 0, + 4463, + 0, + 0, + 0, + 0, + 0, + 4466, + 0, + 4467, + 0, + 4469, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4470, + 4471, + 0, + 4473, + 0, + 0, + 4475, + 0, + 0, + 0, + 0, + 4477, + 4478, + 0, + 0, + 0, + 4479, + 4481, + 0, + 4482, + 0, + 4484, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4486, + 0, + 0, + 4488, + 0, + 0, + 4497, + 0, + 4508, + 0, + 0, + 4510, + 4511, 
+ 0, + 4520, + 4523, + 0, + 4524, + 0, + 4525, + 0, + 4527, + 0, + 0, + 4528, + 0, + 0, + 0, + 0, + 4530, + 0, + 4531, + 0, + 0, + 4532, + 0, + 0, + 0, + 4533, + 0, + 0, + 0, + 0, + 0, + 4535, + 0, + 0, + 0, + 4536, + 0, + 0, + 0, + 0, + 0, + 4541, + 4543, + 4544, + 4545, + 4547, + 0, + 4548, + 0, + 0, + 0, + 0, + 4550, + 4551, + 0, + 4553, + 0, + 0, + 0, + 0, + 4562, + 0, + 0, + 4571, + 0, + 0, + 0, + 4574, + 0, + 0, + 0, + 4575, + 0, + 4576, + 0, + 4577, + 0, + 0, + 0, + 4581, + 0, + 0, + 0, + 0, + 0, + 4582, + 0, + 0, + 4586, + 0, + 0, + 0, + 4588, + 0, + 0, + 4597, + 0, + 4598, + 0, + 0, + 0, + 0, + 4616, + 4617, + 0, + 4618, + 0, + 0, + 0, + 0, + 4619, + 0, + 4620, + 0, + 0, + 4621, + 0, + 4624, + 0, + 0, + 0, + 0, + 0, + 4625, + 0, + 0, + 0, + 0, + 4657, + 0, + 4659, + 0, + 4667, + 0, + 0, + 0, + 4668, + 4670, + 0, + 4672, + 0, + 0, + 0, + 0, + 0, + 4673, + 4676, + 0, + 0, + 0, + 0, + 4687, + 0, + 0, + 0, + 0, + 4697, + 0, + 0, + 0, + 0, + 4699, + 0, + 4701, + 0, + 0, + 0, + 0, + 4702, + 0, + 0, + 4706, + 0, + 0, + 4713, + 0, + 0, + 0, + 4714, + 4715, + 4716, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4717, + 0, + 0, + 4720, + 0, + 4721, + 4729, + 4735, + 0, + 0, + 0, + 4737, + 0, + 0, + 0, + 4739, + 0, + 0, + 0, + 4740, + 0, + 0, + 0, + 4741, + 0, + 0, + 0, + 0, + 0, + 4742, + 0, + 4745, + 4746, + 4747, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4748, + 0, + 0, + 0, + 4749, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4751, + 4786, + 0, + 4787, + 0, + 4788, + 4796, + 0, + 0, + 4797, + 4798, + 0, + 4799, + 4806, + 4807, + 0, + 0, + 0, + 0, + 4809, + 4810, + 0, + 0, + 0, + 0, + 0, + 0, + 4811, + 0, + 0, + 0, + 0, + 0, + 4812, + 0, + 4813, + 0, + 0, + 4815, + 0, + 4821, + 4822, + 0, + 0, + 0, + 0, + 4823, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4824, + 0, + 0, + 0, + 0, + 4826, + 0, + 0, + 0, + 4828, + 0, + 4829, + 0, + 0, + 0, + 4843, + 0, + 0, + 4847, + 0, + 4853, + 
4855, + 4858, + 0, + 0, + 0, + 0, + 0, + 4859, + 0, + 4864, + 0, + 0, + 4879, + 0, + 0, + 0, + 0, + 4880, + 0, + 0, + 0, + 0, + 4881, + 0, + 4882, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4883, + 0, + 0, + 0, + 0, + 4884, + 0, + 0, + 0, + 0, + 0, + 4886, + 4887, + 4888, + 4894, + 4896, + 0, + 4902, + 0, + 0, + 4905, + 0, + 0, + 4915, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4916, + 4917, + 4919, + 4921, + 0, + 0, + 0, + 0, + 0, + 4926, + 0, + 0, + 0, + 0, + 4927, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 4929, + 0, + 4930, + 4931, + 0, + 4938, + 0, + 4952, + 0, + 4953, + 4957, + 4960, + 4964, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5019, + 5020, + 5022, + 0, + 0, + 0, + 0, + 0, + 5023, + 0, + 0, + 0, + 5024, + 0, + 0, + 0, + 5025, + 0, + 0, + 0, + 0, + 5028, + 0, + 0, + 0, + 0, + 5029, + 5030, + 5031, + 0, + 5033, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5034, + 5035, + 0, + 5036, + 0, + 0, + 5037, + 0, + 0, + 0, + 0, + 5038, + 0, + 0, + 5039, + 0, + 0, + 0, + 5041, + 5042, + 0, + 0, + 0, + 0, + 5044, + 5049, + 5054, + 0, + 5055, + 0, + 5057, + 0, + 0, + 0, + 5060, + 0, + 0, + 0, + 0, + 0, + 5063, + 0, + 5064, + 5065, + 0, + 5067, + 0, + 0, + 0, + 5068, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5076, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5077, + 0, + 0, + 5078, + 5080, + 0, + 0, + 5083, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5085, + 0, + 0, + 0, + 0, + 0, + 0, + 5098, + 5099, + 5101, + 5105, + 5107, + 0, + 5108, + 0, + 5109, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5110, + 0, + 0, + 0, + 0, + 0, + 5117, + 5118, + 0, + 5121, + 0, + 5122, + 0, + 0, + 5130, + 0, + 0, + 0, + 5137, + 0, + 0, + 0, + 5148, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5151, + 5154, + 0, + 0, + 0, + 5155, + 0, + 0, + 5156, + 5159, + 5161, + 0, + 0, + 0, + 0, + 5162, + 0, + 0, + 0, + 0, + 5163, + 5164, + 0, + 5166, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5167, + 0, + 0, + 0, + 5172, + 0, + 0, + 0, + 0, + 0, + 0, + 5178, + 
5179, + 0, + 0, + 5190, + 0, + 0, + 5191, + 5192, + 5194, + 0, + 0, + 5198, + 5201, + 0, + 0, + 0, + 0, + 0, + 5203, + 0, + 5206, + 5209, + 0, + 0, + 0, + 0, + 0, + 0, + 5213, + 0, + 5214, + 5216, + 0, + 0, + 0, + 0, + 0, + 5217, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5218, + 5219, + 0, + 5231, + 0, + 0, + 5244, + 5249, + 0, + 5254, + 0, + 5255, + 0, + 0, + 5257, + 0, + 0, + 0, + 0, + 0, + 5258, + 0, + 5260, + 5270, + 0, + 5277, + 0, + 0, + 0, + 0, + 0, + 0, + 5280, + 5281, + 5282, + 5283, + 0, + 0, + 0, + 0, + 0, + 5284, + 0, + 5285, + 0, + 0, + 0, + 0, + 0, + 5287, + 5288, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5289, + 5291, + 0, + 0, + 5294, + 0, + 0, + 5295, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5304, + 0, + 0, + 5306, + 5307, + 5308, + 0, + 5309, + 0, + 0, + 5310, + 0, + 0, + 0, + 0, + 5311, + 5312, + 0, + 5313, + 0, + 0, + 0, + 0, + 0, + 5316, + 0, + 0, + 0, + 5317, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5325, + 0, + 0, + 0, + 0, + 0, + 0, + 5326, + 0, + 5327, + 5329, + 0, + 5332, + 0, + 0, + 0, + 0, + 5338, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5340, + 0, + 0, + 5341, + 0, + 0, + 0, + 5342, + 0, + 5343, + 5344, + 0, + 0, + 5345, + 0, + 0, + 0, + 0, + 0, + 0, + 5347, + 5348, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5349, + 0, + 5350, + 0, + 5354, + 0, + 0, + 0, + 0, + 5358, + 0, + 0, + 5359, + 0, + 0, + 5361, + 0, + 0, + 5365, + 0, + 5367, + 0, + 5373, + 0, + 0, + 0, + 5379, + 0, + 0, + 0, + 5380, + 0, + 0, + 0, + 5382, + 0, + 5384, + 0, + 0, + 0, + 0, + 0, + 0, + 5385, + 0, + 0, + 0, + 0, + 5387, + 0, + 0, + 0, + 0, + 0, + 0, + 5388, + 5390, + 5393, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5396, + 0, + 0, + 0, + 0, + 5397, + 5402, + 0, + 0, + 0, + 0, + 0, + 5403, + 0, + 0, + 0, + 5404, + 5405, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5406, + 0, + 0, + 0, + 0, + 5410, + 0, + 0, + 5411, + 0, + 5415, + 0, + 0, + 0, + 0, + 5416, + 5434, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
5438, + 0, + 5440, + 0, + 0, + 0, + 0, + 0, + 0, + 5441, + 5442, + 0, + 0, + 0, + 5443, + 5444, + 5447, + 0, + 0, + 5448, + 5449, + 5451, + 0, + 0, + 0, + 5456, + 5457, + 0, + 0, + 0, + 5459, + 0, + 0, + 0, + 5461, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5464, + 0, + 5466, + 0, + 0, + 5467, + 0, + 5470, + 0, + 0, + 5473, + 0, + 0, + 5474, + 0, + 0, + 5476, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5477, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5484, + 0, + 0, + 5485, + 5486, + 0, + 0, + 0, + 0, + 0, + 5488, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5489, + 0, + 0, + 0, + 0, + 0, + 5507, + 0, + 0, + 0, + 5510, + 0, + 5511, + 0, + 0, + 5512, + 0, + 0, + 0, + 5513, + 0, + 5515, + 0, + 0, + 5516, + 5517, + 0, + 5518, + 0, + 0, + 5522, + 0, + 0, + 0, + 0, + 0, + 5534, + 5535, + 0, + 0, + 5536, + 0, + 5538, + 0, + 0, + 5543, + 0, + 5544, + 0, + 0, + 5545, + 0, + 5547, + 0, + 5557, + 0, + 0, + 5558, + 0, + 5560, + 5567, + 0, + 0, + 0, + 0, + 5568, + 0, + 0, + 0, + 5571, + 5573, + 0, + 5574, + 0, + 5575, + 0, + 0, + 0, + 0, + 5577, + 0, + 0, + 5598, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5600, + 5609, + 0, + 0, + 0, + 0, + 5610, + 0, + 0, + 5612, + 0, + 5624, + 0, + 5625, + 0, + 0, + 0, + 5629, + 0, + 5641, + 0, + 5642, + 5643, + 0, + 0, + 0, + 0, + 0, + 0, + 5651, + 0, + 0, + 0, + 5652, + 5653, + 0, + 5661, + 5662, + 5678, + 0, + 5679, + 0, + 0, + 0, + 0, + 5685, + 5686, + 0, + 0, + 0, + 0, + 0, + 5690, + 5692, + 0, + 5703, + 0, + 0, + 0, + 0, + 0, + 5706, + 0, + 0, + 0, + 0, + 5707, + 0, + 0, + 0, + 0, + 0, + 0, + 5708, + 0, + 0, + 5709, + 0, + 5710, + 0, + 0, + 0, + 5712, + 0, + 5733, + 0, + 5734, + 5735, + 0, + 0, + 5744, + 5751, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5752, + 0, + 5754, + 0, + 0, + 0, + 0, + 0, + 0, + 5757, + 5758, + 0, + 5760, + 5761, + 0, + 0, + 0, + 0, + 5763, + 5764, + 5765, + 0, + 5766, + 0, + 5767, + 5768, + 0, + 5770, + 0, + 0, + 0, + 0, + 5776, + 5780, + 0, + 0, + 0, + 0, + 
5782, + 0, + 0, + 0, + 0, + 5784, + 0, + 0, + 5788, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5797, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5799, + 0, + 0, + 5801, + 0, + 0, + 0, + 5811, + 0, + 0, + 0, + 0, + 0, + 0, + 5816, + 0, + 0, + 5827, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5830, + 5831, + 0, + 0, + 5832, + 0, + 0, + 5833, + 0, + 5835, + 5844, + 5845, + 0, + 5846, + 0, + 0, + 0, + 0, + 0, + 5850, + 0, + 0, + 0, + 0, + 0, + 5852, + 0, + 5855, + 5857, + 0, + 0, + 5859, + 0, + 5861, + 0, + 0, + 5863, + 0, + 5865, + 0, + 0, + 0, + 5873, + 5875, + 0, + 0, + 0, + 5877, + 0, + 5879, + 0, + 0, + 0, + 5888, + 0, + 0, + 5889, + 5891, + 0, + 5894, + 0, + 0, + 0, + 0, + 0, + 0, + 5895, + 0, + 5897, + 0, + 0, + 0, + 0, + 0, + 0, + 5907, + 0, + 5911, + 0, + 0, + 5912, + 0, + 5913, + 5922, + 5924, + 0, + 5927, + 5928, + 0, + 0, + 0, + 0, + 5929, + 5930, + 0, + 5933, + 0, + 0, + 0, + 0, + 5949, + 0, + 0, + 5951, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 5953, + 0, + 0, + 5954, + 0, + 5959, + 5960, + 5961, + 0, + 5964, + 0, + 0, + 0, + 5976, + 5978, + 5987, + 5990, + 0, + 0, + 0, + 0, + 0, + 5991, + 0, + 5992, + 0, + 0, + 0, + 5994, + 5995, + 0, + 0, + 5996, + 0, + 0, + 6001, + 6003, + 0, + 0, + 0, + 0, + 6007, + 0, + 0, + 0, + 0, + 0, + 6008, + 0, + 0, + 6009, + 0, + 6010, + 0, + 0, + 0, + 6011, + 6015, + 0, + 6017, + 0, + 6019, + 0, + 6023, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6025, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6026, + 0, + 6030, + 0, + 0, + 6032, + 0, + 0, + 0, + 6033, + 6038, + 6040, + 0, + 0, + 0, + 6041, + 6045, + 0, + 0, + 6046, + 0, + 0, + 6053, + 0, + 0, + 6054, + 0, + 6055, + 0, + 0, + 0, + 0, + 0, + 0, + 6057, + 0, + 6063, + 0, + 0, + 0, + 6064, + 0, + 6066, + 6071, + 6072, + 0, + 0, + 0, + 0, + 0, + 0, + 6075, + 6076, + 0, + 0, + 6077, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6078, + 6079, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6080, + 0, + 6083, + 0, + 0, + 0, + 0, + 0, + 
6084, + 0, + 0, + 6088, + 0, + 6089, + 0, + 0, + 6093, + 6105, + 0, + 0, + 6107, + 0, + 6110, + 0, + 0, + 0, + 6111, + 6125, + 6126, + 0, + 0, + 0, + 6129, + 0, + 0, + 0, + 0, + 6130, + 0, + 0, + 0, + 6131, + 6134, + 0, + 0, + 0, + 0, + 0, + 0, + 6142, + 0, + 0, + 0, + 0, + 0, + 6144, + 0, + 0, + 6146, + 6151, + 6153, + 0, + 6156, + 0, + 6163, + 0, + 6180, + 6181, + 0, + 0, + 0, + 0, + 0, + 6182, + 0, + 0, + 0, + 0, + 6184, + 6195, + 0, + 0, + 6206, + 0, + 6208, + 0, + 0, + 6212, + 6213, + 6214, + 0, + 6215, + 0, + 0, + 0, + 6228, + 0, + 0, + 0, + 6234, + 0, + 0, + 0, + 0, + 0, + 0, + 6235, + 6240, + 0, + 6242, + 6243, + 6244, + 0, + 6250, + 6255, + 0, + 0, + 0, + 0, + 0, + 6257, + 0, + 0, + 0, + 6258, + 6278, + 0, + 6284, + 0, + 0, + 0, + 6285, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6286, + 0, + 0, + 0, + 6320, + 0, + 0, + 6322, + 6332, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6334, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6335, + 0, + 0, + 6337, + 0, + 6338, + 0, + 6339, + 6340, + 0, + 0, + 6356, + 6357, + 6369, + 0, + 0, + 0, + 6370, + 6371, + 6372, + 0, + 6373, + 0, + 0, + 0, + 0, + 0, + 6376, + 0, + 0, + 0, + 0, + 0, + 6382, + 6383, + 6384, + 0, + 0, + 0, + 0, + 6386, + 0, + 6389, + 6397, + 6400, + 6411, + 0, + 6414, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6415, + 6416, + 0, + 0, + 0, + 0, + 0, + 0, + 6417, + 0, + 0, + 0, + 0, + 6418, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6420, + 0, + 6421, + 6423, + 6425, + 0, + 6429, + 6430, + 0, + 6433, + 6438, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6439, + 6440, + 0, + 0, + 6441, + 0, + 0, + 6444, + 0, + 0, + 0, + 0, + 6446, + 0, + 0, + 0, + 0, + 6447, + 6448, + 0, + 0, + 6450, + 0, + 0, + 0, + 6454, + 0, + 0, + 6455, + 0, + 6461, + 0, + 0, + 0, + 0, + 0, + 0, + 6462, + 0, + 0, + 6463, + 0, + 6464, + 0, + 6465, + 6467, + 0, + 0, + 0, + 6468, + 0, + 6479, + 6480, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6481, + 0, + 0, + 6485, + 6487, + 0, + 0, + 0, + 0, + 0, + 0, + 6493, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6494, 
+ 6495, + 6496, + 0, + 0, + 0, + 0, + 0, + 6498, + 0, + 0, + 0, + 6507, + 6508, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6511, + 6512, + 0, + 0, + 0, + 0, + 6513, + 0, + 0, + 0, + 6514, + 0, + 0, + 0, + 0, + 0, + 6516, + 0, + 0, + 6517, + 6518, + 0, + 0, + 0, + 6519, + 6520, + 6521, + 0, + 6523, + 0, + 0, + 0, + 0, + 6524, + 6528, + 0, + 6530, + 0, + 0, + 6532, + 0, + 6578, + 0, + 0, + 0, + 6583, + 0, + 6584, + 0, + 0, + 0, + 6587, + 0, + 0, + 0, + 6590, + 0, + 6591, + 0, + 0, + 0, + 0, + 0, + 6592, + 0, + 0, + 0, + 0, + 6593, + 6594, + 0, + 0, + 0, + 0, + 0, + 6599, + 6600, + 0, + 0, + 6601, + 6602, + 6604, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6608, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6610, + 6611, + 0, + 6615, + 0, + 6616, + 6618, + 6620, + 0, + 6637, + 0, + 0, + 0, + 0, + 6639, + 0, + 0, + 0, + 0, + 6641, + 0, + 6642, + 0, + 0, + 0, + 6647, + 0, + 6660, + 6663, + 0, + 6664, + 0, + 6666, + 6669, + 0, + 6675, + 6676, + 6677, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6678, + 0, + 0, + 0, + 6679, + 0, + 6680, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6693, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6704, + 6705, + 6706, + 0, + 0, + 6711, + 6713, + 0, + 0, + 0, + 0, + 0, + 6716, + 0, + 0, + 0, + 6717, + 0, + 6719, + 6724, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6725, + 6726, + 0, + 0, + 0, + 0, + 0, + 6728, + 6729, + 6735, + 0, + 6737, + 6742, + 0, + 0, + 6743, + 6750, + 0, + 6751, + 0, + 0, + 6752, + 6753, + 0, + 0, + 0, + 0, + 0, + 0, + 6754, + 0, + 0, + 0, + 0, + 0, + 6756, + 0, + 0, + 0, + 0, + 0, + 0, + 6763, + 0, + 0, + 6764, + 6765, + 0, + 0, + 0, + 6770, + 0, + 0, + 0, + 6776, + 6780, + 0, + 6781, + 0, + 0, + 0, + 6783, + 0, + 6784, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6785, + 0, + 0, + 0, + 6792, + 0, + 0, + 0, + 6793, + 0, + 0, + 6802, + 0, + 0, + 0, + 0, + 0, + 6803, + 0, + 0, + 0, + 6804, + 0, + 0, + 0, + 6812, + 0, + 0, + 6823, + 0, + 
6824, + 6839, + 0, + 0, + 0, + 0, + 6852, + 0, + 0, + 6854, + 0, + 6856, + 6857, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6867, + 0, + 6868, + 6870, + 6872, + 0, + 0, + 0, + 6873, + 6874, + 0, + 0, + 0, + 0, + 0, + 6875, + 0, + 0, + 6877, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6878, + 0, + 0, + 0, + 6879, + 0, + 6880, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6887, + 0, + 6888, + 6891, + 6893, + 0, + 6895, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6899, + 0, + 0, + 0, + 0, + 6901, + 0, + 0, + 0, + 0, + 6910, + 0, + 6911, + 0, + 0, + 6912, + 0, + 0, + 6913, + 6914, + 0, + 0, + 0, + 6915, + 0, + 0, + 0, + 6916, + 6919, + 0, + 0, + 0, + 0, + 0, + 0, + 6924, + 0, + 6925, + 0, + 0, + 0, + 6926, + 6927, + 6928, + 0, + 6929, + 0, + 6930, + 0, + 0, + 6931, + 6935, + 0, + 6936, + 0, + 0, + 0, + 0, + 6939, + 6940, + 6941, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6942, + 6948, + 6949, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6952, + 6954, + 6963, + 6965, + 6966, + 0, + 0, + 6967, + 6968, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6969, + 0, + 0, + 6970, + 6979, + 0, + 0, + 6980, + 0, + 0, + 6983, + 0, + 0, + 0, + 0, + 0, + 6984, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6988, + 6990, + 6992, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6995, + 0, + 0, + 0, + 7012, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7019, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7021, + 0, + 0, + 7022, + 7023, + 7028, + 0, + 7030, + 7033, + 0, + 0, + 0, + 0, + 0, + 0, + 7038, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7039, + 0, + 0, + 0, + 0, + 0, + 7046, + 0, + 7047, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7048, + 7052, + 0, + 0, + 0, + 0, + 0, + 7054, + 0, + 7060, + 0, + 0, + 0, + 0, + 7061, + 0, + 7065, + 0, + 0, + 0, + 0, + 7067, + 7069, + 0, + 7070, + 7071, + 7072, + 0, + 0, + 7078, + 0, + 7080, + 7081, + 0, + 7083, + 0, + 0, + 0, + 7084, + 7087, + 7088, + 0, + 0, + 7090, + 0, + 7093, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 7107, + 0, + 0, + 7108, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7110, + 0, + 7114, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7115, + 0, + 7116, + 0, + 0, + 0, + 0, + 0, + 7117, + 0, + 0, + 7118, + 0, + 0, + 7124, + 0, + 7125, + 0, + 0, + 7126, + 0, + 0, + 0, + 0, + 7128, + 0, + 0, + 0, + 0, + 0, + 7129, + 0, + 7130, + 0, + 7132, + 7133, + 0, + 0, + 7134, + 0, + 0, + 7139, + 0, + 7148, + 7150, + 0, + 0, + 0, + 0, + 7152, + 0, + 0, + 0, + 7153, + 7156, + 7157, + 0, + 0, + 0, + 0, + 0, + 7158, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7163, + 7165, + 7169, + 0, + 7171, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7172, + 0, + 7173, + 7181, + 0, + 0, + 0, + 0, + 0, + 7182, + 7185, + 0, + 0, + 0, + 0, + 7187, + 0, + 7201, + 7204, + 0, + 0, + 0, + 0, + 0, + 7206, + 7207, + 0, + 0, + 0, + 0, + 7211, + 7216, + 0, + 7218, + 0, + 0, + 0, + 0, + 7226, + 7228, + 7230, + 7232, + 7233, + 7235, + 7237, + 0, + 0, + 0, + 0, + 7238, + 7241, + 0, + 7242, + 0, + 0, + 7247, + 0, + 0, + 0, + 7266, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7289, + 0, + 0, + 7290, + 7291, + 0, + 0, + 7292, + 0, + 7297, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7300, + 0, + 7301, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7302, + 0, + 0, + 0, + 0, + 7305, + 0, + 0, + 0, + 0, + 7307, + 0, + 7308, + 0, + 7310, + 0, + 7335, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7337, + 0, + 7343, + 7347, + 0, + 0, + 0, + 0, + 0, + 7348, + 0, + 7349, + 7350, + 7352, + 7354, + 0, + 0, + 0, + 0, + 7357, + 0, + 7358, + 7366, + 0, + 7367, + 7368, + 0, + 0, + 7373, + 0, + 0, + 0, + 7374, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7376, + 0, + 0, + 0, + 7377, + 0, + 0, + 0, + 0, + 0, + 7378, + 0, + 7379, + 7380, + 0, + 0, + 0, + 0, + 0, + 7383, + 0, + 0, + 7386, + 0, + 0, + 0, + 0, + 7398, + 0, + 0, + 0, + 7399, + 7400, + 0, + 7401, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7402, + 0, + 0, + 0, + 0, + 0, + 7405, + 0, + 0, + 0, + 0, + 0, + 7406, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 7421, + 7427, + 7429, + 0, + 0, + 0, + 7435, + 0, + 0, + 7436, + 0, + 0, + 0, + 7437, + 0, + 0, + 0, + 0, + 0, + 0, + 7438, + 7443, + 0, + 7446, + 0, + 7448, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7456, + 0, + 0, + 0, + 0, + 0, + 7457, + 0, + 0, + 7461, + 0, + 0, + 0, + 0, + 0, + 7462, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7463, + 7466, + 7472, + 0, + 7476, + 0, + 0, + 7490, + 0, + 7491, + 0, + 0, + 7493, + 0, + 0, + 0, + 7498, + 7499, + 0, + 0, + 7508, + 0, + 0, + 0, + 0, + 0, + 7512, + 0, + 0, + 0, + 7513, + 7514, + 7516, + 0, + 0, + 0, + 0, + 7518, + 0, + 0, + 7519, + 7521, + 7522, + 0, + 0, + 0, + 7526, + 0, + 0, + 7529, + 0, + 0, + 7531, + 0, + 7536, + 0, + 7538, + 0, + 7539, + 0, + 0, + 7541, + 7542, + 7546, + 0, + 0, + 0, + 0, + 0, + 7547, + 0, + 7548, + 0, + 0, + 0, + 0, + 0, + 7550, + 0, + 0, + 7552, + 7553, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7554, + 7563, + 0, + 7573, + 0, + 0, + 0, + 0, + 0, + 0, + 7574, + 7576, + 0, + 7578, + 7581, + 7583, + 0, + 0, + 0, + 7584, + 0, + 7587, + 0, + 0, + 0, + 0, + 0, + 7589, + 0, + 0, + 0, + 7594, + 0, + 0, + 7595, + 0, + 0, + 7600, + 7602, + 7610, + 0, + 0, + 0, + 0, + 0, + 7612, + 0, + 7613, + 7614, + 0, + 0, + 7615, + 0, + 0, + 7616, + 0, + 7620, + 0, + 7621, + 7622, + 0, + 7623, + 0, + 0, + 0, + 0, + 7626, + 0, + 0, + 0, + 0, + 7627, + 7629, + 7631, + 0, + 0, + 7633, + 0, + 0, + 0, + 0, + 0, + 7639, + 0, + 7640, + 7642, + 0, + 0, + 7643, + 0, + 0, + 0, + 0, + 7644, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7645, + 0, + 0, + 0, + 0, + 0, + 7661, + 7662, + 7663, + 7665, + 0, + 7666, + 0, + 7667, + 0, + 7684, + 7688, + 7690, + 0, + 7691, + 0, + 0, + 0, + 0, + 0, + 0, + 7692, + 0, + 0, + 7700, + 0, + 7707, + 0, + 7708, + 0, + 7709, + 0, + 7721, + 0, + 0, + 0, + 7722, + 0, + 7724, + 0, + 0, + 0, + 0, + 0, + 0, + 7729, + 7731, + 0, + 7732, + 0, + 7733, + 7735, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7739, + 0, + 0, + 7741, + 7745, + 0, + 7748, + 0, + 0, + 0, + 
7751, + 0, + 0, + 0, + 7752, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7753, + 0, + 0, + 7756, + 0, + 7757, + 0, + 7759, + 0, + 7760, + 0, + 0, + 0, + 0, + 7761, + 7768, + 0, + 0, + 7769, + 0, + 0, + 7770, + 0, + 0, + 7771, + 0, + 0, + 7772, + 0, + 0, + 7773, + 0, + 0, + 0, + 0, + 0, + 7778, + 7783, + 0, + 0, + 0, + 0, + 0, + 7784, + 7785, + 0, + 7790, + 0, + 0, + 0, + 0, + 7792, + 0, + 7798, + 0, + 0, + 0, + 0, + 0, + 7799, + 0, + 7810, + 0, + 0, + 7813, + 0, + 7814, + 0, + 7816, + 0, + 7818, + 7824, + 7825, + 7826, + 0, + 7828, + 7830, + 0, + 0, + 0, + 7840, + 0, + 7842, + 0, + 7843, + 0, + 0, + 0, + 0, + 7844, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7846, + 0, + 0, + 0, + 0, + 0, + 7856, + 7857, + 7858, + 7862, + 0, + 7865, + 0, + 0, + 7866, + 0, + 0, + 7913, + 0, + 0, + 0, + 0, + 7914, + 0, + 0, + 7915, + 7917, + 7918, + 7919, + 0, + 7920, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7921, + 7922, + 0, + 7924, + 0, + 0, + 7925, + 0, + 0, + 7927, + 0, + 7930, + 7935, + 0, + 0, + 7937, + 0, + 0, + 0, + 0, + 0, + 0, + 7939, + 0, + 7940, + 0, + 0, + 0, + 0, + 0, + 7941, + 0, + 0, + 0, + 0, + 7945, + 0, + 0, + 0, + 0, + 7949, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7950, + 0, + 7953, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7968, + 0, + 0, + 0, + 0, + 7969, + 7972, + 7992, + 0, + 7993, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 7994, + 0, + 0, + 0, + 0, + 8007, + 8008, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8010, + 0, + 0, + 0, + 8012, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8018, + 0, + 8028, + 8029, + 0, + 0, + 8030, + 0, + 0, + 8032, + 8033, + 0, + 0, + 8034, + 8036, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8037, + 0, + 0, + 0, + 8043, + 8052, + 8059, + 8060, + 0, + 0, + 8061, + 0, + 0, + 0, + 8062, + 0, + 8063, + 0, + 8064, + 0, + 8066, + 8068, + 0, + 0, + 0, + 8080, + 8081, + 0, + 8089, + 0, + 0, + 0, + 0, + 0, + 8092, + 0, + 0, + 0, + 0, + 0, + 0, + 8093, + 8110, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 8111, + 0, + 0, + 0, + 0, + 0, + 8112, + 8115, + 0, + 8117, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8120, + 8121, + 8122, + 8128, + 8129, + 8130, + 8131, + 0, + 0, + 8139, + 0, + 0, + 8144, + 0, + 0, + 0, + 0, + 8145, + 8146, + 8153, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8154, + 0, + 8157, + 8160, + 8162, + 0, + 8164, + 8165, + 0, + 0, + 0, + 0, + 8166, + 8167, + 0, + 0, + 8179, + 0, + 0, + 0, + 8185, + 0, + 0, + 0, + 8186, + 0, + 0, + 8187, + 0, + 0, + 0, + 8188, + 0, + 0, + 0, + 0, + 0, + 8204, + 0, + 0, + 0, + 0, + 8210, + 0, + 0, + 0, + 0, + 0, + 8213, + 0, + 8214, + 0, + 0, + 8215, + 0, + 0, + 0, + 0, + 0, + 0, + 8218, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8219, + 0, + 8221, + 0, + 0, + 8222, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8225, + 0, + 0, + 0, + 8233, + 0, + 0, + 8242, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8247, + 0, + 8248, + 8252, + 0, + 8256, + 8257, + 0, + 0, + 8261, + 0, + 8264, + 8265, + 0, + 0, + 0, + 0, + 8267, + 0, + 0, + 0, + 8269, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8270, + 0, + 0, + 0, + 8278, + 0, + 8279, + 8283, + 0, + 0, + 8285, + 8286, + 8289, + 8292, + 0, + 0, + 0, + 0, + 8293, + 8295, + 8299, + 8300, + 8301, + 0, + 0, + 0, + 0, + 0, + 0, + 8304, + 8307, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8321, + 0, + 0, + 0, + 8322, + 8323, + 8325, + 8326, + 8327, + 0, + 0, + 8332, + 8338, + 0, + 0, + 8340, + 0, + 0, + 0, + 0, + 0, + 8350, + 0, + 0, + 8351, + 0, + 8354, + 8355, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8360, + 8372, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8377, + 0, + 0, + 0, + 0, + 8380, + 0, + 0, + 0, + 8383, + 0, + 8384, + 0, + 0, + 0, + 0, + 8386, + 8392, + 0, + 0, + 8394, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8396, + 8397, + 0, + 8398, + 0, + 8399, + 0, + 0, + 0, + 0, + 0, + 8400, + 0, + 8401, + 8410, + 8411, + 0, + 8412, + 8413, + 8422, + 0, + 0, + 0, + 0, + 
8423, + 0, + 0, + 0, + 0, + 8424, + 0, + 0, + 8425, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8441, + 8442, + 0, + 0, + 0, + 0, + 0, + 0, + 8443, + 0, + 0, + 8444, + 0, + 8447, + 0, + 0, + 0, + 0, + 8451, + 0, + 8458, + 0, + 8462, + 0, + 0, + 8468, + 0, + 8469, + 0, + 0, + 0, + 8470, + 0, + 8473, + 8479, + 8480, + 0, + 0, + 0, + 0, + 8481, + 8483, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8484, + 0, + 0, + 8490, + 0, + 0, + 0, + 0, + 0, + 0, + 8491, + 8493, + 8494, + 0, + 8528, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8530, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8534, + 8538, + 8540, + 0, + 0, + 8541, + 0, + 0, + 8545, + 0, + 8557, + 0, + 0, + 8569, + 8570, + 0, + 0, + 8571, + 8574, + 8575, + 8579, + 0, + 8583, + 0, + 0, + 0, + 0, + 8591, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8606, + 0, + 8607, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8608, + 0, + 0, + 8609, + 0, + 0, + 0, + 8610, + 0, + 0, + 0, + 8611, + 0, + 0, + 8613, + 8617, + 8621, + 0, + 0, + 8622, + 0, + 8623, + 0, + 8624, + 8625, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8637, + 8638, + 8639, + 8650, + 0, + 0, + 0, + 0, + 8652, + 8654, + 8655, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8656, + 0, + 0, + 0, + 0, + 0, + 8657, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8658, + 0, + 0, + 8659, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8660, + 0, + 0, + 0, + 0, + 0, + 0, + 8661, + 8663, + 8664, + 0, + 0, + 0, + 0, + 8665, + 0, + 8669, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8671, + 8674, + 0, + 8684, + 0, + 8686, + 0, + 0, + 0, + 8689, + 0, + 0, + 0, + 8690, + 0, + 8706, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8710, + 0, + 8711, + 8713, + 8714, + 8724, + 8727, + 8728, + 8733, + 8736, + 0, + 8737, + 8739, + 0, + 0, + 0, + 0, + 8742, + 8743, + 8745, + 8754, + 0, + 0, + 0, + 0, + 8756, + 0, + 0, + 0, + 0, + 0, + 0, + 8757, + 8760, + 0, + 0, + 0, + 0, + 0, + 8762, + 8763, + 8764, + 0, + 8766, + 8769, + 8770, + 8773, 
+ 0, + 8774, + 0, + 8779, + 0, + 0, + 0, + 0, + 8780, + 0, + 0, + 8781, + 0, + 0, + 8783, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8784, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8785, + 0, + 0, + 0, + 0, + 8786, + 0, + 0, + 0, + 0, + 8788, + 8790, + 0, + 0, + 0, + 8803, + 0, + 8813, + 8814, + 0, + 0, + 0, + 0, + 0, + 8815, + 8816, + 0, + 0, + 0, + 0, + 8818, + 0, + 0, + 0, + 0, + 8822, + 8828, + 8829, + 0, + 8831, + 0, + 0, + 0, + 0, + 8833, + 0, + 0, + 0, + 8834, + 0, + 0, + 0, + 8835, + 0, + 8836, + 0, + 0, + 0, + 8837, + 0, + 0, + 0, + 0, + 0, + 0, + 8838, + 8839, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8840, + 0, + 0, + 0, + 8841, + 0, + 8842, + 0, + 0, + 0, + 8846, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8847, + 0, + 8848, + 0, + 0, + 8864, + 0, + 0, + 8866, + 0, + 0, + 8870, + 8872, + 0, + 0, + 8873, + 8874, + 0, + 0, + 0, + 0, + 0, + 0, + 8875, + 0, + 8876, + 0, + 0, + 0, + 0, + 8896, + 8900, + 0, + 0, + 0, + 0, + 8901, + 0, + 0, + 0, + 0, + 0, + 8904, + 0, + 8907, + 0, + 0, + 0, + 0, + 8911, + 8912, + 8913, + 0, + 0, + 0, + 8914, + 0, + 8915, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8916, + 0, + 0, + 0, + 8929, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 8930, + 0, + 8932, + 0, + 8943, + 0, + 0, + 0, + 8945, + 8947, + 0, + 0, + 0, + 0, + 8949, + 0, + 8950, + 0, + 8954, + 8957, + 0, + 0, + 8970, + 0, + 0, + 0, + 0, + 8971, + 0, + 8996, + 0, + 0, + 0, + 0, + 8997, + 9000, + 0, + 0, + 0, + 0, + 9001, + 9002, + 0, + 9004, + 9009, + 9024, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9027, + 9082, + 0, + 0, + 9083, + 9089, + 0, + 0, + 0, + 0, + 0, + 0, + 9090, + 0, + 0, + 0, + 9092, + 0, + 0, + 9093, + 0, + 9095, + 0, + 0, + 9096, + 9097, + 9101, + 9102, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9112, + 0, + 0, + 0, + 0, + 0, + 0, + 9114, + 0, + 0, + 9120, + 0, + 9121, + 9122, + 0, + 0, + 0, + 9123, + 9124, + 0, + 0, + 9125, + 0, + 
0, + 9126, + 0, + 9127, + 0, + 0, + 9129, + 9131, + 0, + 0, + 0, + 9132, + 0, + 0, + 9136, + 0, + 9144, + 0, + 0, + 9148, + 0, + 0, + 0, + 0, + 0, + 0, + 9149, + 0, + 9152, + 9163, + 0, + 0, + 9165, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9166, + 0, + 9169, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9170, + 0, + 0, + 0, + 0, + 9172, + 0, + 9174, + 9175, + 9176, + 0, + 9177, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9186, + 0, + 9187, + 0, + 0, + 0, + 9188, + 9189, + 0, + 0, + 9190, + 0, + 0, + 0, + 0, + 9191, + 0, + 0, + 0, + 9193, + 0, + 0, + 0, + 0, + 9197, + 9198, + 0, + 0, + 0, + 9208, + 9211, + 0, + 0, + 0, + 0, + 9216, + 9217, + 0, + 9220, + 0, + 0, + 0, + 0, + 9221, + 9222, + 9223, + 0, + 9224, + 9225, + 0, + 0, + 9227, + 0, + 9228, + 9229, + 0, + 0, + 9230, + 0, + 9232, + 0, + 9233, + 0, + 0, + 0, + 0, + 0, + 9234, + 9235, + 0, + 0, + 9237, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9238, + 9240, + 0, + 0, + 9241, + 0, + 0, + 0, + 0, + 9244, + 0, + 0, + 0, + 0, + 9247, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9248, + 0, + 0, + 0, + 9249, + 0, + 0, + 0, + 0, + 0, + 9250, + 0, + 0, + 0, + 0, + 9251, + 0, + 0, + 9252, + 9255, + 0, + 0, + 0, + 9256, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9257, + 0, + 0, + 9258, + 0, + 0, + 0, + 0, + 0, + 0, + 9259, + 0, + 0, + 0, + 0, + 0, + 9262, + 9263, + 0, + 0, + 9265, + 9266, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9268, + 9271, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9273, + 0, + 0, + 0, + 9276, + 9277, + 9279, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9280, + 0, + 0, + 9293, + 0, + 0, + 0, + 0, + 0, + 9297, + 9301, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9308, + 9309, + 9313, + 9321, + 9322, + 0, + 9326, + 9327, + 0, + 0, + 9477, + 0, + 9479, + 0, + 0, + 0, + 0, + 9482, + 0, + 0, + 0, + 9483, + 0, + 9484, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9485, + 0, + 0, + 9486, + 0, + 0, + 0, + 9489, + 0, + 0, + 0, + 0, + 9490, + 9491, + 0, + 0, + 0, + 0, + 9493, + 0, + 
9495, + 9496, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9500, + 0, + 9502, + 0, + 0, + 0, + 0, + 0, + 9504, + 9507, + 0, + 9509, + 0, + 9511, + 0, + 0, + 9513, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9515, + 0, + 0, + 0, + 0, + 0, + 0, + 9516, + 9517, + 0, + 0, + 0, + 0, + 9532, + 0, + 0, + 9533, + 0, + 0, + 9538, + 0, + 9539, + 9540, + 0, + 0, + 0, + 0, + 9541, + 0, + 0, + 0, + 9542, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9544, + 9545, + 0, + 9546, + 0, + 0, + 0, + 0, + 0, + 0, + 9547, + 9548, + 0, + 0, + 0, + 9550, + 0, + 9557, + 0, + 9558, + 0, + 9561, + 0, + 9563, + 9570, + 0, + 9572, + 9574, + 9575, + 0, + 0, + 0, + 9577, + 9592, + 0, + 0, + 9596, + 0, + 0, + 0, + 9598, + 0, + 9600, + 0, + 9601, + 0, + 0, + 0, + 0, + 0, + 0, + 9608, + 0, + 9638, + 9639, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9641, + 0, + 0, + 9643, + 9644, + 9645, + 9646, + 0, + 0, + 0, + 9648, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9650, + 9654, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9655, + 0, + 0, + 0, + 0, + 0, + 9656, + 0, + 9657, + 0, + 0, + 0, + 0, + 9658, + 0, + 0, + 9659, + 0, + 0, + 9664, + 0, + 0, + 9665, + 0, + 9667, + 9669, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9671, + 0, + 9673, + 9681, + 0, + 0, + 0, + 0, + 9682, + 9683, + 9684, + 0, + 0, + 0, + 0, + 9686, + 9698, + 0, + 0, + 9700, + 9701, + 9702, + 0, + 9703, + 9717, + 0, + 0, + 0, + 0, + 9718, + 0, + 9726, + 0, + 0, + 0, + 0, + 9727, + 0, + 0, + 0, + 9728, + 0, + 9742, + 0, + 9744, + 0, + 0, + 0, + 9750, + 0, + 9754, + 9755, + 0, + 0, + 0, + 0, + 0, + 9756, + 0, + 9757, + 9768, + 0, + 9769, + 0, + 0, + 0, + 9770, + 9771, + 0, + 9773, + 0, + 9774, + 0, + 9775, + 0, + 0, + 0, + 9776, + 9777, + 9784, + 0, + 0, + 0, + 9786, + 0, + 9789, + 0, + 0, + 0, + 0, + 9793, + 9794, + 0, + 0, + 0, + 9808, + 0, + 0, + 0, + 0, + 0, + 9811, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9812, + 0, + 9820, + 0, + 9823, + 0, + 9828, + 0, + 0, + 0, + 0, + 9830, + 0, + 0, + 9833, + 9836, + 0, + 0, + 0, + 
9840, + 0, + 0, + 0, + 9841, + 0, + 0, + 9842, + 0, + 9845, + 0, + 0, + 0, + 9847, + 9848, + 0, + 0, + 9855, + 0, + 0, + 0, + 0, + 0, + 0, + 9856, + 9863, + 9865, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9866, + 9867, + 9868, + 9873, + 9875, + 0, + 0, + 0, + 0, + 0, + 0, + 9880, + 0, + 9886, + 0, + 0, + 0, + 9887, + 0, + 0, + 9891, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9906, + 9907, + 9908, + 0, + 0, + 0, + 9909, + 0, + 0, + 0, + 0, + 0, + 0, + 9910, + 0, + 0, + 0, + 0, + 9913, + 0, + 0, + 0, + 0, + 9914, + 0, + 0, + 0, + 0, + 0, + 9922, + 0, + 0, + 0, + 0, + 9923, + 9925, + 0, + 0, + 0, + 0, + 0, + 0, + 9930, + 0, + 0, + 0, + 9931, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9932, + 0, + 9939, + 0, + 0, + 9940, + 9962, + 9966, + 0, + 9969, + 9970, + 0, + 0, + 9974, + 0, + 9979, + 9981, + 9982, + 0, + 0, + 0, + 9985, + 0, + 0, + 0, + 0, + 0, + 0, + 9987, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 9988, + 9993, + 0, + 0, + 9994, + 0, + 0, + 0, + 9997, + 0, + 10004, + 0, + 0, + 0, + 0, + 0, + 10007, + 10019, + 10020, + 10022, + 0, + 0, + 0, + 10031, + 0, + 0, + 0, + 0, + 0, + 10032, + 0, + 0, + 10034, + 0, + 10036, + 0, + 0, + 0, + 0, + 10038, + 0, + 10039, + 10040, + 10041, + 10042, + 0, + 0, + 0, + 0, + 0, + 10043, + 0, + 0, + 0, + 0, + 0, + 10045, + 10054, + 0, + 0, + 0, + 0, + 10055, + 0, + 0, + 10057, + 10058, + 0, + 0, + 0, + 0, + 0, + 0, + 10059, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10060, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10063, + 0, + 10066, + 0, + 0, + 0, + 10070, + 0, + 10072, + 0, + 0, + 10076, + 10077, + 0, + 0, + 10084, + 0, + 10087, + 10090, + 10091, + 0, + 0, + 0, + 10094, + 10097, + 0, + 0, + 0, + 0, + 0, + 0, + 10098, + 0, + 0, + 0, + 0, + 0, + 0, + 10103, + 0, + 10104, + 0, + 10108, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10120, + 0, + 0, + 0, + 10122, + 0, + 0, + 10125, + 0, + 0, + 0, + 0, + 10127, + 10128, + 0, + 0, + 10134, + 0, + 10135, + 10136, + 0, + 10137, + 0, + 0, + 10147, + 0, + 10149, + 10150, + 0, + 0, + 
10156, + 0, + 10158, + 10159, + 10160, + 10168, + 0, + 0, + 10171, + 0, + 10173, + 0, + 0, + 0, + 10176, + 0, + 0, + 0, + 0, + 10177, + 0, + 0, + 0, + 0, + 10178, + 0, + 0, + 0, + 0, + 10194, + 0, + 10202, + 0, + 0, + 10203, + 10204, + 0, + 10205, + 10206, + 0, + 10207, + 0, + 0, + 0, + 0, + 10209, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10213, + 0, + 0, + 0, + 0, + 0, + 0, + 10217, + 0, + 10229, + 0, + 10230, + 10231, + 0, + 0, + 10232, + 0, + 0, + 10237, + 10238, + 10244, + 0, + 0, + 0, + 0, + 0, + 10250, + 0, + 10252, + 0, + 0, + 0, + 0, + 0, + 0, + 10255, + 0, + 0, + 10257, + 0, + 0, + 0, + 0, + 0, + 0, + 10258, + 0, + 10259, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10260, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10284, + 10288, + 10289, + 0, + 0, + 0, + 10290, + 0, + 10296, + 0, + 0, + 0, + 0, + 0, + 10297, + 0, + 0, + 0, + 0, + 0, + 0, + 10298, + 0, + 0, + 0, + 0, + 10299, + 10303, + 0, + 0, + 0, + 0, + 0, + 10306, + 0, + 0, + 0, + 10307, + 0, + 10308, + 0, + 0, + 0, + 0, + 10311, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10315, + 10317, + 0, + 0, + 0, + 10318, + 10319, + 0, + 10321, + 0, + 10326, + 0, + 10328, + 0, + 0, + 0, + 0, + 10329, + 0, + 0, + 10331, + 0, + 10332, + 0, + 0, + 0, + 0, + 0, + 0, + 10334, + 0, + 0, + 10335, + 10338, + 0, + 0, + 0, + 0, + 0, + 10339, + 10349, + 0, + 0, + 0, + 0, + 0, + 0, + 10351, + 0, + 10353, + 0, + 0, + 0, + 0, + 0, + 0, + 10362, + 0, + 10368, + 0, + 10369, + 0, + 0, + 0, + 10372, + 10373, + 0, + 0, + 0, + 0, + 0, + 10374, + 0, + 0, + 0, + 10375, + 0, + 10376, + 0, + 0, + 10386, + 10388, + 10390, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10391, + 0, + 0, + 10392, + 10394, + 0, + 0, + 10396, + 0, + 10397, + 0, + 10403, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10404, + 0, + 10405, + 10410, + 0, + 0, + 10411, + 0, + 10412, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10421, + 10422, + 10423, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10425, + 0, + 0, + 10427, + 0, + 0, + 10430, + 0, + 0, + 0, + 0, + 0, + 10432, + 0, + 10433, + 10434, + 0, + 
0, + 0, + 0, + 10436, + 10437, + 0, + 10438, + 0, + 10439, + 0, + 10444, + 10446, + 0, + 0, + 0, + 0, + 0, + 10448, + 0, + 0, + 0, + 0, + 0, + 10449, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10451, + 0, + 10453, + 0, + 0, + 0, + 10454, + 10457, + 0, + 0, + 10459, + 0, + 10469, + 0, + 0, + 0, + 0, + 0, + 10472, + 10481, + 0, + 0, + 0, + 0, + 0, + 10482, + 10483, + 0, + 10492, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10499, + 0, + 0, + 0, + 10502, + 0, + 0, + 10510, + 0, + 10521, + 10524, + 0, + 0, + 10525, + 10526, + 10528, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10530, + 0, + 0, + 0, + 0, + 10533, + 0, + 10534, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10535, + 10536, + 0, + 0, + 10544, + 0, + 10553, + 10556, + 0, + 10557, + 10559, + 0, + 0, + 0, + 0, + 0, + 10562, + 10563, + 10564, + 0, + 10565, + 0, + 0, + 0, + 10566, + 0, + 10567, + 0, + 0, + 0, + 0, + 10575, + 0, + 0, + 10576, + 0, + 10578, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10585, + 10586, + 10587, + 10589, + 0, + 10590, + 0, + 0, + 10594, + 0, + 0, + 0, + 0, + 0, + 10598, + 0, + 0, + 10601, + 0, + 0, + 0, + 10602, + 0, + 10603, + 0, + 10604, + 0, + 10605, + 0, + 0, + 10607, + 0, + 10626, + 0, + 10627, + 0, + 0, + 0, + 0, + 0, + 10629, + 10630, + 10631, + 0, + 0, + 0, + 10646, + 0, + 0, + 0, + 10647, + 0, + 10650, + 0, + 10651, + 0, + 0, + 0, + 10652, + 10653, + 10655, + 0, + 10658, + 0, + 0, + 10659, + 0, + 10667, + 0, + 0, + 0, + 0, + 10669, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10670, + 0, + 0, + 0, + 10671, + 0, + 0, + 0, + 0, + 10672, + 10673, + 0, + 10674, + 0, + 0, + 0, + 10676, + 0, + 0, + 0, + 0, + 0, + 0, + 10678, + 0, + 10682, + 0, + 0, + 10692, + 0, + 10697, + 0, + 0, + 0, + 0, + 10698, + 0, + 0, + 0, + 10700, + 0, + 0, + 0, + 0, + 0, + 10703, + 0, + 10704, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10705, + 0, + 10715, + 10718, + 10720, + 0, + 0, + 10722, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10723, + 0, + 0, + 0, + 0, + 10726, + 0, + 0, + 0, + 0, + 0, + 
10727, + 10730, + 10743, + 0, + 0, + 0, + 0, + 0, + 0, + 10744, + 0, + 0, + 10745, + 0, + 0, + 0, + 0, + 0, + 0, + 10748, + 0, + 0, + 0, + 0, + 10750, + 0, + 0, + 10752, + 10753, + 0, + 0, + 0, + 10756, + 0, + 0, + 0, + 0, + 0, + 0, + 10758, + 0, + 0, + 0, + 10759, + 0, + 10769, + 0, + 0, + 10772, + 0, + 0, + 0, + 0, + 0, + 0, + 10773, + 0, + 0, + 0, + 10777, + 0, + 0, + 10779, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10780, + 10784, + 0, + 0, + 0, + 10789, + 0, + 0, + 0, + 10791, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10795, + 0, + 0, + 10796, + 0, + 10808, + 0, + 10809, + 0, + 0, + 0, + 10810, + 0, + 0, + 0, + 10812, + 0, + 0, + 10814, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10815, + 0, + 0, + 0, + 0, + 10816, + 10817, + 0, + 0, + 0, + 0, + 10819, + 0, + 10820, + 0, + 0, + 0, + 0, + 10821, + 10822, + 10823, + 0, + 10826, + 10849, + 0, + 0, + 0, + 0, + 10850, + 0, + 0, + 10852, + 0, + 10853, + 0, + 0, + 10856, + 0, + 0, + 10857, + 10858, + 10859, + 10860, + 0, + 0, + 0, + 0, + 0, + 0, + 10863, + 0, + 10866, + 10867, + 10872, + 10890, + 0, + 0, + 10891, + 10892, + 0, + 0, + 0, + 0, + 0, + 10893, + 0, + 0, + 0, + 10896, + 10899, + 0, + 0, + 10900, + 10902, + 0, + 0, + 0, + 0, + 0, + 10903, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10905, + 0, + 10906, + 0, + 0, + 0, + 0, + 10908, + 10911, + 0, + 10912, + 0, + 0, + 10916, + 0, + 0, + 0, + 0, + 0, + 10917, + 0, + 10918, + 0, + 0, + 0, + 10923, + 0, + 0, + 0, + 0, + 0, + 10924, + 0, + 0, + 10928, + 10929, + 0, + 0, + 10930, + 0, + 0, + 0, + 10932, + 0, + 0, + 0, + 0, + 10939, + 0, + 0, + 10945, + 0, + 0, + 0, + 10947, + 0, + 0, + 10948, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10958, + 0, + 10960, + 10962, + 0, + 0, + 10964, + 0, + 0, + 0, + 10966, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 10967, + 0, + 0, + 0, + 10968, + 0, + 0, + 0, + 10973, + 0, + 0, + 0, + 0, + 0, + 10975, + 0, + 0, + 0, + 10976, + 10978, + 0, + 0, + 10982, + 10984, + 10987, + 0, + 
0, + 10988, + 0, + 10989, + 0, + 0, + 10991, + 0, + 0, + 0, + 0, + 10992, + 0, + 0, + 0, + 10993, + 0, + 10995, + 0, + 0, + 0, + 10996, + 10997, + 0, + 0, + 0, + 10998, + 0, + 10999, + 0, + 11001, + 0, + 0, + 0, + 0, + 0, + 0, + 11010, + 11012, + 0, + 11013, + 11016, + 11017, + 0, + 0, + 11019, + 11020, + 11021, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11022, + 0, + 0, + 11023, + 11029, + 0, + 0, + 0, + 0, + 11031, + 0, + 0, + 0, + 11034, + 0, + 0, + 0, + 0, + 11055, + 0, + 0, + 0, + 0, + 0, + 11056, + 11060, + 0, + 0, + 0, + 0, + 0, + 0, + 11061, + 0, + 0, + 11064, + 11065, + 0, + 11066, + 0, + 11069, + 0, + 11085, + 0, + 0, + 0, + 0, + 0, + 11086, + 0, + 0, + 0, + 11088, + 0, + 0, + 0, + 11094, + 0, + 0, + 0, + 11095, + 11096, + 0, + 0, + 0, + 0, + 0, + 0, + 11097, + 11098, + 0, + 0, + 0, + 0, + 0, + 0, + 11099, + 0, + 0, + 11102, + 11108, + 0, + 0, + 0, + 11109, + 0, + 11114, + 11119, + 0, + 11131, + 0, + 0, + 0, + 11142, + 0, + 0, + 11143, + 0, + 11146, + 0, + 11147, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11148, + 0, + 11149, + 11152, + 11153, + 11154, + 0, + 11156, + 0, + 11157, + 0, + 0, + 0, + 11158, + 0, + 0, + 11159, + 11160, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11163, + 0, + 0, + 11164, + 11166, + 0, + 0, + 0, + 11172, + 11174, + 0, + 0, + 0, + 11176, + 0, + 0, + 0, + 0, + 0, + 11182, + 11183, + 0, + 0, + 0, + 11184, + 11187, + 0, + 0, + 11188, + 11189, + 0, + 0, + 0, + 0, + 0, + 0, + 11194, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11200, + 11202, + 0, + 0, + 0, + 0, + 0, + 0, + 11203, + 0, + 11204, + 0, + 0, + 0, + 0, + 0, + 11205, + 0, + 0, + 0, + 11206, + 0, + 11207, + 0, + 0, + 11209, + 0, + 11211, + 0, + 11214, + 0, + 0, + 11231, + 0, + 0, + 0, + 11293, + 11295, + 0, + 0, + 11296, + 11297, + 11302, + 0, + 0, + 0, + 11307, + 0, + 0, + 0, + 0, + 11309, + 11310, + 0, + 11311, + 0, + 0, + 0, + 11313, + 0, + 11314, + 0, + 0, + 0, + 0, + 11334, + 0, + 11338, + 0, + 0, + 0, + 
11339, + 0, + 0, + 0, + 0, + 0, + 11340, + 0, + 11341, + 11342, + 0, + 11344, + 0, + 11345, + 0, + 0, + 0, + 11348, + 11349, + 0, + 0, + 11350, + 0, + 0, + 0, + 11355, + 0, + 0, + 0, + 0, + 0, + 0, + 11356, + 0, + 11357, + 11370, + 0, + 0, + 11371, + 0, + 11374, + 11376, + 0, + 0, + 0, + 11377, + 0, + 0, + 11378, + 11383, + 0, + 11386, + 11399, + 0, + 11400, + 11406, + 0, + 0, + 0, + 11408, + 0, + 0, + 11409, + 11412, + 0, + 0, + 0, + 0, + 11417, + 0, + 0, + 0, + 11418, + 0, + 11421, + 0, + 11426, + 11429, + 0, + 0, + 0, + 0, + 0, + 11430, + 0, + 11437, + 0, + 11438, + 0, + 0, + 0, + 0, + 0, + 11440, + 11453, + 0, + 0, + 0, + 0, + 0, + 0, + 11454, + 0, + 0, + 0, + 0, + 11455, + 0, + 0, + 11456, + 11460, + 11461, + 11463, + 0, + 11469, + 0, + 11473, + 0, + 0, + 0, + 0, + 11474, + 0, + 0, + 0, + 11475, + 0, + 11476, + 11477, + 11480, + 0, + 0, + 0, + 0, + 11481, + 0, + 0, + 11484, + 0, + 0, + 11487, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11497, + 0, + 0, + 11502, + 0, + 11509, + 0, + 0, + 11510, + 11511, + 11513, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11515, + 0, + 0, + 0, + 0, + 11516, + 0, + 11520, + 11521, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11529, + 11530, + 11531, + 11534, + 0, + 0, + 11543, + 0, + 0, + 0, + 0, + 0, + 11547, + 0, + 11548, + 0, + 0, + 0, + 0, + 0, + 11552, + 11556, + 0, + 11557, + 0, + 0, + 11559, + 0, + 11560, + 0, + 0, + 0, + 0, + 0, + 0, + 11561, + 0, + 0, + 11563, + 11564, + 0, + 11565, + 0, + 0, + 0, + 0, + 11567, + 0, + 0, + 0, + 11569, + 0, + 11574, + 0, + 11575, + 0, + 0, + 0, + 11577, + 0, + 11578, + 0, + 0, + 0, + 11580, + 11581, + 0, + 0, + 0, + 11582, + 11584, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11587, + 0, + 11588, + 11591, + 0, + 11595, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11596, + 0, + 11597, + 0, + 0, + 0, + 0, + 11598, + 11601, + 0, + 0, + 0, + 11602, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11603, + 11604, + 0, + 11606, + 0, + 
0, + 11608, + 0, + 0, + 0, + 0, + 11610, + 0, + 0, + 11611, + 0, + 0, + 0, + 0, + 11613, + 0, + 11622, + 0, + 0, + 0, + 11623, + 0, + 0, + 0, + 0, + 11625, + 0, + 0, + 11626, + 11627, + 11628, + 11630, + 0, + 0, + 0, + 0, + 0, + 0, + 11639, + 0, + 0, + 11646, + 0, + 11648, + 11649, + 0, + 11650, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11651, + 0, + 0, + 11652, + 11653, + 11656, + 0, + 0, + 11677, + 11679, + 0, + 0, + 0, + 0, + 11680, + 0, + 0, + 11681, + 0, + 11685, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11688, + 0, + 0, + 0, + 11716, + 0, + 11719, + 0, + 0, + 0, + 0, + 0, + 11721, + 0, + 0, + 11724, + 11743, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11745, + 11748, + 11750, + 0, + 0, + 0, + 0, + 0, + 11751, + 0, + 0, + 0, + 11752, + 11754, + 0, + 11755, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11759, + 0, + 0, + 0, + 0, + 0, + 0, + 11760, + 0, + 0, + 0, + 11761, + 0, + 0, + 0, + 0, + 0, + 0, + 11766, + 11767, + 0, + 11772, + 11773, + 0, + 11774, + 0, + 0, + 11775, + 0, + 11777, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11778, + 11780, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11783, + 0, + 11784, + 0, + 0, + 0, + 11785, + 0, + 0, + 0, + 11786, + 0, + 0, + 0, + 0, + 11788, + 0, + 0, + 11789, + 11791, + 11792, + 0, + 0, + 0, + 0, + 11795, + 11834, + 11835, + 11836, + 0, + 0, + 11837, + 0, + 0, + 0, + 11838, + 0, + 0, + 11846, + 11851, + 0, + 11852, + 0, + 11869, + 0, + 0, + 0, + 11871, + 0, + 0, + 0, + 11872, + 11874, + 0, + 0, + 0, + 0, + 0, + 0, + 11875, + 0, + 11876, + 11877, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11883, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11884, + 0, + 11885, + 0, + 11886, + 0, + 0, + 11887, + 0, + 11894, + 11895, + 11897, + 11909, + 11910, + 0, + 11912, + 11918, + 0, + 0, + 11920, + 0, + 11922, + 11924, + 11927, + 11928, + 0, + 0, + 0, + 0, + 11929, + 0, + 11934, + 0, + 0, + 0, + 0, + 0, + 11941, + 11943, + 11944, + 0, + 11945, + 0, + 0, + 0, + 0, + 11948, + 11949, + 0, + 0, 
+ 0, + 0, + 11953, + 0, + 11954, + 0, + 11955, + 0, + 11956, + 0, + 0, + 0, + 0, + 0, + 11957, + 0, + 0, + 11959, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 11961, + 0, + 0, + 0, + 0, + 0, + 11978, + 0, + 0, + 0, + 11979, + 11980, + 11986, + 11987, + 0, + 11992, + 0, + 0, + 0, + 0, + 0, + 11993, + 0, + 0, + 0, + 11994, + 0, + 11999, + 12004, + 12005, + 12006, + 0, + 0, + 0, + 0, + 0, + 12011, + 0, + 0, + 12012, + 12014, + 0, + 0, + 12015, + 0, + 0, + 12019, + 12028, + 0, + 0, + 12029, + 0, + 0, + 12032, + 12033, + 0, + 0, + 0, + 0, + 12034, + 0, + 12041, + 12043, + 0, + 0, + 12044, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12046, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12054, + 12055, + 0, + 12056, + 0, + 0, + 0, + 12060, + 12064, + 0, + 0, + 0, + 0, + 0, + 12065, + 12067, + 12068, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12074, + 0, + 0, + 0, + 12075, + 12076, + 0, + 0, + 0, + 12079, + 0, + 12081, + 12086, + 12087, + 0, + 0, + 12088, + 0, + 0, + 0, + 0, + 12089, + 0, + 12092, + 0, + 0, + 0, + 0, + 12097, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12098, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12102, + 12103, + 12104, + 12111, + 0, + 0, + 12114, + 12116, + 0, + 0, + 0, + 12118, + 0, + 0, + 0, + 12119, + 12120, + 12128, + 0, + 0, + 0, + 0, + 12130, + 0, + 0, + 0, + 0, + 0, + 0, + 12131, + 0, + 0, + 0, + 12132, + 12134, + 0, + 0, + 0, + 0, + 12137, + 0, + 12139, + 0, + 12141, + 0, + 0, + 12142, + 0, + 0, + 0, + 12144, + 0, + 0, + 0, + 0, + 0, + 12145, + 0, + 12148, + 0, + 12153, + 0, + 0, + 0, + 0, + 12154, + 12171, + 12173, + 0, + 0, + 0, + 12175, + 0, + 0, + 0, + 0, + 12178, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12183, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12184, + 0, + 0, + 0, + 12186, + 0, + 0, + 0, + 0, + 0, + 12187, + 12188, + 0, + 0, + 12189, + 0, + 12196, + 0, + 12197, + 0, + 0, + 12198, + 0, + 12201, + 0, + 0, + 0, + 0, + 12203, + 0, + 12209, + 0, + 0, + 0, + 0, + 12210, + 12211, + 12212, + 12213, + 0, + 12217, + 12218, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 12222, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12223, + 0, + 0, + 12229, + 0, + 0, + 0, + 0, + 12233, + 0, + 0, + 0, + 0, + 12234, + 0, + 0, + 12236, + 12242, + 0, + 0, + 0, + 12243, + 0, + 0, + 0, + 12244, + 12253, + 0, + 12254, + 12256, + 0, + 12257, + 0, + 0, + 12275, + 0, + 0, + 0, + 0, + 0, + 12277, + 0, + 0, + 0, + 0, + 0, + 12278, + 0, + 12289, + 0, + 0, + 12290, + 0, + 12292, + 12293, + 0, + 0, + 12294, + 0, + 12295, + 0, + 0, + 12296, + 0, + 12297, + 0, + 12298, + 0, + 0, + 0, + 0, + 12301, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12309, + 0, + 12338, + 12340, + 0, + 0, + 0, + 0, + 12341, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12342, + 12343, + 0, + 12344, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12345, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12346, + 0, + 0, + 0, + 0, + 12348, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12350, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12351, + 0, + 12355, + 12356, + 12357, + 0, + 0, + 12367, + 12370, + 12371, + 0, + 0, + 0, + 0, + 0, + 12372, + 12376, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12379, + 0, + 12382, + 0, + 12383, + 0, + 0, + 12384, + 0, + 0, + 0, + 0, + 12393, + 0, + 0, + 12394, + 0, + 0, + 0, + 0, + 12398, + 12403, + 0, + 0, + 12404, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12410, + 0, + 0, + 0, + 12411, + 0, + 0, + 0, + 12412, + 0, + 0, + 0, + 0, + 12420, + 0, + 12421, + 0, + 0, + 0, + 0, + 0, + 12423, + 0, + 12425, + 12429, + 0, + 0, + 0, + 12431, + 12432, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12434, + 0, + 0, + 0, + 0, + 0, + 12435, + 12436, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12437, + 0, + 0, + 0, + 0, + 0, + 12438, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12445, + 0, + 0, + 0, + 12450, + 12451, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12452, + 12475, + 0, + 0, + 12493, + 12494, + 0, + 0, + 0, + 12495, + 0, + 0, + 0, + 0, + 12496, 
+ 12502, + 12509, + 0, + 0, + 0, + 0, + 12510, + 0, + 12512, + 12513, + 0, + 0, + 0, + 0, + 12514, + 0, + 0, + 0, + 12515, + 0, + 12520, + 0, + 0, + 0, + 12524, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12527, + 0, + 0, + 0, + 12528, + 0, + 0, + 0, + 12529, + 0, + 0, + 0, + 0, + 0, + 12530, + 0, + 12535, + 0, + 0, + 12536, + 0, + 12538, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12540, + 0, + 12548, + 0, + 0, + 0, + 0, + 0, + 12550, + 0, + 0, + 0, + 12551, + 12552, + 0, + 0, + 0, + 12554, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12555, + 0, + 0, + 12562, + 0, + 12565, + 0, + 12566, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12569, + 0, + 0, + 0, + 12571, + 12574, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12577, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12578, + 12579, + 12603, + 0, + 12608, + 0, + 0, + 12611, + 0, + 12612, + 0, + 12615, + 0, + 12625, + 0, + 0, + 0, + 0, + 12627, + 12646, + 0, + 12648, + 0, + 0, + 12657, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12670, + 0, + 0, + 12671, + 0, + 12673, + 12677, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12679, + 0, + 12681, + 0, + 12682, + 12693, + 0, + 12694, + 0, + 12697, + 0, + 12701, + 0, + 0, + 0, + 12703, + 12704, + 0, + 0, + 0, + 0, + 12707, + 12737, + 0, + 0, + 12739, + 0, + 0, + 12740, + 0, + 0, + 12742, + 12743, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12745, + 0, + 12746, + 12747, + 0, + 12748, + 0, + 0, + 12759, + 12767, + 0, + 0, + 0, + 0, + 12773, + 0, + 12774, + 12778, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12779, + 0, + 0, + 0, + 0, + 0, + 12780, + 12793, + 0, + 12824, + 0, + 12825, + 0, + 12836, + 0, + 0, + 0, + 0, + 12839, + 0, + 12842, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12843, + 12845, + 0, + 12846, + 0, + 0, + 0, + 0, + 12847, + 0, + 0, + 12850, + 12852, + 12853, + 0, + 0, + 0, + 12854, + 0, + 0, + 0, + 12855, + 0, + 12856, + 0, + 12858, + 0, + 0, + 12859, 
+ 0, + 12862, + 0, + 12863, + 0, + 0, + 12866, + 0, + 12869, + 12872, + 12873, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12875, + 0, + 12877, + 0, + 0, + 12878, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12884, + 12885, + 12888, + 0, + 12889, + 0, + 0, + 0, + 0, + 12893, + 0, + 0, + 0, + 12895, + 12896, + 12898, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12902, + 0, + 12909, + 12910, + 0, + 12926, + 0, + 12928, + 0, + 0, + 0, + 12929, + 0, + 12930, + 0, + 0, + 0, + 0, + 12931, + 0, + 12932, + 12933, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12934, + 0, + 12942, + 0, + 0, + 0, + 0, + 12944, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12946, + 0, + 0, + 12948, + 0, + 0, + 12949, + 0, + 0, + 0, + 0, + 12950, + 0, + 0, + 0, + 0, + 12951, + 0, + 12952, + 0, + 12953, + 0, + 0, + 0, + 12954, + 12958, + 12959, + 0, + 0, + 0, + 0, + 0, + 12960, + 12964, + 0, + 0, + 0, + 0, + 0, + 12966, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 12970, + 0, + 12971, + 0, + 0, + 0, + 0, + 0, + 0, + 12972, + 0, + 0, + 12982, + 0, + 0, + 0, + 12984, + 12985, + 0, + 12986, + 12996, + 12997, + 13001, + 13002, + 0, + 0, + 0, + 0, + 13004, + 0, + 0, + 13005, + 0, + 0, + 13007, + 13009, + 0, + 13017, + 0, + 0, + 0, + 13020, + 0, + 13021, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13022, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13024, + 13027, + 0, + 0, + 0, + 0, + 0, + 13028, + 0, + 0, + 13029, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13032, + 0, + 13037, + 0, + 0, + 0, + 0, + 0, + 0, + 13040, + 0, + 0, + 13041, + 0, + 0, + 0, + 13043, + 13044, + 13046, + 0, + 0, + 0, + 0, + 13047, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13049, + 13054, + 0, + 13056, + 0, + 0, + 13060, + 13061, + 0, + 0, + 0, + 0, + 0, + 13067, + 0, + 0, + 13068, + 0, + 13071, + 0, + 0, + 0, + 0, + 0, + 13077, + 13078, + 0, + 0, + 0, + 0, + 0, + 13079, + 13080, + 13081, + 0, + 13082, + 0, + 0, + 0, + 13085, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13086, + 0, + 13087, + 13088, + 0, + 0, + 0, + 
0, + 0, + 13094, + 0, + 13099, + 0, + 13100, + 0, + 0, + 0, + 13101, + 0, + 13125, + 13126, + 13128, + 13129, + 0, + 0, + 13130, + 0, + 13131, + 0, + 0, + 0, + 0, + 0, + 0, + 13134, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13150, + 0, + 13168, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13169, + 0, + 0, + 13170, + 0, + 0, + 0, + 0, + 13174, + 0, + 0, + 0, + 13176, + 0, + 0, + 0, + 0, + 0, + 13177, + 0, + 13178, + 13183, + 13187, + 0, + 0, + 0, + 13189, + 0, + 0, + 13190, + 0, + 0, + 13191, + 0, + 0, + 13206, + 0, + 0, + 0, + 13207, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13212, + 0, + 0, + 13219, + 13232, + 0, + 0, + 0, + 13241, + 0, + 13249, + 13253, + 0, + 0, + 0, + 0, + 0, + 13255, + 13259, + 0, + 13260, + 13261, + 0, + 13262, + 0, + 13272, + 0, + 0, + 0, + 0, + 13276, + 0, + 0, + 0, + 0, + 13277, + 13299, + 0, + 0, + 13301, + 13302, + 0, + 0, + 13303, + 0, + 0, + 13305, + 0, + 13310, + 0, + 0, + 0, + 13311, + 0, + 0, + 0, + 0, + 13325, + 0, + 13328, + 0, + 0, + 0, + 13329, + 0, + 0, + 0, + 0, + 0, + 0, + 13330, + 0, + 0, + 13331, + 0, + 13335, + 0, + 0, + 13342, + 0, + 0, + 0, + 0, + 0, + 13343, + 0, + 13354, + 0, + 13362, + 0, + 13366, + 13367, + 13369, + 0, + 0, + 13371, + 13372, + 0, + 13373, + 13374, + 0, + 13376, + 0, + 13380, + 13381, + 13386, + 0, + 13387, + 13388, + 0, + 13389, + 13391, + 13395, + 0, + 0, + 0, + 0, + 0, + 13401, + 13409, + 0, + 13410, + 0, + 0, + 0, + 0, + 13420, + 0, + 0, + 0, + 0, + 0, + 13422, + 0, + 0, + 0, + 0, + 13423, + 0, + 0, + 0, + 0, + 13425, + 0, + 0, + 0, + 0, + 0, + 13427, + 0, + 0, + 0, + 13428, + 0, + 0, + 13430, + 13438, + 0, + 13439, + 0, + 13445, + 0, + 13448, + 13449, + 0, + 0, + 0, + 0, + 0, + 0, + 13451, + 0, + 13457, + 0, + 0, + 0, + 0, + 13458, + 13459, + 0, + 13460, + 0, + 0, + 0, + 0, + 13464, + 13465, + 13466, + 13470, + 0, + 13471, + 13472, + 13474, + 13475, + 0, + 13476, + 0, + 0, + 13478, + 13479, + 0, + 13481, + 0, + 0, + 0, + 0, + 13487, + 0, + 13490, + 0, + 13493, + 0, + 0, 
+ 13494, + 0, + 0, + 13495, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13496, + 13497, + 0, + 13500, + 0, + 0, + 13516, + 13522, + 0, + 0, + 13525, + 13528, + 0, + 0, + 0, + 13530, + 13535, + 0, + 13537, + 13539, + 0, + 13540, + 0, + 13543, + 0, + 13544, + 0, + 0, + 0, + 0, + 0, + 0, + 13545, + 0, + 0, + 0, + 0, + 0, + 0, + 13547, + 0, + 0, + 0, + 13549, + 13555, + 0, + 0, + 0, + 13556, + 13557, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13558, + 0, + 13563, + 0, + 0, + 0, + 0, + 13564, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13566, + 0, + 0, + 0, + 0, + 0, + 0, + 13569, + 0, + 0, + 13571, + 0, + 0, + 0, + 0, + 13573, + 0, + 0, + 0, + 0, + 0, + 0, + 13578, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13581, + 0, + 13586, + 0, + 13595, + 0, + 13600, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13601, + 13603, + 0, + 13604, + 13605, + 13606, + 13607, + 0, + 0, + 13617, + 13618, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13623, + 0, + 13625, + 13627, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13629, + 0, + 0, + 0, + 13634, + 0, + 0, + 0, + 13638, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13654, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13656, + 0, + 13659, + 0, + 0, + 13660, + 0, + 0, + 13662, + 0, + 0, + 0, + 13663, + 0, + 13664, + 0, + 0, + 0, + 0, + 0, + 13668, + 0, + 13669, + 13671, + 0, + 0, + 13672, + 0, + 0, + 0, + 0, + 0, + 0, + 13675, + 13685, + 0, + 13686, + 0, + 0, + 0, + 13687, + 0, + 0, + 0, + 13692, + 13694, + 13697, + 0, + 0, + 0, + 13702, + 0, + 0, + 0, + 0, + 0, + 13705, + 0, + 0, + 0, + 0, + 13707, + 0, + 0, + 0, + 13714, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13715, + 0, + 13716, + 13717, + 0, + 0, + 13719, + 13724, + 13730, + 13731, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13732, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13734, + 0, + 13736, + 0, + 0, + 13737, + 13738, + 13747, + 0, + 13751, + 0, + 0, + 13752, + 0, + 0, + 0, + 13753, + 0, + 13757, + 0, + 0, + 13762, + 13763, + 0, + 13764, + 13765, + 0, + 13766, + 
0, + 0, + 13767, + 0, + 0, + 0, + 13768, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13769, + 0, + 0, + 13772, + 0, + 13775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13776, + 13778, + 13787, + 0, + 0, + 0, + 13797, + 0, + 13798, + 0, + 13801, + 0, + 13804, + 13806, + 0, + 0, + 0, + 0, + 13816, + 13817, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13834, + 0, + 13836, + 0, + 0, + 13838, + 0, + 0, + 13839, + 0, + 13840, + 0, + 0, + 0, + 0, + 13842, + 0, + 0, + 0, + 0, + 0, + 0, + 13843, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13845, + 0, + 0, + 0, + 0, + 0, + 13858, + 0, + 0, + 13860, + 0, + 0, + 13861, + 0, + 0, + 13862, + 13863, + 0, + 13868, + 0, + 13869, + 13870, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13872, + 0, + 0, + 0, + 0, + 13873, + 13878, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13886, + 0, + 13888, + 13889, + 13890, + 0, + 0, + 13891, + 13894, + 0, + 13897, + 13899, + 13900, + 13904, + 0, + 0, + 13906, + 0, + 0, + 0, + 13909, + 0, + 0, + 0, + 13910, + 0, + 0, + 0, + 13911, + 0, + 0, + 0, + 0, + 0, + 13912, + 13917, + 0, + 0, + 0, + 0, + 13918, + 0, + 13919, + 0, + 0, + 13920, + 0, + 0, + 0, + 13921, + 0, + 0, + 13922, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13924, + 0, + 13927, + 0, + 0, + 0, + 0, + 0, + 13932, + 0, + 13933, + 0, + 13934, + 0, + 0, + 13935, + 0, + 13944, + 0, + 0, + 0, + 13954, + 0, + 0, + 13955, + 0, + 0, + 0, + 0, + 13956, + 0, + 13957, + 0, + 13967, + 13969, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 13970, + 13990, + 0, + 13991, + 13994, + 0, + 13995, + 0, + 0, + 0, + 0, + 13996, + 0, + 0, + 13999, + 0, + 0, + 0, + 14018, + 0, + 14019, + 0, + 14021, + 0, + 0, + 0, + 0, + 0, + 0, + 14041, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14043, + 0, + 0, + 0, + 0, + 14046, + 0, + 0, + 0, + 14048, + 14049, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14051, + 0, + 0, + 14052, + 14056, + 0, + 14063, + 0, + 14064, + 14066, + 0, 
+ 0, + 14067, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14068, + 0, + 0, + 0, + 14072, + 0, + 14074, + 14075, + 0, + 14076, + 14079, + 14085, + 14086, + 14087, + 14093, + 0, + 0, + 0, + 0, + 14095, + 0, + 0, + 0, + 0, + 0, + 0, + 14096, + 14097, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14098, + 0, + 14102, + 0, + 0, + 0, + 0, + 0, + 14103, + 0, + 0, + 0, + 14104, + 0, + 0, + 14105, + 0, + 0, + 0, + 14107, + 14108, + 0, + 0, + 14109, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14117, + 0, + 0, + 0, + 0, + 14118, + 0, + 0, + 0, + 0, + 14119, + 0, + 0, + 14120, + 0, + 0, + 14121, + 0, + 14122, + 14127, + 0, + 14128, + 14136, + 0, + 0, + 14138, + 0, + 14140, + 0, + 0, + 0, + 14141, + 14142, + 0, + 0, + 0, + 0, + 14146, + 0, + 0, + 14149, + 0, + 14151, + 0, + 0, + 0, + 14152, + 0, + 0, + 14153, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14154, + 0, + 14156, + 14157, + 0, + 0, + 14159, + 0, + 14161, + 0, + 0, + 0, + 0, + 14162, + 0, + 0, + 0, + 0, + 0, + 0, + 14163, + 0, + 0, + 14173, + 0, + 0, + 0, + 0, + 0, + 0, + 14174, + 0, + 0, + 14176, + 0, + 0, + 14178, + 0, + 0, + 14179, + 14181, + 0, + 0, + 14182, + 14185, + 14187, + 0, + 14190, + 0, + 0, + 14197, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14198, + 0, + 0, + 0, + 0, + 0, + 0, + 14199, + 14200, + 0, + 0, + 0, + 14204, + 0, + 0, + 14208, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14231, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14234, + 0, + 0, + 14235, + 0, + 0, + 0, + 14240, + 14241, + 0, + 0, + 0, + 14246, + 0, + 0, + 0, + 14247, + 0, + 14250, + 0, + 0, + 14251, + 0, + 0, + 14254, + 0, + 0, + 14256, + 0, + 0, + 0, + 14260, + 0, + 14261, + 0, + 0, + 0, + 0, + 14262, + 14267, + 14269, + 0, + 0, + 14277, + 0, + 0, + 14278, + 0, + 14279, + 14282, + 0, + 0, + 0, + 14283, + 0, + 0, + 0, + 14284, + 14285, + 0, + 0, + 0, + 0, + 14286, + 0, + 0, + 0, + 14288, + 0, + 0, + 0, + 14289, + 0, + 14290, + 0, + 14293, + 14301, + 14302, + 14304, + 14305, + 0, + 14307, + 0, + 14308, + 
14309, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14311, + 14312, + 0, + 0, + 14317, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14318, + 0, + 0, + 0, + 0, + 14320, + 0, + 0, + 0, + 0, + 14321, + 14322, + 0, + 0, + 0, + 0, + 0, + 14326, + 14329, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14330, + 14331, + 0, + 0, + 0, + 0, + 14332, + 0, + 0, + 0, + 14333, + 0, + 0, + 14337, + 14340, + 0, + 14341, + 0, + 0, + 14342, + 0, + 14345, + 14346, + 0, + 0, + 14347, + 0, + 14362, + 0, + 0, + 0, + 0, + 0, + 14364, + 14365, + 14371, + 0, + 14373, + 0, + 0, + 14374, + 0, + 14379, + 0, + 14400, + 0, + 0, + 0, + 0, + 0, + 14401, + 0, + 0, + 14405, + 0, + 14406, + 0, + 14408, + 14409, + 0, + 0, + 0, + 14417, + 0, + 0, + 14424, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14430, + 0, + 0, + 0, + 14431, + 0, + 0, + 14435, + 0, + 14440, + 0, + 0, + 0, + 0, + 0, + 0, + 14442, + 0, + 0, + 14443, + 0, + 0, + 0, + 0, + 0, + 14446, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14454, + 0, + 14457, + 0, + 14460, + 0, + 0, + 14466, + 0, + 0, + 0, + 0, + 0, + 14467, + 0, + 0, + 0, + 0, + 0, + 0, + 14469, + 0, + 14477, + 0, + 0, + 0, + 0, + 0, + 0, + 14478, + 14482, + 0, + 0, + 0, + 14483, + 0, + 0, + 0, + 14485, + 14486, + 0, + 0, + 0, + 14487, + 14488, + 14489, + 14492, + 14493, + 14494, + 14495, + 14496, + 14497, + 0, + 14499, + 0, + 14501, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14502, + 0, + 14507, + 14512, + 14513, + 14514, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14515, + 14526, + 14530, + 0, + 14537, + 0, + 14544, + 0, + 14547, + 0, + 0, + 14548, + 14550, + 14551, + 0, + 0, + 14552, + 0, + 0, + 0, + 14553, + 0, + 14554, + 0, + 0, + 0, + 0, + 14556, + 14564, + 0, + 0, + 14565, + 14566, + 0, + 0, + 0, + 0, + 0, + 0, + 14568, + 0, + 0, + 14569, + 0, + 0, + 0, + 14571, + 14576, + 0, + 0, + 14577, + 14578, + 14579, + 0, + 0, + 14580, + 0, + 0, + 0, + 0, + 14582, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14583, + 
0, + 0, + 0, + 0, + 0, + 14587, + 0, + 14588, + 0, + 0, + 14600, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14601, + 0, + 0, + 14604, + 14605, + 14611, + 0, + 14613, + 0, + 0, + 0, + 0, + 14615, + 0, + 0, + 0, + 0, + 0, + 0, + 14627, + 0, + 14628, + 0, + 0, + 0, + 0, + 14631, + 0, + 14633, + 14634, + 0, + 0, + 0, + 0, + 14635, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14636, + 0, + 0, + 14639, + 14642, + 0, + 0, + 0, + 0, + 14644, + 0, + 0, + 0, + 0, + 14645, + 14646, + 0, + 14653, + 0, + 0, + 14654, + 0, + 14658, + 0, + 14661, + 0, + 0, + 0, + 14665, + 0, + 0, + 0, + 14668, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14669, + 0, + 0, + 14670, + 0, + 0, + 0, + 14680, + 0, + 0, + 14681, + 0, + 0, + 0, + 0, + 0, + 14682, + 14683, + 0, + 0, + 0, + 0, + 14686, + 0, + 0, + 0, + 0, + 14687, + 14697, + 0, + 0, + 0, + 0, + 14699, + 14705, + 14711, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14712, + 0, + 0, + 0, + 14713, + 0, + 0, + 0, + 0, + 14719, + 0, + 14720, + 14721, + 14726, + 0, + 0, + 0, + 14728, + 14729, + 0, + 0, + 0, + 0, + 14731, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14733, + 14736, + 14737, + 0, + 0, + 14740, + 14742, + 0, + 0, + 0, + 14744, + 14753, + 0, + 0, + 0, + 0, + 14755, + 14758, + 14760, + 0, + 0, + 0, + 0, + 0, + 14761, + 14762, + 14765, + 14771, + 0, + 14772, + 0, + 14773, + 14774, + 0, + 0, + 14775, + 0, + 0, + 14776, + 0, + 0, + 0, + 0, + 14777, + 0, + 14779, + 0, + 0, + 14782, + 0, + 0, + 14785, + 14786, + 14788, + 0, + 0, + 0, + 0, + 0, + 14795, + 0, + 0, + 0, + 0, + 0, + 0, + 14798, + 0, + 14803, + 14804, + 14806, + 0, + 0, + 0, + 14809, + 0, + 0, + 0, + 0, + 0, + 0, + 14810, + 0, + 0, + 0, + 0, + 14811, + 0, + 14812, + 0, + 0, + 0, + 0, + 0, + 14815, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14816, + 0, + 14818, + 0, + 0, + 0, + 0, + 0, + 0, + 14819, + 0, + 14820, + 0, + 14823, + 0, + 0, + 0, + 14824, + 0, + 0, + 14826, + 14827, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 14830, + 0, + 0, + 0, + 0, + 0, + 14833, + 0, + 14845, + 0, + 0, + 0, + 0, + 0, + 14846, + 0, + 0, + 14847, + 14871, + 0, + 14873, + 0, + 14876, + 0, + 14877, + 14878, + 14880, + 0, + 0, + 0, + 0, + 0, + 14881, + 0, + 14882, + 14894, + 0, + 0, + 0, + 0, + 14895, + 0, + 14907, + 0, + 14908, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14911, + 0, + 0, + 0, + 0, + 14920, + 0, + 0, + 14931, + 0, + 14932, + 14934, + 14935, + 0, + 0, + 14936, + 0, + 14945, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 14947, + 0, + 0, + 14948, + 14949, + 14951, + 0, + 0, + 14952, + 0, + 0, + 0, + 14964, + 14973, + 0, + 0, + 14990, + 0, + 0, + 0, + 0, + 14995, + 0, + 0, + 14998, + 15001, + 0, + 0, + 15002, + 15020, + 0, + 0, + 0, + 0, + 0, + 0, + 15021, + 0, + 15022, + 0, + 0, + 0, + 0, + 15023, + 0, + 0, + 15025, + 15029, + 15033, + 0, + 0, + 0, + 15034, + 0, + 0, + 0, + 15035, + 0, + 0, + 0, + 0, + 0, + 15043, + 15044, + 0, + 0, + 0, + 15045, + 15046, + 15048, + 15050, + 0, + 15065, + 0, + 0, + 0, + 0, + 15066, + 0, + 0, + 15075, + 15082, + 15084, + 0, + 0, + 15085, + 15086, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15088, + 0, + 0, + 0, + 15089, + 0, + 0, + 0, + 0, + 15094, + 0, + 15096, + 0, + 15097, + 0, + 15100, + 0, + 0, + 15102, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15105, + 0, + 0, + 15106, + 0, + 15109, + 15113, + 0, + 0, + 0, + 15115, + 0, + 15118, + 0, + 0, + 0, + 0, + 0, + 0, + 15119, + 0, + 0, + 15120, + 0, + 0, + 0, + 0, + 0, + 15123, + 15129, + 0, + 0, + 0, + 15130, + 0, + 15131, + 0, + 0, + 15134, + 0, + 15135, + 0, + 0, + 0, + 15137, + 15138, + 0, + 0, + 0, + 0, + 0, + 0, + 15139, + 0, + 0, + 0, + 0, + 0, + 15140, + 0, + 0, + 15154, + 15162, + 0, + 15169, + 15170, + 0, + 15175, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15177, + 0, + 15178, + 15179, + 0, + 0, + 0, + 0, + 0, + 15183, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15185, + 15187, + 0, + 15194, + 15195, + 15196, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15204, 
+ 0, + 0, + 0, + 0, + 15206, + 0, + 0, + 0, + 0, + 0, + 15207, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15213, + 0, + 15214, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15232, + 0, + 0, + 0, + 0, + 15234, + 0, + 15238, + 15240, + 0, + 15248, + 0, + 0, + 0, + 0, + 15250, + 15251, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15252, + 0, + 0, + 0, + 15255, + 15262, + 15266, + 0, + 0, + 0, + 15267, + 0, + 0, + 0, + 15277, + 15279, + 0, + 0, + 0, + 15280, + 15281, + 15282, + 0, + 0, + 0, + 0, + 0, + 15285, + 0, + 0, + 0, + 0, + 15289, + 0, + 0, + 15291, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15296, + 15297, + 0, + 0, + 15304, + 0, + 0, + 0, + 0, + 15306, + 0, + 0, + 0, + 0, + 0, + 0, + 15307, + 15308, + 0, + 15309, + 0, + 0, + 15311, + 0, + 0, + 15312, + 15313, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15314, + 15317, + 0, + 0, + 0, + 15318, + 15319, + 0, + 0, + 0, + 0, + 15320, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15321, + 0, + 0, + 0, + 0, + 0, + 15324, + 0, + 15325, + 15326, + 0, + 15330, + 0, + 0, + 0, + 0, + 15334, + 0, + 15335, + 0, + 15341, + 0, + 0, + 15342, + 0, + 0, + 15343, + 15344, + 0, + 0, + 0, + 0, + 15345, + 0, + 0, + 0, + 0, + 15347, + 0, + 0, + 15348, + 15349, + 15350, + 0, + 15356, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15357, + 0, + 15358, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15359, + 15360, + 15364, + 0, + 15380, + 0, + 0, + 0, + 0, + 0, + 15392, + 0, + 0, + 15393, + 0, + 15395, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15396, + 0, + 0, + 15397, + 15398, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15399, + 0, + 15400, + 0, + 0, + 0, + 15402, + 0, + 15405, + 15410, + 0, + 0, + 0, + 0, + 15411, + 0, + 0, + 0, + 15412, + 0, + 15416, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15428, + 0, + 15435, + 0, + 0, + 15438, + 0, + 0, + 0, + 0, + 15439, + 0, + 0, + 0, + 15440, + 0, + 0, + 0, + 15441, + 15449, + 15451, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15452, + 0, + 0, 
+ 15455, + 0, + 0, + 0, + 15456, + 0, + 0, + 15458, + 0, + 15460, + 15461, + 0, + 0, + 0, + 0, + 0, + 15462, + 15464, + 0, + 15465, + 0, + 0, + 15466, + 0, + 0, + 15467, + 0, + 0, + 0, + 0, + 0, + 15468, + 0, + 0, + 0, + 0, + 15481, + 0, + 0, + 15484, + 0, + 15485, + 15486, + 0, + 0, + 0, + 15487, + 0, + 0, + 0, + 0, + 0, + 15488, + 0, + 15492, + 15498, + 0, + 0, + 0, + 15499, + 0, + 0, + 0, + 15500, + 0, + 15501, + 0, + 0, + 15512, + 0, + 15522, + 0, + 0, + 0, + 15524, + 0, + 15525, + 15526, + 0, + 0, + 15527, + 0, + 0, + 15545, + 15546, + 0, + 15548, + 15552, + 0, + 15553, + 0, + 0, + 0, + 15554, + 0, + 15555, + 0, + 15557, + 15565, + 15573, + 15577, + 15578, + 0, + 15582, + 0, + 15583, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15586, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15588, + 0, + 0, + 0, + 0, + 0, + 15589, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15593, + 15594, + 0, + 0, + 0, + 0, + 15595, + 0, + 0, + 0, + 0, + 0, + 0, + 15596, + 0, + 0, + 0, + 15597, + 0, + 0, + 0, + 0, + 15600, + 0, + 0, + 15601, + 0, + 0, + 0, + 0, + 15602, + 15603, + 0, + 0, + 0, + 0, + 0, + 0, + 15604, + 0, + 15609, + 0, + 0, + 15612, + 0, + 0, + 15613, + 0, + 0, + 15615, + 15617, + 15618, + 0, + 0, + 15620, + 0, + 15636, + 15637, + 0, + 0, + 15649, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15650, + 0, + 0, + 15651, + 0, + 0, + 0, + 15656, + 0, + 15658, + 0, + 0, + 0, + 15664, + 0, + 0, + 15665, + 0, + 0, + 15668, + 0, + 0, + 0, + 0, + 0, + 15669, + 0, + 0, + 15674, + 0, + 0, + 15675, + 0, + 0, + 0, + 0, + 15676, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15677, + 0, + 0, + 0, + 0, + 15678, + 0, + 0, + 0, + 0, + 0, + 15679, + 0, + 0, + 15681, + 0, + 15686, + 0, + 0, + 0, + 0, + 15687, + 0, + 15688, + 0, + 0, + 15690, + 0, + 0, + 0, + 15697, + 0, + 15699, + 15700, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15701, + 0, + 15702, + 15703, + 0, + 15704, + 0, + 15705, + 0, + 15707, + 0, + 15709, + 0, + 15712, + 15716, + 0, 
+ 15717, + 0, + 15718, + 15720, + 0, + 0, + 0, + 0, + 0, + 15724, + 0, + 0, + 0, + 15725, + 0, + 15726, + 0, + 0, + 0, + 15740, + 0, + 15745, + 15746, + 0, + 0, + 15747, + 0, + 15748, + 0, + 0, + 0, + 0, + 0, + 15749, + 0, + 0, + 0, + 15752, + 0, + 15753, + 0, + 0, + 0, + 0, + 0, + 0, + 15759, + 0, + 0, + 0, + 15765, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15767, + 0, + 0, + 0, + 15771, + 0, + 0, + 15784, + 0, + 0, + 0, + 0, + 15785, + 15790, + 15791, + 0, + 0, + 15792, + 0, + 0, + 0, + 15807, + 0, + 15811, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15818, + 0, + 0, + 0, + 15819, + 0, + 0, + 0, + 0, + 15821, + 0, + 0, + 0, + 0, + 0, + 15822, + 15824, + 0, + 0, + 15827, + 0, + 0, + 15829, + 15831, + 0, + 15832, + 0, + 0, + 15833, + 0, + 15835, + 15838, + 15839, + 15843, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15844, + 0, + 0, + 0, + 0, + 15845, + 15851, + 15856, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15858, + 15860, + 0, + 15861, + 0, + 0, + 0, + 15864, + 0, + 0, + 0, + 0, + 15865, + 0, + 0, + 0, + 0, + 0, + 0, + 15866, + 0, + 15872, + 0, + 0, + 15876, + 0, + 0, + 0, + 0, + 15877, + 15878, + 15883, + 15885, + 0, + 0, + 15888, + 0, + 0, + 0, + 0, + 0, + 15889, + 15890, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15892, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15893, + 0, + 0, + 15894, + 0, + 0, + 0, + 15895, + 0, + 15896, + 15897, + 0, + 15898, + 15901, + 15902, + 0, + 15911, + 15915, + 0, + 15916, + 0, + 15924, + 15935, + 0, + 15937, + 0, + 0, + 0, + 0, + 0, + 15950, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15958, + 0, + 0, + 0, + 15961, + 0, + 0, + 15966, + 0, + 15967, + 0, + 0, + 15977, + 0, + 0, + 15978, + 0, + 0, + 15981, + 15982, + 15983, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 15986, + 0, + 0, + 0, + 15990, + 0, + 15991, + 15995, + 15998, + 0, + 15999, + 0, + 16000, + 0, + 0, + 0, + 0, + 16008, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16009, + 16011, + 0, + 16013, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 16014, + 0, + 0, + 16015, + 16023, + 16024, + 16025, + 0, + 0, + 16026, + 0, + 16030, + 0, + 16032, + 0, + 16033, + 0, + 0, + 0, + 0, + 0, + 0, + 16035, + 16036, + 16037, + 0, + 0, + 0, + 0, + 0, + 16039, + 0, + 0, + 0, + 0, + 16041, + 0, + 0, + 0, + 0, + 0, + 16043, + 16044, + 0, + 0, + 16047, + 0, + 0, + 0, + 16048, + 0, + 0, + 16049, + 16050, + 16052, + 0, + 0, + 0, + 0, + 0, + 16055, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16056, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16058, + 16060, + 16061, + 0, + 0, + 16063, + 0, + 0, + 16064, + 0, + 0, + 0, + 16067, + 16068, + 0, + 0, + 16069, + 16078, + 0, + 0, + 0, + 16079, + 0, + 0, + 0, + 16080, + 0, + 16081, + 0, + 0, + 0, + 16088, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16089, + 16093, + 0, + 16097, + 0, + 16103, + 0, + 16104, + 16105, + 0, + 0, + 16256, + 0, + 0, + 16259, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16260, + 16261, + 0, + 0, + 16262, + 0, + 0, + 16263, + 0, + 16268, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16269, + 0, + 0, + 16270, + 16273, + 0, + 16274, + 0, + 0, + 0, + 0, + 16275, + 16276, + 16277, + 16280, + 0, + 0, + 0, + 16281, + 16284, + 0, + 0, + 0, + 16286, + 0, + 16289, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16290, + 0, + 0, + 0, + 0, + 16291, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16292, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16293, + 16295, + 16297, + 0, + 16302, + 0, + 16304, + 0, + 16305, + 0, + 16306, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16307, + 16308, + 16312, + 0, + 0, + 0, + 0, + 0, + 0, + 16313, + 16315, + 0, + 16318, + 0, + 0, + 0, + 16321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16326, + 16333, + 16336, + 0, + 0, + 0, + 0, + 16337, + 16340, + 0, + 0, + 0, + 0, + 0, + 16345, + 0, + 0, + 16346, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16347, + 0, + 0, + 16348, + 0, + 0, + 0, + 0, + 16349, + 0, + 0, + 0, + 16350, + 0, + 16357, + 0, + 0, + 0, + 0, + 16359, + 16360, + 0, + 0, + 0, + 0, + 16362, + 16363, + 16364, + 
16365, + 0, + 0, + 16366, + 0, + 0, + 0, + 0, + 16367, + 16368, + 0, + 16369, + 16374, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16376, + 0, + 0, + 0, + 0, + 16378, + 16379, + 0, + 16380, + 0, + 0, + 0, + 16381, + 16383, + 0, + 0, + 0, + 0, + 0, + 16390, + 0, + 0, + 0, + 16399, + 0, + 16402, + 16404, + 16406, + 16407, + 0, + 0, + 0, + 16409, + 16411, + 0, + 0, + 0, + 0, + 16412, + 0, + 16413, + 16415, + 16423, + 0, + 0, + 0, + 0, + 0, + 16424, + 0, + 0, + 0, + 16428, + 16434, + 16435, + 16449, + 0, + 16450, + 16451, + 0, + 0, + 0, + 16453, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16454, + 0, + 0, + 16456, + 16458, + 0, + 0, + 16459, + 0, + 0, + 16460, + 0, + 0, + 0, + 0, + 16462, + 0, + 16463, + 0, + 0, + 16466, + 0, + 0, + 0, + 0, + 0, + 16479, + 0, + 0, + 16480, + 0, + 16481, + 16484, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16485, + 0, + 0, + 0, + 0, + 0, + 0, + 16489, + 0, + 0, + 0, + 0, + 0, + 16491, + 0, + 0, + 16498, + 0, + 0, + 16503, + 0, + 16505, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16506, + 0, + 0, + 0, + 16508, + 16509, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16511, + 16513, + 0, + 0, + 0, + 16516, + 0, + 16517, + 0, + 16519, + 0, + 16529, + 0, + 0, + 16531, + 0, + 0, + 0, + 0, + 0, + 0, + 16534, + 0, + 0, + 16541, + 16542, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16543, + 16547, + 16548, + 0, + 0, + 0, + 16551, + 0, + 16552, + 0, + 0, + 0, + 16553, + 0, + 0, + 16558, + 0, + 0, + 16562, + 16565, + 0, + 0, + 0, + 16570, + 0, + 0, + 0, + 16573, + 16585, + 0, + 0, + 0, + 16586, + 16587, + 16595, + 0, + 16596, + 0, + 16598, + 0, + 0, + 0, + 16600, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16601, + 0, + 0, + 0, + 0, + 16603, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16604, + 16612, + 0, + 0, + 0, + 0, + 16613, + 0, + 16618, + 0, + 0, + 0, + 16640, + 0, + 0, + 16641, + 0, + 0, + 0, + 0, + 0, + 0, + 16645, + 0, + 0, + 0, + 0, + 16646, + 0, + 0, + 0, + 0, + 0, + 0, + 16651, + 0, + 0, + 
0, + 0, + 16653, + 16654, + 0, + 0, + 0, + 16655, + 0, + 0, + 16656, + 16667, + 0, + 0, + 0, + 0, + 16671, + 0, + 16672, + 0, + 0, + 0, + 16673, + 0, + 0, + 0, + 0, + 0, + 16676, + 0, + 16686, + 0, + 0, + 0, + 0, + 16689, + 0, + 16690, + 0, + 16692, + 0, + 16693, + 0, + 16694, + 0, + 16696, + 0, + 0, + 0, + 16705, + 0, + 0, + 0, + 0, + 0, + 0, + 16707, + 0, + 0, + 0, + 16709, + 0, + 0, + 0, + 0, + 16711, + 0, + 16712, + 16713, + 0, + 0, + 0, + 16715, + 0, + 0, + 0, + 0, + 16716, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16718, + 16724, + 0, + 0, + 16726, + 16727, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16728, + 0, + 16729, + 0, + 0, + 16730, + 0, + 0, + 0, + 0, + 0, + 16731, + 0, + 0, + 0, + 16732, + 0, + 0, + 0, + 0, + 16734, + 16738, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16743, + 0, + 0, + 16745, + 0, + 0, + 0, + 0, + 0, + 16749, + 0, + 16752, + 0, + 0, + 0, + 0, + 16756, + 0, + 0, + 16758, + 0, + 16759, + 0, + 0, + 0, + 0, + 0, + 16760, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16762, + 0, + 16769, + 0, + 16770, + 0, + 16772, + 0, + 0, + 0, + 16777, + 16780, + 0, + 0, + 0, + 0, + 0, + 0, + 16781, + 0, + 0, + 16782, + 0, + 16784, + 0, + 0, + 16785, + 16787, + 16792, + 0, + 0, + 16794, + 0, + 0, + 0, + 16798, + 0, + 0, + 16809, + 0, + 0, + 16814, + 16816, + 16817, + 0, + 16819, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16820, + 0, + 0, + 16836, + 16839, + 0, + 0, + 16841, + 16851, + 16857, + 0, + 0, + 16858, + 16859, + 0, + 0, + 16860, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16862, + 0, + 16863, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16864, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16876, + 0, + 16881, + 16882, + 0, + 16885, + 16886, + 0, + 16887, + 0, + 0, + 0, + 16889, + 16891, + 0, + 0, + 0, + 0, + 0, + 16894, + 16895, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 16897, + 0, + 16898, + 0, + 0, + 0, + 0, + 0, + 16913, + 0, + 0, + 16924, + 16925, + 16926, + 0, + 0, + 16927, + 0, + 0, + 0, + 16937, + 16938, + 0, + 0, + 0, + 16940, + 16941, + 0, + 
0, + 0, + 16942, + 16945, + 0, + 16946, + 16949, + 16950, + 0, + 0, + 0, + 16952, + 16955, + 0, + 0, + 0, + 16965, + 0, + 16969, + 0, + 0, + 16975, + 0, + 0, + 16976, + 0, + 0, + 0, + 0, + 16978, + 0, + 0, + 16981, + 0, + 16983, + 16989, + 0, + 0, + 0, + 0, + 16990, + 0, + 0, + 16991, + 0, + 0, + 0, + 16993, + 0, + 16994, + 16996, + 17000, + 0, + 0, + 0, + 0, + 0, + 17002, + 17004, + 0, + 17006, + 0, + 0, + 17007, + 0, + 0, + 0, + 0, + 17008, + 17013, + 17014, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17021, + 0, + 17031, + 0, + 0, + 0, + 0, + 0, + 17033, + 17036, + 0, + 17038, + 0, + 0, + 17039, + 0, + 17045, + 0, + 0, + 17046, + 17047, + 0, + 0, + 0, + 0, + 17048, + 0, + 17049, + 17050, + 0, + 17051, + 17053, + 0, + 17054, + 0, + 17055, + 0, + 0, + 0, + 0, + 0, + 17063, + 0, + 0, + 17064, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17065, + 0, + 0, + 17068, + 0, + 0, + 0, + 0, + 0, + 17072, + 0, + 0, + 0, + 0, + 0, + 0, + 17073, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17074, + 0, + 17080, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17081, + 17083, + 17084, + 0, + 0, + 0, + 17085, + 0, + 0, + 0, + 0, + 17092, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17093, + 0, + 17095, + 17102, + 0, + 0, + 0, + 0, + 0, + 0, + 17103, + 0, + 0, + 17105, + 0, + 17107, + 0, + 0, + 0, + 0, + 17114, + 0, + 0, + 0, + 0, + 0, + 17115, + 17125, + 17127, + 0, + 0, + 17128, + 0, + 0, + 0, + 17129, + 17130, + 0, + 17131, + 0, + 0, + 0, + 0, + 0, + 17132, + 17135, + 17145, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17146, + 0, + 17147, + 0, + 17148, + 0, + 0, + 0, + 0, + 0, + 0, + 17149, + 17150, + 0, + 17151, + 17153, + 0, + 17155, + 0, + 0, + 0, + 0, + 17163, + 17171, + 0, + 17174, + 0, + 0, + 0, + 0, + 17179, + 0, + 0, + 17182, + 17185, + 0, + 0, + 0, + 0, + 0, + 17186, + 0, + 0, + 17188, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17189, + 17191, + 0, + 17194, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 17195, + 17196, + 17203, + 17204, + 0, + 0, + 17205, + 17217, + 0, + 0, + 0, + 0, + 0, + 17218, + 0, + 0, + 0, + 0, + 17219, + 0, + 17220, + 0, + 17221, + 0, + 0, + 17230, + 0, + 0, + 0, + 0, + 0, + 17236, + 0, + 17238, + 17239, + 0, + 0, + 0, + 17241, + 17244, + 0, + 0, + 17245, + 0, + 17248, + 0, + 0, + 17251, + 0, + 17252, + 0, + 0, + 17264, + 0, + 17266, + 0, + 0, + 0, + 17268, + 0, + 0, + 0, + 0, + 17271, + 17272, + 0, + 17273, + 0, + 17295, + 0, + 17302, + 0, + 17305, + 0, + 0, + 0, + 17306, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17308, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17309, + 0, + 17310, + 17313, + 0, + 0, + 0, + 0, + 17314, + 17315, + 0, + 17317, + 0, + 0, + 0, + 0, + 17318, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17320, + 0, + 0, + 0, + 0, + 0, + 0, + 17334, + 0, + 17344, + 17348, + 0, + 0, + 0, + 17350, + 17351, + 0, + 0, + 17353, + 0, + 0, + 17354, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17355, + 0, + 0, + 0, + 0, + 0, + 0, + 17356, + 17357, + 0, + 0, + 17359, + 0, + 0, + 0, + 17371, + 0, + 17372, + 0, + 0, + 0, + 17393, + 0, + 0, + 0, + 0, + 17394, + 0, + 0, + 0, + 0, + 0, + 17395, + 0, + 0, + 17399, + 0, + 0, + 0, + 17401, + 17417, + 0, + 17418, + 0, + 17419, + 0, + 0, + 0, + 0, + 0, + 17422, + 17423, + 0, + 0, + 0, + 0, + 0, + 17424, + 0, + 0, + 0, + 0, + 0, + 17428, + 17429, + 17433, + 0, + 0, + 0, + 17437, + 0, + 0, + 17441, + 0, + 0, + 17442, + 0, + 0, + 17453, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17454, + 17456, + 17462, + 0, + 0, + 17466, + 0, + 0, + 17468, + 0, + 0, + 17469, + 0, + 0, + 0, + 0, + 17470, + 0, + 17475, + 0, + 0, + 0, + 0, + 0, + 17479, + 0, + 0, + 0, + 17483, + 17484, + 0, + 17485, + 0, + 17486, + 0, + 17491, + 17492, + 0, + 0, + 17493, + 0, + 17494, + 17495, + 0, + 0, + 0, + 17496, + 0, + 0, + 0, + 17497, + 0, + 0, + 0, + 17502, + 0, + 0, + 0, + 0, + 0, + 17503, + 0, + 17505, + 0, + 17507, + 0, + 0, + 0, + 17512, + 17513, + 17514, + 0, + 0, + 
17515, + 0, + 0, + 0, + 17519, + 0, + 0, + 0, + 17522, + 0, + 0, + 17523, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17527, + 0, + 0, + 0, + 17528, + 0, + 0, + 0, + 17534, + 0, + 0, + 0, + 0, + 17536, + 0, + 0, + 0, + 17539, + 0, + 17540, + 17543, + 17549, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17556, + 0, + 0, + 17558, + 0, + 17559, + 0, + 0, + 17560, + 0, + 0, + 0, + 17563, + 0, + 0, + 0, + 0, + 0, + 0, + 17564, + 0, + 0, + 17565, + 17566, + 0, + 17567, + 0, + 0, + 0, + 0, + 0, + 0, + 17569, + 17570, + 0, + 17575, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17581, + 0, + 0, + 0, + 17582, + 17583, + 0, + 17586, + 0, + 0, + 17587, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17588, + 0, + 0, + 0, + 0, + 17596, + 17597, + 0, + 0, + 17598, + 17600, + 0, + 0, + 0, + 0, + 0, + 0, + 17601, + 0, + 0, + 0, + 17604, + 0, + 0, + 17605, + 0, + 0, + 17607, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17612, + 0, + 0, + 17618, + 0, + 17621, + 17622, + 0, + 0, + 0, + 0, + 17623, + 0, + 0, + 17624, + 0, + 0, + 17630, + 0, + 0, + 17631, + 17633, + 17634, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17635, + 0, + 0, + 17636, + 0, + 0, + 17637, + 0, + 17638, + 0, + 17640, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17641, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17643, + 0, + 0, + 0, + 0, + 17645, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17646, + 17662, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17663, + 17664, + 0, + 17665, + 17666, + 0, + 0, + 0, + 17669, + 17671, + 17673, + 0, + 17679, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17684, + 0, + 0, + 0, + 17686, + 0, + 17714, + 0, + 0, + 17720, + 17722, + 17726, + 0, + 0, + 17728, + 0, + 0, + 17729, + 0, + 0, + 0, + 17732, + 0, + 17733, + 0, + 17734, + 0, + 0, + 0, + 17735, + 0, + 0, + 0, + 0, + 17737, + 0, + 0, + 0, + 0, + 17739, + 0, + 0, + 0, + 17741, + 17742, + 0, + 0, + 0, + 0, + 17743, + 17744, + 17745, + 0, + 0, + 
0, + 17749, + 0, + 17750, + 17751, + 17752, + 17754, + 17761, + 17762, + 0, + 17763, + 0, + 17766, + 0, + 17772, + 0, + 0, + 0, + 0, + 0, + 17775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17776, + 0, + 0, + 17777, + 0, + 0, + 17778, + 17779, + 0, + 17782, + 17783, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17784, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17821, + 0, + 0, + 0, + 17822, + 0, + 0, + 0, + 17823, + 17825, + 0, + 0, + 0, + 0, + 0, + 17826, + 17831, + 17832, + 17833, + 0, + 0, + 17845, + 0, + 0, + 0, + 17846, + 0, + 0, + 0, + 17848, + 17850, + 17854, + 0, + 17855, + 0, + 0, + 17859, + 0, + 0, + 0, + 0, + 0, + 0, + 17860, + 17861, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 17870, + 17871, + 0, + 0, + 0, + 0, + 0, + 0, + 17872, + 0, + 0, + 0, + 17879, + 0, + 0, + 0, + 17881, + 17883, + 0, + 17884, + 0, + 17885, + 0, + 0, + 17886, + 0, + 0, + 17887, + 17891, + 17953, + 0, + 0, + 0, + 0, + 17954, + 0, + 0, + 17955, + 0, + 17968, + 0, + 0, + 17972, + 0, + 0, + 0, + 0, + 0, + 17974, + 0, + 0, + 0, + 0, + 17976, + 17978, + 0, + 0, + 17983, + 0, + 0, + 0, + 0, + 18003, + 0, + 0, + 0, + 0, + 0, + 18007, + 0, + 0, + 0, + 0, + 0, + 18009, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18010, + 0, + 0, + 0, + 0, + 0, + 0, + 18012, + 0, + 0, + 18014, + 0, + 0, + 0, + 18015, + 0, + 0, + 0, + 18016, + 0, + 18017, + 0, + 0, + 0, + 18030, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18031, + 0, + 0, + 18036, + 18037, + 18038, + 0, + 0, + 18049, + 18056, + 0, + 18057, + 18058, + 0, + 18059, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18062, + 0, + 0, + 0, + 0, + 18064, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18067, + 0, + 0, + 0, + 18068, + 0, + 0, + 18075, + 0, + 0, + 18078, + 18093, + 18094, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18097, + 0, + 0, + 0, + 0, + 0, + 18098, + 18100, + 0, + 0, + 0, + 18108, + 0, + 18111, + 0, + 0, + 18112, + 0, + 18113, + 0, + 0, + 18115, + 18116, + 0, + 18118, + 0, + 0, + 0, + 0, + 18121, + 0, + 0, + 0, 
+ 0, + 18123, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18124, + 0, + 0, + 0, + 0, + 18125, + 18126, + 0, + 18127, + 0, + 0, + 18128, + 18135, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18150, + 0, + 0, + 0, + 0, + 0, + 18151, + 18152, + 0, + 0, + 18156, + 18164, + 0, + 18166, + 18171, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18172, + 18183, + 0, + 18184, + 0, + 0, + 0, + 0, + 18185, + 0, + 18187, + 0, + 0, + 0, + 0, + 0, + 18188, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18189, + 0, + 0, + 18190, + 0, + 0, + 18191, + 18192, + 0, + 0, + 18194, + 18195, + 18196, + 0, + 0, + 0, + 18197, + 0, + 18203, + 0, + 18204, + 0, + 0, + 0, + 0, + 18205, + 0, + 0, + 0, + 18207, + 18208, + 0, + 0, + 18214, + 0, + 0, + 0, + 18215, + 18216, + 0, + 0, + 0, + 18220, + 0, + 0, + 18222, + 0, + 0, + 0, + 0, + 0, + 18223, + 0, + 18225, + 18231, + 0, + 18234, + 0, + 18235, + 0, + 0, + 0, + 0, + 18240, + 0, + 0, + 18241, + 18242, + 0, + 0, + 0, + 0, + 0, + 18243, + 18251, + 0, + 18253, + 0, + 18254, + 0, + 0, + 0, + 18266, + 0, + 0, + 0, + 0, + 0, + 0, + 18269, + 18270, + 18271, + 18273, + 18281, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18282, + 0, + 18283, + 0, + 18284, + 0, + 0, + 0, + 0, + 0, + 0, + 18285, + 0, + 18287, + 18289, + 0, + 0, + 18290, + 0, + 0, + 0, + 0, + 18308, + 0, + 0, + 0, + 18310, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18311, + 0, + 18312, + 18313, + 0, + 18315, + 0, + 0, + 18316, + 18320, + 0, + 18331, + 0, + 18332, + 0, + 18336, + 0, + 0, + 0, + 0, + 18337, + 0, + 18340, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18341, + 0, + 18344, + 18345, + 0, + 18346, + 0, + 0, + 0, + 0, + 0, + 18348, + 0, + 18351, + 0, + 0, + 18356, + 0, + 0, + 0, + 0, + 0, + 0, + 18357, + 0, + 0, + 0, + 0, + 0, + 18367, + 0, + 0, + 0, + 18368, + 0, + 18369, + 0, + 18370, + 18371, + 0, + 0, + 0, + 18437, + 18444, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18445, + 18450, + 0, + 0, + 0, + 0, + 18451, + 
0, + 18452, + 0, + 0, + 0, + 18453, + 0, + 0, + 0, + 0, + 0, + 18455, + 0, + 0, + 0, + 18456, + 0, + 18457, + 0, + 18460, + 0, + 0, + 18461, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18466, + 0, + 0, + 18467, + 0, + 0, + 0, + 0, + 18473, + 0, + 0, + 0, + 18476, + 0, + 18477, + 0, + 0, + 0, + 18478, + 18479, + 18480, + 0, + 0, + 0, + 18485, + 0, + 0, + 0, + 18486, + 0, + 0, + 0, + 0, + 0, + 0, + 18488, + 18490, + 0, + 0, + 0, + 0, + 0, + 0, + 18491, + 0, + 0, + 0, + 0, + 0, + 18495, + 0, + 0, + 18496, + 0, + 0, + 0, + 0, + 0, + 0, + 18505, + 0, + 18521, + 0, + 18522, + 18523, + 0, + 0, + 0, + 18525, + 18526, + 0, + 0, + 0, + 0, + 0, + 18527, + 0, + 0, + 0, + 0, + 18532, + 18533, + 0, + 18534, + 0, + 0, + 0, + 0, + 0, + 0, + 18535, + 18537, + 0, + 18538, + 0, + 0, + 0, + 0, + 0, + 0, + 18540, + 18541, + 18542, + 18543, + 0, + 18546, + 0, + 0, + 0, + 0, + 18553, + 18556, + 0, + 0, + 18558, + 0, + 0, + 18569, + 18571, + 0, + 0, + 0, + 18572, + 0, + 18574, + 0, + 0, + 0, + 0, + 18586, + 0, + 0, + 0, + 0, + 0, + 18588, + 0, + 0, + 18589, + 0, + 0, + 0, + 0, + 0, + 0, + 18590, + 0, + 18592, + 0, + 0, + 0, + 0, + 18594, + 0, + 0, + 0, + 18596, + 0, + 0, + 18597, + 18598, + 0, + 0, + 18601, + 0, + 0, + 0, + 0, + 18602, + 0, + 0, + 0, + 18603, + 18604, + 0, + 18605, + 0, + 0, + 0, + 0, + 18608, + 0, + 0, + 18611, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18612, + 0, + 18616, + 0, + 0, + 18617, + 18619, + 0, + 0, + 0, + 18628, + 0, + 0, + 0, + 18629, + 0, + 0, + 18630, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18631, + 0, + 18632, + 0, + 0, + 18635, + 18637, + 0, + 0, + 0, + 0, + 0, + 0, + 18641, + 18643, + 18648, + 0, + 18652, + 0, + 0, + 18653, + 0, + 18655, + 18656, + 0, + 0, + 0, + 18657, + 0, + 0, + 18666, + 18674, + 0, + 0, + 0, + 0, + 18677, + 18684, + 18685, + 0, + 0, + 18686, + 0, + 0, + 18690, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18695, + 18696, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18697, + 0, + 0, + 18700, + 0, + 0, + 0, + 0, + 0, + 0, + 18702, + 0, 
+ 18708, + 0, + 0, + 18709, + 0, + 18710, + 0, + 0, + 18711, + 0, + 18714, + 0, + 0, + 18718, + 0, + 0, + 0, + 0, + 0, + 0, + 18719, + 0, + 0, + 18722, + 0, + 18726, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18731, + 0, + 0, + 0, + 0, + 0, + 18739, + 18741, + 0, + 0, + 18742, + 0, + 18743, + 18744, + 18746, + 18748, + 0, + 18752, + 18753, + 0, + 0, + 18754, + 18763, + 0, + 18765, + 0, + 0, + 0, + 18766, + 0, + 0, + 0, + 18769, + 0, + 0, + 0, + 0, + 0, + 18773, + 18778, + 18779, + 18781, + 0, + 0, + 18784, + 18787, + 0, + 18788, + 0, + 18793, + 0, + 0, + 0, + 0, + 0, + 0, + 18795, + 0, + 0, + 18800, + 0, + 0, + 0, + 0, + 0, + 18801, + 18804, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18806, + 0, + 0, + 0, + 18811, + 18815, + 18816, + 0, + 0, + 0, + 0, + 18825, + 0, + 0, + 18827, + 18829, + 0, + 0, + 18830, + 0, + 0, + 0, + 0, + 18831, + 0, + 0, + 18832, + 0, + 0, + 0, + 0, + 18833, + 0, + 18840, + 0, + 18841, + 0, + 18842, + 0, + 0, + 0, + 0, + 18843, + 0, + 18844, + 0, + 0, + 0, + 0, + 0, + 0, + 18845, + 18846, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18848, + 0, + 0, + 0, + 18853, + 18860, + 0, + 0, + 18862, + 18866, + 0, + 0, + 18867, + 18869, + 0, + 0, + 18874, + 18881, + 18891, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18892, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18895, + 0, + 18896, + 0, + 0, + 0, + 18900, + 0, + 0, + 0, + 18901, + 0, + 18902, + 18915, + 18916, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18919, + 0, + 0, + 0, + 0, + 0, + 18920, + 0, + 0, + 0, + 18921, + 18929, + 0, + 0, + 0, + 0, + 18930, + 0, + 0, + 0, + 0, + 0, + 0, + 18932, + 0, + 0, + 0, + 0, + 18934, + 18942, + 0, + 0, + 0, + 18951, + 18957, + 0, + 0, + 0, + 0, + 18958, + 0, + 0, + 0, + 0, + 18959, + 18960, + 0, + 0, + 18961, + 0, + 0, + 18962, + 0, + 0, + 0, + 0, + 18963, + 18964, + 0, + 0, + 0, + 18965, + 0, + 18967, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 18968, + 0, + 18969, + 0, + 18970, + 
18973, + 18976, + 0, + 0, + 0, + 0, + 0, + 0, + 18977, + 0, + 0, + 0, + 18981, + 0, + 0, + 0, + 18990, + 0, + 18998, + 0, + 0, + 0, + 0, + 0, + 18999, + 19003, + 0, + 0, + 19005, + 0, + 0, + 0, + 19006, + 0, + 0, + 0, + 0, + 0, + 0, + 19008, + 19011, + 0, + 0, + 19018, + 0, + 0, + 19019, + 0, + 19024, + 0, + 19031, + 19032, + 0, + 19039, + 0, + 19041, + 19050, + 0, + 0, + 0, + 19051, + 19055, + 19056, + 0, + 19059, + 19063, + 19064, + 0, + 0, + 19088, + 0, + 0, + 0, + 19093, + 19094, + 0, + 0, + 0, + 0, + 19095, + 0, + 19096, + 0, + 0, + 0, + 19097, + 0, + 0, + 19098, + 0, + 19099, + 19100, + 0, + 0, + 19103, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19111, + 0, + 0, + 0, + 0, + 0, + 0, + 19112, + 0, + 0, + 0, + 19116, + 19117, + 0, + 19121, + 19122, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19123, + 19124, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19125, + 19126, + 0, + 19128, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19129, + 19130, + 19131, + 19132, + 0, + 0, + 19146, + 0, + 0, + 19147, + 19156, + 19158, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19182, + 19185, + 0, + 0, + 19187, + 0, + 0, + 0, + 19193, + 0, + 0, + 0, + 0, + 0, + 19194, + 0, + 19197, + 0, + 0, + 0, + 0, + 19198, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19202, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19203, + 0, + 19205, + 19210, + 0, + 0, + 0, + 19213, + 0, + 19218, + 0, + 0, + 0, + 19223, + 19229, + 0, + 0, + 19230, + 0, + 0, + 19231, + 19232, + 19233, + 19239, + 0, + 0, + 0, + 0, + 0, + 19240, + 0, + 19248, + 19249, + 0, + 0, + 0, + 0, + 19254, + 0, + 19256, + 19258, + 19259, + 0, + 0, + 19261, + 0, + 19266, + 0, + 0, + 0, + 19272, + 0, + 19278, + 19281, + 19282, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19283, + 0, + 0, + 19284, + 0, + 0, + 19285, + 19287, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 19288, + 19291, + 0, + 19292, + 0, + 0, + 0, + 0, + 19297, + 0, + 19298, + 0, + 0, + 0, + 0, + 19302, + 19303, + 0, + 0, + 0, + 0, + 19304, + 19305, + 0, + 0, + 0, + 0, + 19314, + 0, + 0, + 19315, + 0, + 0, + 19321, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19322, + 0, + 19333, + 0, + 19334, + 19335, + 0, + 19336, + 19337, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19346, + 0, + 0, + 19353, + 0, + 19354, + 19362, + 0, + 19366, + 19367, + 0, + 0, + 19369, + 0, + 19375, + 0, + 19377, + 19380, + 19388, + 0, + 0, + 0, + 0, + 0, + 19389, + 19390, + 0, + 0, + 0, + 0, + 19392, + 0, + 0, + 0, + 0, + 0, + 19402, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19412, + 0, + 0, + 19413, + 19422, + 0, + 19424, + 0, + 0, + 0, + 19425, + 0, + 0, + 0, + 19428, + 0, + 0, + 0, + 0, + 19431, + 0, + 0, + 0, + 0, + 0, + 19432, + 0, + 0, + 0, + 0, + 0, + 19448, + 19459, + 0, + 0, + 19461, + 0, + 19462, + 19463, + 0, + 19467, + 19474, + 19482, + 0, + 0, + 0, + 0, + 19494, + 0, + 0, + 0, + 0, + 19501, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19502, + 19504, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19505, + 0, + 0, + 0, + 0, + 19506, + 19507, + 0, + 0, + 0, + 19508, + 0, + 0, + 19511, + 0, + 0, + 19514, + 0, + 19515, + 0, + 19516, + 0, + 19518, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19530, + 0, + 19537, + 19538, + 0, + 19543, + 19546, + 0, + 19547, + 19551, + 0, + 0, + 0, + 0, + 0, + 0, + 19552, + 19553, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19555, + 0, + 0, + 19556, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19560, + 19561, + 0, + 0, + 19562, + 0, + 0, + 0, + 0, + 0, + 0, + 19565, + 19567, + 0, + 19568, + 0, + 0, + 0, + 19569, + 19570, + 0, + 19578, + 0, + 0, + 0, + 0, + 19580, + 0, + 0, + 0, + 0, + 19581, + 19584, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19585, + 19586, + 0, + 0, + 0, + 19587, + 19588, + 0, + 19589, + 0, + 0, + 0, + 0, + 0, + 0, + 19592, + 19593, + 19599, 
+ 0, + 19600, + 0, + 0, + 19604, + 0, + 0, + 19605, + 0, + 19606, + 19608, + 19610, + 0, + 19613, + 19614, + 0, + 0, + 0, + 0, + 0, + 0, + 19616, + 19617, + 0, + 0, + 19618, + 0, + 0, + 19619, + 0, + 0, + 0, + 19620, + 19621, + 19631, + 0, + 0, + 19632, + 19634, + 19636, + 0, + 19643, + 0, + 0, + 19644, + 19658, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19659, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19675, + 19677, + 0, + 0, + 0, + 0, + 19679, + 0, + 19683, + 0, + 19684, + 0, + 0, + 0, + 0, + 0, + 0, + 19687, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19688, + 19689, + 19692, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19695, + 19697, + 0, + 0, + 0, + 0, + 0, + 19698, + 19699, + 0, + 0, + 19700, + 0, + 19702, + 0, + 0, + 19703, + 0, + 0, + 0, + 0, + 0, + 0, + 19704, + 19708, + 0, + 19710, + 0, + 19713, + 0, + 0, + 0, + 19715, + 0, + 0, + 0, + 0, + 19718, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19720, + 0, + 19722, + 0, + 0, + 19725, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19730, + 0, + 0, + 0, + 0, + 0, + 19731, + 0, + 19734, + 19735, + 19739, + 0, + 0, + 19740, + 0, + 19741, + 0, + 0, + 0, + 19746, + 0, + 0, + 19747, + 0, + 19771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19772, + 19775, + 0, + 0, + 0, + 0, + 0, + 0, + 19778, + 0, + 0, + 0, + 0, + 0, + 19779, + 0, + 0, + 19780, + 19790, + 0, + 19791, + 0, + 0, + 19792, + 0, + 0, + 0, + 19793, + 0, + 0, + 19796, + 19797, + 0, + 0, + 0, + 19799, + 0, + 0, + 0, + 19801, + 0, + 0, + 0, + 0, + 19803, + 0, + 19804, + 0, + 19805, + 0, + 0, + 19807, + 0, + 0, + 0, + 19808, + 0, + 0, + 0, + 0, + 0, + 0, + 19809, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19816, + 0, + 19821, + 0, + 19822, + 19830, + 19831, + 0, + 0, + 0, + 19833, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19838, + 0, + 0, + 0, + 0, + 19839, + 0, + 0, + 19843, + 0, + 0, + 0, + 0, + 19845, + 0, + 0, + 0, + 0, + 
19847, + 0, + 0, + 19848, + 0, + 19849, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19851, + 0, + 0, + 0, + 19854, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19864, + 0, + 19865, + 0, + 19866, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19868, + 0, + 0, + 19870, + 0, + 0, + 19871, + 0, + 0, + 19872, + 19873, + 19875, + 0, + 19880, + 19882, + 19884, + 0, + 0, + 19885, + 19886, + 19888, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19890, + 19892, + 19893, + 0, + 0, + 19894, + 0, + 0, + 0, + 19895, + 0, + 19896, + 19902, + 0, + 0, + 19903, + 0, + 0, + 19905, + 0, + 0, + 0, + 19906, + 0, + 19908, + 0, + 19909, + 19911, + 0, + 0, + 0, + 19913, + 19920, + 0, + 19938, + 19939, + 19940, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 19942, + 0, + 19943, + 0, + 19945, + 0, + 0, + 0, + 19951, + 19952, + 19954, + 19960, + 0, + 19965, + 0, + 19971, + 0, + 0, + 0, + 0, + 0, + 19975, + 0, + 19976, + 0, + 19990, + 0, + 0, + 19991, + 0, + 19993, + 0, + 19995, + 0, + 0, + 0, + 19998, + 19999, + 20001, + 0, + 20003, + 20005, + 0, + 20011, + 20012, + 0, + 0, + 0, + 0, + 0, + 0, + 20014, + 0, + 20020, + 0, + 0, + 0, + 0, + 20021, + 0, + 0, + 0, + 0, + 0, + 20023, + 20024, + 0, + 0, + 0, + 0, + 0, + 20025, + 0, + 0, + 20027, + 0, + 0, + 20029, + 0, + 0, + 20032, + 0, + 0, + 0, + 0, + 20044, + 20045, + 0, + 20048, + 20049, + 0, + 0, + 20050, + 0, + 20052, + 0, + 0, + 20054, + 20057, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20059, + 0, + 0, + 20061, + 0, + 20062, + 0, + 20064, + 0, + 0, + 20066, + 0, + 0, + 20067, + 0, + 0, + 0, + 0, + 20069, + 0, + 0, + 0, + 0, + 0, + 0, + 20070, + 20071, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20072, + 0, + 0, + 20073, + 20074, + 0, + 0, + 0, + 0, + 0, + 20075, + 0, + 20078, + 0, + 0, + 0, + 0, + 20080, + 0, + 20081, + 0, + 0, + 0, + 0, + 0, + 0, + 20095, + 0, + 20098, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20107, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20112, + 0, + 0, + 0, + 20113, + 20114, + 0, + 0, + 0, + 20115, + 20123, + 
20124, + 0, + 0, + 0, + 20131, + 20133, + 20134, + 0, + 0, + 0, + 0, + 20136, + 0, + 0, + 20137, + 20138, + 20150, + 0, + 20152, + 0, + 0, + 0, + 20153, + 0, + 0, + 20154, + 0, + 0, + 0, + 20158, + 0, + 20163, + 0, + 0, + 20164, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20166, + 0, + 20168, + 0, + 20170, + 0, + 20175, + 0, + 0, + 20178, + 0, + 0, + 0, + 0, + 20223, + 0, + 0, + 0, + 0, + 20224, + 0, + 20226, + 0, + 0, + 20230, + 0, + 20231, + 0, + 0, + 0, + 0, + 20232, + 0, + 0, + 20233, + 20234, + 0, + 20244, + 0, + 20247, + 0, + 0, + 0, + 0, + 0, + 0, + 20249, + 0, + 0, + 0, + 20250, + 0, + 0, + 0, + 0, + 20251, + 0, + 20253, + 0, + 20254, + 0, + 0, + 0, + 0, + 20256, + 0, + 0, + 20264, + 0, + 0, + 0, + 0, + 20266, + 0, + 0, + 0, + 20278, + 0, + 0, + 20279, + 20282, + 0, + 0, + 0, + 0, + 0, + 20283, + 0, + 20284, + 0, + 20285, + 0, + 20287, + 20290, + 0, + 0, + 0, + 0, + 20292, + 0, + 0, + 0, + 0, + 20293, + 20297, + 0, + 0, + 0, + 0, + 0, + 0, + 20299, + 0, + 20300, + 20303, + 0, + 0, + 0, + 0, + 0, + 0, + 20307, + 0, + 0, + 20308, + 0, + 20309, + 0, + 20310, + 0, + 0, + 0, + 0, + 0, + 0, + 20312, + 0, + 0, + 0, + 20314, + 0, + 0, + 0, + 0, + 20315, + 20316, + 0, + 20322, + 0, + 0, + 0, + 0, + 0, + 0, + 20339, + 0, + 0, + 0, + 20342, + 0, + 0, + 0, + 0, + 20352, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20362, + 0, + 0, + 20365, + 0, + 20375, + 20377, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20378, + 20379, + 0, + 20380, + 0, + 0, + 20381, + 0, + 20382, + 0, + 20383, + 0, + 20388, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20390, + 20392, + 20393, + 0, + 0, + 20395, + 0, + 0, + 0, + 0, + 0, + 20396, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20398, + 20415, + 0, + 0, + 0, + 20417, + 0, + 0, + 20420, + 0, + 0, + 20426, + 20428, + 0, + 20431, + 0, + 0, + 20432, + 0, + 20433, + 20434, + 20435, + 0, + 0, + 0, + 0, + 20440, + 0, + 0, + 0, + 0, + 0, + 20442, + 0, + 20443, + 0, + 20446, + 0, + 0, + 0, + 0, + 20448, + 0, + 
20451, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20452, + 20453, + 0, + 0, + 20454, + 0, + 0, + 0, + 0, + 0, + 0, + 20457, + 0, + 20458, + 0, + 0, + 0, + 20465, + 0, + 0, + 0, + 0, + 0, + 20469, + 0, + 0, + 0, + 20473, + 0, + 20476, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20477, + 0, + 0, + 20485, + 0, + 0, + 20486, + 0, + 0, + 20487, + 0, + 20496, + 0, + 20497, + 0, + 0, + 20498, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20499, + 20500, + 0, + 20501, + 0, + 0, + 0, + 0, + 0, + 20520, + 20527, + 0, + 20529, + 0, + 0, + 0, + 0, + 20539, + 0, + 0, + 20540, + 0, + 0, + 0, + 20543, + 0, + 0, + 0, + 20546, + 0, + 0, + 0, + 0, + 0, + 20548, + 0, + 0, + 20563, + 0, + 0, + 20564, + 0, + 20566, + 0, + 0, + 0, + 0, + 0, + 20589, + 0, + 0, + 0, + 0, + 20590, + 0, + 0, + 20593, + 20594, + 0, + 0, + 0, + 0, + 20595, + 0, + 20597, + 20598, + 0, + 0, + 0, + 20618, + 20620, + 0, + 0, + 0, + 0, + 20621, + 0, + 0, + 0, + 0, + 20627, + 0, + 0, + 0, + 0, + 0, + 20628, + 0, + 0, + 0, + 20629, + 0, + 20630, + 0, + 0, + 20639, + 0, + 0, + 0, + 0, + 0, + 20707, + 0, + 0, + 20709, + 0, + 0, + 0, + 20713, + 20714, + 0, + 0, + 0, + 0, + 0, + 20724, + 20725, + 0, + 0, + 0, + 0, + 20726, + 20728, + 20729, + 0, + 20733, + 0, + 20734, + 0, + 20735, + 20736, + 0, + 20737, + 0, + 0, + 20744, + 0, + 20745, + 0, + 20748, + 0, + 0, + 20749, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20750, + 0, + 0, + 0, + 0, + 20754, + 0, + 0, + 0, + 20761, + 0, + 0, + 20763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20766, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20767, + 0, + 0, + 0, + 0, + 20768, + 0, + 20769, + 20777, + 0, + 0, + 0, + 0, + 0, + 0, + 20785, + 0, + 0, + 0, + 20786, + 20795, + 20801, + 0, + 20802, + 0, + 20807, + 0, + 0, + 20808, + 0, + 0, + 20810, + 0, + 0, + 20811, + 0, + 20812, + 0, + 0, + 0, + 0, + 0, + 20813, + 0, + 0, + 20818, + 20820, + 20821, + 0, + 0, + 0, + 20822, + 0, + 20823, + 0, + 0, + 0, + 20826, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20829, + 
20830, + 20831, + 0, + 20832, + 20836, + 0, + 0, + 20839, + 0, + 0, + 20840, + 20842, + 0, + 20843, + 0, + 20844, + 0, + 20854, + 0, + 0, + 0, + 20855, + 0, + 0, + 0, + 0, + 20856, + 0, + 0, + 0, + 20869, + 0, + 0, + 20871, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20873, + 0, + 0, + 0, + 0, + 0, + 20876, + 0, + 0, + 0, + 0, + 0, + 20880, + 0, + 0, + 20882, + 0, + 0, + 0, + 0, + 20883, + 20884, + 0, + 0, + 20890, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 20891, + 0, + 0, + 0, + 0, + 0, + 20905, + 0, + 20906, + 20910, + 0, + 0, + 20912, + 20915, + 0, + 0, + 0, + 0, + 0, + 20916, + 0, + 20917, + 0, + 20919, + 20920, + 20922, + 0, + 20927, + 0, + 20928, + 20929, + 20930, + 0, + 0, + 20935, + 0, + 0, + 20939, + 0, + 0, + 20941, + 0, + 0, + 0, + 20943, + 0, + 0, + 0, + 20946, + 20947, + 0, + 0, + 0, + 0, + 0, + 20950, + 0, + 20954, + 0, + 0, + 20955, + 20964, + 0, + 0, + 20967, + 0, + 0, + 0, + 0, + 0, + 20973, + 20975, + 0, + 0, + 0, + 20984, + 0, + 20987, + 20988, + 0, + 0, + 0, + 0, + 0, + 20989, + 0, + 0, + 0, + 20995, + 0, + 20998, + 0, + 20999, + 0, + 0, + 0, + 0, + 21000, + 21001, + 0, + 0, + 0, + 0, + 21008, + 0, + 21010, + 0, + 21016, + 0, + 0, + 0, + 21017, + 21018, + 0, + 0, + 0, + 0, + 0, + 21021, + 21026, + 21027, + 21028, + 0, + 0, + 21029, + 0, + 0, + 0, + 0, + 0, + 21030, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21031, + 21032, + 0, + 0, + 0, + 0, + 0, + 21037, + 0, + 0, + 21038, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21039, + 0, + 21041, + 0, + 21046, + 21047, + 0, + 0, + 0, + 21049, + 21053, + 0, + 0, + 21057, + 21064, + 21065, + 0, + 0, + 21066, + 21067, + 0, + 0, + 0, + 21069, + 0, + 0, + 0, + 21071, + 21072, + 0, + 0, + 21073, + 0, + 21074, + 0, + 0, + 21078, + 0, + 0, + 0, + 0, + 21079, + 0, + 0, + 21080, + 21081, + 0, + 0, + 21086, + 21087, + 0, + 21089, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21091, + 0, + 21093, + 0, + 21094, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21095, + 0, + 0, + 0, + 0, + 0, + 21096, + 0, + 
21098, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21099, + 0, + 0, + 21100, + 21101, + 21102, + 0, + 0, + 0, + 0, + 0, + 21103, + 0, + 21104, + 0, + 0, + 0, + 0, + 0, + 21105, + 21108, + 21109, + 0, + 0, + 21112, + 21113, + 0, + 0, + 0, + 0, + 0, + 0, + 21115, + 21122, + 21123, + 0, + 0, + 0, + 0, + 0, + 21125, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21129, + 21131, + 0, + 0, + 21134, + 0, + 0, + 0, + 21137, + 21142, + 0, + 21143, + 0, + 0, + 21144, + 0, + 21145, + 21146, + 0, + 21152, + 21154, + 21155, + 21156, + 0, + 0, + 0, + 21160, + 0, + 0, + 0, + 0, + 0, + 0, + 21161, + 0, + 21164, + 0, + 21166, + 0, + 0, + 0, + 0, + 21170, + 0, + 0, + 0, + 0, + 21171, + 0, + 0, + 21172, + 0, + 21174, + 0, + 21175, + 0, + 0, + 0, + 0, + 0, + 21176, + 21179, + 21188, + 0, + 0, + 0, + 21189, + 0, + 0, + 21190, + 0, + 0, + 0, + 21192, + 0, + 0, + 21193, + 0, + 0, + 0, + 21198, + 0, + 21212, + 0, + 0, + 21213, + 0, + 0, + 0, + 0, + 0, + 0, + 21215, + 21216, + 0, + 0, + 21223, + 21225, + 0, + 21226, + 0, + 0, + 0, + 0, + 21227, + 21228, + 0, + 0, + 21229, + 0, + 0, + 0, + 0, + 21230, + 21236, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21237, + 0, + 0, + 21238, + 21239, + 0, + 0, + 0, + 0, + 21256, + 0, + 0, + 0, + 0, + 0, + 21257, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21259, + 0, + 0, + 0, + 21263, + 0, + 21272, + 0, + 21274, + 0, + 21282, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21283, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21294, + 0, + 0, + 21297, + 0, + 0, + 0, + 0, + 21298, + 0, + 0, + 0, + 21299, + 0, + 21300, + 21302, + 0, + 21316, + 0, + 21318, + 21322, + 21323, + 0, + 21324, + 0, + 21326, + 0, + 0, + 0, + 21327, + 21328, + 0, + 0, + 0, + 21352, + 0, + 0, + 21354, + 21361, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21362, + 0, + 0, + 0, + 21363, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21366, + 0, + 0, + 21367, + 21372, + 21374, + 0, + 0, + 0, + 21375, + 21377, + 0, + 21378, + 0, + 0, + 0, + 21380, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 21381, + 0, + 0, + 0, + 0, + 0, + 0, + 21382, + 0, + 21383, + 0, + 0, + 21384, + 0, + 0, + 21385, + 0, + 0, + 0, + 0, + 21389, + 21390, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21397, + 21398, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21399, + 0, + 21400, + 0, + 0, + 0, + 0, + 21402, + 0, + 0, + 0, + 21403, + 21404, + 0, + 21405, + 21406, + 0, + 0, + 0, + 21407, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21408, + 0, + 0, + 0, + 0, + 21409, + 0, + 21421, + 0, + 21422, + 0, + 0, + 0, + 21425, + 21428, + 0, + 0, + 0, + 0, + 21429, + 0, + 0, + 0, + 0, + 0, + 21433, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21434, + 0, + 21443, + 0, + 21444, + 21449, + 0, + 21452, + 0, + 21453, + 21454, + 0, + 0, + 0, + 21457, + 0, + 0, + 21458, + 0, + 0, + 0, + 21460, + 21461, + 0, + 0, + 21464, + 0, + 0, + 0, + 21473, + 21478, + 0, + 0, + 21479, + 0, + 0, + 21481, + 21483, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21484, + 0, + 0, + 21485, + 21486, + 0, + 0, + 21488, + 0, + 0, + 0, + 0, + 0, + 0, + 21523, + 0, + 0, + 21525, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21526, + 0, + 0, + 0, + 0, + 0, + 0, + 21529, + 21530, + 0, + 0, + 21531, + 0, + 0, + 21533, + 0, + 0, + 21539, + 21564, + 0, + 21567, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21575, + 0, + 0, + 0, + 0, + 21577, + 0, + 0, + 0, + 0, + 0, + 21591, + 0, + 0, + 21604, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21605, + 0, + 21606, + 0, + 0, + 21617, + 21618, + 21619, + 21620, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21623, + 0, + 0, + 0, + 0, + 21631, + 0, + 21635, + 0, + 0, + 0, + 0, + 21639, + 21646, + 21653, + 21662, + 0, + 0, + 21663, + 21664, + 0, + 21666, + 0, + 0, + 21667, + 0, + 21670, + 21672, + 21673, + 0, + 21674, + 21683, + 0, + 0, + 0, + 0, + 0, + 21684, + 0, + 21694, + 0, + 0, + 0, + 0, + 21695, + 21700, + 0, + 21703, + 0, + 21704, + 0, + 0, + 21709, + 0, + 0, + 0, + 21710, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 21711, + 0, + 0, + 0, + 21712, + 0, + 21717, + 0, + 21730, + 0, + 0, + 0, + 21731, + 21733, + 0, + 0, + 0, + 0, + 21737, + 21741, + 21742, + 0, + 21747, + 0, + 0, + 0, + 21749, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21750, + 0, + 0, + 0, + 0, + 0, + 21752, + 0, + 0, + 0, + 0, + 21753, + 0, + 0, + 0, + 0, + 0, + 0, + 21755, + 21756, + 0, + 21757, + 0, + 0, + 0, + 0, + 0, + 0, + 21760, + 0, + 0, + 21763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21764, + 0, + 0, + 21766, + 0, + 0, + 21767, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21773, + 0, + 21774, + 0, + 0, + 21775, + 0, + 0, + 0, + 0, + 21776, + 0, + 0, + 21777, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21780, + 21787, + 21788, + 21791, + 0, + 0, + 0, + 21797, + 0, + 0, + 0, + 0, + 0, + 21805, + 0, + 0, + 0, + 0, + 21806, + 0, + 21807, + 21809, + 0, + 21810, + 21811, + 0, + 21817, + 21819, + 21820, + 0, + 21823, + 0, + 21824, + 0, + 0, + 21825, + 0, + 0, + 21826, + 21832, + 0, + 0, + 0, + 0, + 0, + 21833, + 21848, + 21849, + 0, + 0, + 21867, + 21870, + 21871, + 21873, + 0, + 0, + 0, + 21874, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21875, + 0, + 21878, + 0, + 0, + 0, + 21879, + 0, + 21881, + 21886, + 0, + 0, + 0, + 0, + 21887, + 0, + 0, + 21888, + 21894, + 21895, + 21897, + 0, + 21901, + 0, + 21904, + 0, + 0, + 21906, + 0, + 0, + 0, + 21909, + 21910, + 21911, + 0, + 0, + 21912, + 0, + 0, + 21913, + 21914, + 21915, + 0, + 21919, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21921, + 0, + 0, + 21922, + 21933, + 21939, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21944, + 0, + 0, + 0, + 0, + 0, + 21945, + 0, + 21947, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21949, + 0, + 0, + 0, + 21950, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21951, + 0, + 21952, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 21954, + 21957, + 0, + 0, + 0, + 0, + 21958, + 0, + 21959, + 0, + 0, + 0, + 0, + 0, + 0, + 21962, + 21963, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 21964, + 21965, + 0, + 0, + 21969, + 21970, + 0, + 0, + 0, + 21974, + 0, + 0, + 21980, + 21981, + 0, + 21982, + 0, + 0, + 0, + 0, + 0, + 21985, + 0, + 21988, + 0, + 21992, + 0, + 21999, + 0, + 0, + 0, + 0, + 0, + 0, + 22001, + 0, + 22002, + 0, + 0, + 0, + 0, + 0, + 0, + 22003, + 0, + 0, + 0, + 0, + 0, + 22004, + 0, + 0, + 0, + 22008, + 0, + 22009, + 22015, + 0, + 0, + 22016, + 0, + 0, + 0, + 22017, + 22019, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22020, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22021, + 22037, + 0, + 22039, + 0, + 0, + 0, + 22040, + 0, + 0, + 0, + 22048, + 22049, + 0, + 0, + 22053, + 22055, + 22056, + 22059, + 0, + 0, + 22060, + 22061, + 0, + 0, + 22064, + 0, + 0, + 0, + 0, + 22066, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22073, + 0, + 0, + 0, + 22074, + 22075, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22076, + 0, + 0, + 0, + 0, + 22077, + 22084, + 22099, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22104, + 0, + 0, + 22107, + 0, + 22108, + 0, + 22109, + 0, + 22110, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22111, + 22119, + 0, + 22120, + 22122, + 0, + 0, + 0, + 0, + 22125, + 0, + 0, + 0, + 22128, + 22129, + 0, + 0, + 0, + 0, + 0, + 0, + 22141, + 0, + 0, + 0, + 22142, + 0, + 0, + 22144, + 22146, + 0, + 22148, + 22149, + 22151, + 22154, + 0, + 0, + 0, + 22162, + 0, + 0, + 0, + 0, + 22164, + 22177, + 0, + 0, + 0, + 0, + 22179, + 0, + 22182, + 22183, + 0, + 0, + 22184, + 22188, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22190, + 0, + 22194, + 22201, + 0, + 0, + 22208, + 0, + 22209, + 0, + 22212, + 0, + 0, + 22215, + 0, + 22223, + 22231, + 0, + 0, + 22232, + 0, + 22234, + 0, + 0, + 22235, + 22236, + 0, + 22237, + 0, + 22240, + 0, + 0, + 0, + 0, + 0, + 22241, + 0, + 0, + 0, + 22242, + 22246, + 22247, + 0, + 0, + 0, + 22259, + 22268, + 0, + 22269, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22270, + 0, + 0, + 0, + 0, + 22271, + 0, + 22272, + 0, + 22277, + 0, + 0, + 0, + 0, + 0, + 22278, + 22280, + 22283, + 22286, + 0, + 0, + 22287, 
+ 22289, + 0, + 0, + 22290, + 0, + 22293, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22295, + 0, + 22301, + 22302, + 0, + 0, + 0, + 22305, + 0, + 22308, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22315, + 0, + 0, + 0, + 22317, + 0, + 22334, + 0, + 0, + 0, + 22335, + 0, + 0, + 0, + 0, + 0, + 22336, + 0, + 22338, + 22344, + 0, + 22347, + 22349, + 0, + 22350, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22357, + 0, + 0, + 0, + 0, + 0, + 22358, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22359, + 22360, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22361, + 22366, + 0, + 0, + 22369, + 0, + 22370, + 22373, + 0, + 0, + 0, + 0, + 0, + 22375, + 0, + 22377, + 0, + 0, + 0, + 0, + 0, + 22378, + 0, + 0, + 0, + 0, + 22381, + 0, + 0, + 0, + 0, + 22382, + 0, + 22383, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22391, + 0, + 0, + 22392, + 22395, + 22396, + 22402, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22405, + 0, + 0, + 22406, + 0, + 0, + 22408, + 0, + 0, + 22409, + 22410, + 0, + 0, + 0, + 0, + 0, + 0, + 22424, + 0, + 0, + 0, + 0, + 22426, + 0, + 0, + 0, + 22427, + 0, + 22428, + 0, + 22432, + 0, + 22435, + 22442, + 22443, + 0, + 0, + 0, + 0, + 22444, + 0, + 0, + 0, + 0, + 0, + 22446, + 0, + 22454, + 0, + 22455, + 0, + 0, + 0, + 22465, + 0, + 22470, + 0, + 22471, + 0, + 0, + 0, + 0, + 22472, + 22473, + 0, + 22487, + 0, + 0, + 0, + 22488, + 0, + 0, + 0, + 0, + 22489, + 0, + 0, + 22499, + 0, + 0, + 0, + 0, + 0, + 0, + 22514, + 0, + 0, + 22515, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22516, + 0, + 0, + 0, + 22517, + 22520, + 0, + 0, + 0, + 22534, + 0, + 0, + 22535, + 0, + 0, + 22536, + 0, + 22540, + 22553, + 0, + 22555, + 0, + 0, + 0, + 0, + 22561, + 0, + 0, + 22562, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22566, + 0, + 0, + 0, + 0, + 22567, + 22568, + 0, + 0, + 22575, + 0, + 22579, + 0, + 22582, + 22583, + 22585, + 0, + 0, + 0, + 0, + 0, + 22586, + 0, + 0, + 22587, + 0, + 0, + 22590, + 0, + 0, + 0, + 0, + 0, + 22591, + 0, + 
22592, + 0, + 0, + 0, + 0, + 0, + 22593, + 0, + 22602, + 0, + 0, + 22604, + 0, + 0, + 22609, + 0, + 0, + 22618, + 0, + 0, + 0, + 0, + 0, + 0, + 22619, + 0, + 22624, + 22625, + 0, + 0, + 22638, + 0, + 0, + 0, + 0, + 0, + 22639, + 0, + 0, + 22640, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22644, + 0, + 22645, + 22647, + 0, + 0, + 0, + 0, + 22652, + 22653, + 0, + 0, + 0, + 22654, + 0, + 22655, + 0, + 0, + 0, + 22656, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22673, + 22675, + 22676, + 0, + 0, + 22678, + 22679, + 0, + 22691, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22693, + 0, + 0, + 22696, + 0, + 22699, + 22707, + 22708, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22718, + 0, + 22719, + 0, + 0, + 0, + 0, + 22723, + 0, + 0, + 0, + 22724, + 22725, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22726, + 22728, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22729, + 0, + 0, + 22731, + 0, + 0, + 0, + 0, + 22732, + 22735, + 22736, + 0, + 0, + 0, + 0, + 22739, + 0, + 22749, + 0, + 0, + 22751, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22758, + 0, + 0, + 0, + 0, + 0, + 22760, + 0, + 0, + 0, + 0, + 0, + 22764, + 22765, + 22766, + 0, + 22768, + 0, + 0, + 0, + 0, + 0, + 22769, + 22770, + 0, + 0, + 0, + 0, + 0, + 0, + 22771, + 0, + 0, + 22772, + 22775, + 0, + 22776, + 22777, + 22780, + 0, + 0, + 22782, + 22784, + 0, + 22787, + 0, + 22789, + 22796, + 0, + 0, + 0, + 0, + 0, + 22798, + 0, + 0, + 0, + 0, + 0, + 0, + 22802, + 0, + 22803, + 22804, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22805, + 0, + 0, + 22810, + 22811, + 22814, + 22816, + 0, + 22825, + 22826, + 0, + 22831, + 22833, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22834, + 0, + 22836, + 22838, + 0, + 22839, + 0, + 0, + 0, + 0, + 0, + 22840, + 0, + 22847, + 0, + 0, + 0, + 0, + 0, + 22856, + 22857, + 0, + 22858, + 22859, + 0, + 0, + 22862, + 0, + 0, + 22864, + 0, + 0, + 0, + 0, + 22865, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22866, + 0, + 22867, + 22868, + 0, 
+ 0, + 0, + 0, + 22869, + 0, + 22871, + 0, + 22872, + 0, + 22873, + 22881, + 22882, + 22884, + 22885, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22886, + 22887, + 0, + 22894, + 0, + 22895, + 0, + 0, + 0, + 22900, + 0, + 22901, + 0, + 0, + 0, + 0, + 22904, + 0, + 0, + 0, + 0, + 22905, + 22907, + 0, + 0, + 0, + 22915, + 22917, + 0, + 0, + 22918, + 0, + 0, + 0, + 22920, + 0, + 0, + 0, + 22929, + 22930, + 0, + 0, + 0, + 22941, + 22942, + 0, + 0, + 0, + 22943, + 0, + 0, + 0, + 22944, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22946, + 0, + 22947, + 0, + 0, + 22954, + 0, + 22956, + 0, + 0, + 22962, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22963, + 0, + 0, + 22964, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 22965, + 0, + 22968, + 0, + 0, + 0, + 22969, + 0, + 0, + 0, + 0, + 0, + 22970, + 0, + 22971, + 0, + 0, + 0, + 0, + 0, + 22978, + 0, + 0, + 22979, + 0, + 22987, + 0, + 0, + 22989, + 0, + 0, + 0, + 0, + 0, + 0, + 22990, + 0, + 23005, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23006, + 23007, + 23008, + 0, + 0, + 23023, + 23024, + 23029, + 0, + 0, + 0, + 0, + 23030, + 0, + 0, + 0, + 0, + 0, + 23032, + 0, + 0, + 0, + 0, + 0, + 23035, + 0, + 0, + 0, + 0, + 23038, + 0, + 0, + 0, + 23048, + 0, + 23049, + 23052, + 23053, + 23060, + 23061, + 0, + 23063, + 0, + 0, + 0, + 0, + 23067, + 23068, + 0, + 0, + 0, + 23069, + 23073, + 0, + 0, + 0, + 23127, + 0, + 23128, + 0, + 0, + 0, + 0, + 0, + 23129, + 0, + 23138, + 23141, + 0, + 23149, + 0, + 0, + 23150, + 0, + 0, + 0, + 23152, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23154, + 0, + 0, + 0, + 0, + 23157, + 23159, + 23160, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23180, + 0, + 0, + 0, + 0, + 23181, + 0, + 0, + 23188, + 0, + 23189, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23195, + 0, + 0, + 23196, + 23199, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23202, + 0, + 23204, + 0, + 23207, + 0, + 23209, + 23210, + 0, + 0, + 0, + 0, + 0, + 0, + 23227, + 23229, + 0, + 0, + 23230, + 23234, + 23238, + 0, + 0, + 0, + 23245, + 
23246, + 23248, + 0, + 0, + 0, + 0, + 23249, + 23254, + 0, + 0, + 0, + 23265, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23268, + 0, + 23276, + 0, + 0, + 0, + 0, + 23277, + 0, + 23297, + 0, + 23298, + 0, + 0, + 0, + 0, + 23299, + 0, + 23302, + 0, + 0, + 23303, + 23312, + 0, + 0, + 23314, + 0, + 23320, + 0, + 0, + 0, + 0, + 23324, + 0, + 23325, + 0, + 23328, + 0, + 23334, + 0, + 0, + 0, + 23337, + 0, + 0, + 0, + 0, + 23343, + 23344, + 23346, + 0, + 23348, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23353, + 0, + 0, + 0, + 0, + 23355, + 0, + 23356, + 23358, + 0, + 0, + 0, + 23359, + 23360, + 0, + 23361, + 0, + 23367, + 0, + 23369, + 0, + 0, + 23373, + 0, + 23378, + 23379, + 0, + 23382, + 23383, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23387, + 0, + 0, + 0, + 0, + 0, + 0, + 23388, + 23390, + 0, + 0, + 23393, + 23398, + 0, + 0, + 0, + 23399, + 0, + 0, + 0, + 23400, + 0, + 0, + 0, + 0, + 23401, + 0, + 0, + 0, + 23415, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23416, + 0, + 23422, + 0, + 23443, + 23444, + 0, + 0, + 0, + 0, + 23448, + 0, + 23454, + 0, + 0, + 0, + 0, + 0, + 0, + 23456, + 0, + 0, + 23458, + 23464, + 0, + 0, + 0, + 0, + 0, + 0, + 23465, + 0, + 0, + 0, + 23470, + 23471, + 0, + 0, + 23472, + 0, + 0, + 0, + 23473, + 23496, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23497, + 0, + 23499, + 0, + 0, + 23502, + 0, + 0, + 23503, + 0, + 0, + 23513, + 0, + 0, + 23515, + 0, + 0, + 0, + 23517, + 0, + 0, + 0, + 0, + 23518, + 23519, + 23521, + 23524, + 0, + 23525, + 23528, + 23539, + 0, + 0, + 0, + 0, + 0, + 23541, + 0, + 0, + 23544, + 0, + 0, + 23556, + 0, + 0, + 23557, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23559, + 0, + 23560, + 0, + 0, + 23561, + 0, + 0, + 23566, + 0, + 0, + 0, + 0, + 0, + 23568, + 23569, + 23570, + 0, + 0, + 0, + 0, + 23571, + 0, + 23574, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23575, + 0, + 23579, + 0, + 0, + 23581, + 0, + 0, + 0, + 0, + 0, + 0, + 23587, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 23596, + 23598, + 0, + 0, + 0, + 0, + 23602, + 23606, + 0, + 0, + 23607, + 0, + 23608, + 0, + 0, + 0, + 23614, + 23616, + 0, + 0, + 0, + 0, + 0, + 23618, + 0, + 0, + 23619, + 0, + 0, + 0, + 0, + 23621, + 23626, + 0, + 23627, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23629, + 0, + 23630, + 0, + 0, + 0, + 0, + 23634, + 0, + 23636, + 0, + 0, + 0, + 0, + 0, + 0, + 23638, + 0, + 0, + 0, + 0, + 23640, + 23667, + 0, + 23669, + 0, + 0, + 0, + 23681, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23682, + 0, + 23683, + 0, + 0, + 0, + 0, + 0, + 23684, + 0, + 0, + 0, + 23685, + 23689, + 0, + 23693, + 23694, + 23700, + 0, + 23702, + 0, + 23709, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23712, + 0, + 0, + 0, + 0, + 0, + 23714, + 0, + 0, + 23715, + 0, + 0, + 0, + 0, + 23718, + 0, + 0, + 23720, + 0, + 0, + 0, + 0, + 23722, + 0, + 0, + 0, + 23726, + 23729, + 0, + 23741, + 23746, + 0, + 23748, + 0, + 0, + 0, + 0, + 23749, + 0, + 0, + 0, + 0, + 0, + 23750, + 0, + 0, + 0, + 0, + 23751, + 0, + 23753, + 0, + 0, + 0, + 0, + 23757, + 23765, + 0, + 0, + 0, + 23770, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23771, + 0, + 23772, + 23781, + 0, + 0, + 23796, + 0, + 0, + 0, + 0, + 23798, + 0, + 23799, + 0, + 0, + 0, + 23802, + 0, + 0, + 23806, + 0, + 23807, + 0, + 0, + 23808, + 0, + 23809, + 0, + 23819, + 0, + 0, + 0, + 23821, + 0, + 23827, + 0, + 0, + 0, + 23829, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23830, + 0, + 0, + 0, + 0, + 0, + 0, + 23832, + 23833, + 23834, + 23835, + 0, + 0, + 0, + 0, + 23837, + 23838, + 0, + 0, + 0, + 0, + 0, + 23846, + 0, + 0, + 0, + 0, + 0, + 0, + 23847, + 0, + 0, + 0, + 0, + 0, + 23879, + 23881, + 0, + 0, + 23882, + 23883, + 23895, + 0, + 23899, + 0, + 0, + 0, + 0, + 23901, + 0, + 0, + 0, + 0, + 0, + 0, + 23902, + 0, + 0, + 0, + 0, + 0, + 23903, + 23905, + 0, + 23906, + 0, + 23907, + 23918, + 23919, + 23920, + 0, + 23922, + 0, + 23924, + 0, + 23927, + 0, + 23934, + 0, + 23937, + 23941, + 0, + 23942, + 23946, + 0, + 0, + 0, + 0, + 0, + 23955, + 23956, + 23958, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 23959, + 0, + 23962, + 23965, + 0, + 23966, + 0, + 0, + 0, + 0, + 23967, + 23968, + 0, + 0, + 23973, + 0, + 0, + 23974, + 0, + 0, + 0, + 0, + 23975, + 0, + 23976, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23977, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23980, + 0, + 0, + 23984, + 0, + 23985, + 0, + 0, + 23987, + 0, + 0, + 23988, + 23990, + 23991, + 0, + 0, + 0, + 0, + 0, + 0, + 23992, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23994, + 0, + 0, + 0, + 23998, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 23999, + 0, + 0, + 24003, + 0, + 24004, + 0, + 24006, + 0, + 0, + 0, + 24007, + 0, + 0, + 24008, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24009, + 0, + 0, + 24010, + 0, + 0, + 24011, + 0, + 0, + 24013, + 24014, + 0, + 0, + 24015, + 24016, + 24027, + 0, + 24028, + 24029, + 0, + 24030, + 0, + 0, + 0, + 0, + 0, + 24033, + 24034, + 0, + 24035, + 0, + 0, + 24036, + 0, + 0, + 24044, + 0, + 24048, + 24049, + 24063, + 24067, + 0, + 24068, + 24070, + 0, + 0, + 24071, + 24078, + 24087, + 0, + 24090, + 0, + 0, + 0, + 24095, + 0, + 24098, + 24101, + 24104, + 24106, + 0, + 24107, + 0, + 0, + 0, + 24108, + 0, + 0, + 0, + 0, + 24110, + 24111, + 0, + 24113, + 0, + 0, + 24115, + 24120, + 0, + 0, + 0, + 0, + 0, + 0, + 24124, + 0, + 24125, + 0, + 24126, + 0, + 24127, + 0, + 0, + 0, + 0, + 0, + 24135, + 0, + 0, + 24136, + 0, + 24137, + 24142, + 0, + 0, + 0, + 24146, + 0, + 0, + 24147, + 24149, + 24154, + 0, + 24163, + 0, + 0, + 0, + 24165, + 24166, + 24167, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24169, + 24170, + 24175, + 0, + 0, + 0, + 24178, + 0, + 0, + 24179, + 0, + 0, + 24181, + 0, + 24184, + 24197, + 0, + 24201, + 24204, + 0, + 0, + 0, + 0, + 0, + 0, + 24206, + 24212, + 24220, + 0, + 0, + 0, + 24224, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24226, + 0, + 24234, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24235, + 0, + 24236, + 0, + 0, + 0, + 0, + 0, + 24239, + 24240, + 24241, + 0, + 0, + 24248, + 0, + 
0, + 24249, + 0, + 24251, + 0, + 0, + 0, + 0, + 0, + 0, + 24253, + 0, + 24268, + 0, + 0, + 0, + 24269, + 0, + 24271, + 24272, + 0, + 0, + 0, + 0, + 24273, + 0, + 0, + 24274, + 0, + 0, + 24279, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24280, + 0, + 24293, + 24294, + 0, + 0, + 0, + 0, + 0, + 0, + 24296, + 0, + 0, + 24323, + 0, + 0, + 0, + 24329, + 24330, + 24331, + 24339, + 0, + 24351, + 0, + 0, + 24369, + 24370, + 0, + 0, + 0, + 24371, + 0, + 0, + 0, + 0, + 24372, + 24373, + 24374, + 0, + 0, + 0, + 0, + 0, + 24378, + 0, + 0, + 0, + 0, + 24379, + 0, + 24381, + 0, + 24383, + 24389, + 0, + 24390, + 0, + 0, + 24394, + 24395, + 24400, + 0, + 0, + 0, + 24401, + 24402, + 0, + 24406, + 0, + 0, + 0, + 24411, + 0, + 0, + 0, + 24415, + 0, + 24416, + 0, + 0, + 0, + 0, + 0, + 24417, + 0, + 24419, + 0, + 24422, + 0, + 24423, + 24428, + 0, + 24435, + 0, + 0, + 0, + 24439, + 0, + 0, + 0, + 24440, + 24442, + 24446, + 0, + 0, + 0, + 24447, + 24448, + 24449, + 24452, + 0, + 0, + 0, + 0, + 24453, + 24457, + 0, + 0, + 24458, + 24459, + 24460, + 0, + 24465, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24470, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24471, + 0, + 24473, + 24474, + 24475, + 24476, + 0, + 24478, + 0, + 0, + 0, + 0, + 24480, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24481, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24482, + 24485, + 0, + 0, + 0, + 0, + 24486, + 0, + 0, + 0, + 24488, + 0, + 0, + 0, + 24494, + 0, + 0, + 0, + 0, + 24497, + 0, + 0, + 24498, + 0, + 0, + 0, + 24499, + 24506, + 0, + 0, + 0, + 24507, + 0, + 0, + 24511, + 0, + 0, + 24513, + 24514, + 0, + 0, + 0, + 0, + 0, + 24517, + 0, + 24518, + 0, + 24520, + 0, + 24521, + 24524, + 24525, + 0, + 0, + 0, + 0, + 0, + 24527, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24528, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24537, + 24539, + 0, + 24540, + 0, + 0, + 0, + 24548, + 0, + 0, + 0, + 0, + 0, + 
24549, + 24550, + 0, + 0, + 0, + 24553, + 24554, + 0, + 24555, + 0, + 24556, + 0, + 24558, + 0, + 0, + 0, + 0, + 0, + 24560, + 0, + 0, + 0, + 24561, + 0, + 0, + 0, + 0, + 0, + 24562, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24567, + 0, + 0, + 0, + 0, + 0, + 24569, + 0, + 0, + 0, + 24574, + 0, + 24575, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24577, + 24581, + 0, + 24584, + 0, + 0, + 0, + 0, + 0, + 24585, + 0, + 0, + 0, + 0, + 0, + 24586, + 0, + 0, + 24587, + 0, + 24588, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24590, + 24591, + 0, + 0, + 0, + 0, + 24592, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24594, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24596, + 24597, + 0, + 0, + 0, + 0, + 24602, + 24603, + 0, + 0, + 0, + 0, + 24604, + 0, + 0, + 24605, + 0, + 24610, + 0, + 0, + 24611, + 0, + 0, + 0, + 0, + 24612, + 24615, + 24616, + 24624, + 0, + 0, + 0, + 24627, + 0, + 24638, + 24639, + 0, + 0, + 0, + 0, + 24640, + 0, + 0, + 0, + 24655, + 24656, + 24657, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24662, + 0, + 24663, + 24664, + 0, + 0, + 0, + 0, + 0, + 24665, + 0, + 0, + 0, + 0, + 24667, + 0, + 0, + 0, + 0, + 0, + 0, + 24668, + 24669, + 0, + 24670, + 24674, + 0, + 0, + 0, + 24675, + 0, + 24678, + 0, + 0, + 24679, + 0, + 0, + 0, + 24681, + 0, + 24683, + 0, + 0, + 0, + 0, + 24684, + 0, + 24685, + 0, + 0, + 24686, + 0, + 0, + 24688, + 24689, + 0, + 0, + 0, + 0, + 24690, + 24691, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24697, + 0, + 24698, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24709, + 0, + 0, + 0, + 0, + 0, + 24710, + 0, + 24712, + 0, + 0, + 0, + 0, + 0, + 0, + 24713, + 24714, + 0, + 24715, + 0, + 24716, + 24718, + 0, + 24719, + 0, + 0, + 0, + 0, + 24720, + 0, + 0, + 24725, + 0, + 0, + 24738, + 0, + 24749, + 24750, + 0, + 0, + 0, + 24752, + 0, + 0, + 0, + 24753, + 0, + 0, + 0, + 24758, + 0, + 0, + 0, + 0, + 0, + 24762, + 0, + 24763, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24764, + 0, + 0, + 0, + 0, + 0, + 24765, + 24767, + 24768, + 0, + 
24772, + 0, + 0, + 0, + 0, + 24773, + 0, + 0, + 0, + 0, + 24777, + 0, + 0, + 0, + 0, + 0, + 24785, + 0, + 24786, + 24788, + 0, + 0, + 0, + 24789, + 0, + 0, + 0, + 0, + 24794, + 24798, + 0, + 24799, + 24800, + 0, + 0, + 0, + 24803, + 0, + 24804, + 24806, + 0, + 24807, + 0, + 0, + 0, + 24810, + 0, + 0, + 0, + 0, + 0, + 0, + 24827, + 24828, + 0, + 24835, + 0, + 0, + 0, + 0, + 0, + 0, + 24836, + 0, + 0, + 0, + 0, + 0, + 24839, + 0, + 24843, + 24844, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24847, + 0, + 0, + 24848, + 0, + 0, + 0, + 0, + 0, + 0, + 24849, + 0, + 24850, + 24851, + 0, + 0, + 0, + 24852, + 0, + 24853, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24854, + 0, + 24855, + 0, + 0, + 24868, + 0, + 0, + 0, + 24883, + 0, + 0, + 0, + 24884, + 0, + 24895, + 24897, + 0, + 0, + 0, + 0, + 0, + 24899, + 0, + 0, + 0, + 0, + 0, + 24900, + 0, + 24913, + 0, + 0, + 0, + 0, + 0, + 0, + 24914, + 0, + 0, + 24917, + 24930, + 24931, + 0, + 0, + 0, + 24932, + 0, + 0, + 24939, + 0, + 0, + 24942, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 24945, + 24950, + 0, + 24951, + 0, + 0, + 24953, + 0, + 0, + 0, + 24954, + 0, + 24959, + 0, + 0, + 0, + 24961, + 0, + 0, + 24962, + 0, + 24964, + 24968, + 24970, + 24972, + 0, + 0, + 0, + 0, + 0, + 24976, + 0, + 0, + 0, + 24977, + 0, + 24982, + 0, + 0, + 24983, + 0, + 0, + 24984, + 0, + 0, + 0, + 24993, + 0, + 0, + 0, + 24994, + 0, + 0, + 25001, + 0, + 0, + 0, + 25003, + 0, + 0, + 25018, + 0, + 0, + 25023, + 0, + 0, + 0, + 25034, + 0, + 0, + 25035, + 25036, + 0, + 25037, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25039, + 0, + 0, + 0, + 0, + 0, + 25040, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25042, + 0, + 0, + 25043, + 25045, + 0, + 0, + 0, + 0, + 0, + 0, + 25049, + 0, + 0, + 25051, + 0, + 25052, + 25053, + 0, + 0, + 25054, + 0, + 0, + 0, + 25055, + 0, + 0, + 0, + 0, + 25057, + 25059, + 0, + 0, + 25060, + 25064, + 0, + 25065, + 25069, + 25070, + 0, + 0, + 0, + 0, + 25072, + 0, + 25073, + 0, + 25090, + 0, + 0, + 25092, + 25093, + 25101, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 25105, + 25108, + 0, + 0, + 25113, + 0, + 0, + 25115, + 25116, + 0, + 0, + 0, + 0, + 0, + 0, + 25117, + 0, + 0, + 0, + 25120, + 25121, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25125, + 0, + 0, + 0, + 25126, + 0, + 25130, + 25134, + 0, + 25139, + 0, + 25143, + 0, + 0, + 0, + 25151, + 0, + 25161, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25163, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25174, + 0, + 25175, + 0, + 25207, + 0, + 0, + 0, + 25209, + 0, + 0, + 0, + 0, + 25213, + 0, + 25219, + 0, + 25223, + 0, + 25225, + 0, + 0, + 0, + 25227, + 0, + 0, + 0, + 25228, + 0, + 0, + 0, + 25229, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25231, + 25233, + 0, + 0, + 0, + 0, + 25237, + 25239, + 0, + 0, + 0, + 25243, + 0, + 0, + 0, + 25252, + 0, + 25257, + 25258, + 0, + 0, + 0, + 0, + 25260, + 25265, + 0, + 25268, + 0, + 0, + 25273, + 25324, + 0, + 25325, + 0, + 25326, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25327, + 0, + 0, + 0, + 0, + 0, + 25328, + 0, + 0, + 0, + 0, + 0, + 0, + 25332, + 0, + 0, + 0, + 25333, + 0, + 0, + 0, + 25336, + 25337, + 25338, + 0, + 0, + 25343, + 0, + 25350, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25352, + 0, + 25354, + 0, + 25375, + 0, + 25379, + 0, + 0, + 0, + 0, + 25384, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25386, + 0, + 25388, + 0, + 25390, + 0, + 0, + 25399, + 0, + 0, + 25401, + 0, + 0, + 0, + 25402, + 0, + 0, + 0, + 25407, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25413, + 25415, + 0, + 0, + 25417, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25419, + 0, + 0, + 0, + 25421, + 0, + 0, + 0, + 25424, + 0, + 0, + 0, + 0, + 25433, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25435, + 0, + 0, + 0, + 0, + 0, + 0, + 25436, + 0, + 0, + 0, + 25437, + 0, + 0, + 25440, + 0, + 0, + 0, + 0, + 0, + 0, + 25442, + 0, + 0, + 25443, + 0, + 25446, + 0, + 0, + 25449, + 0, + 0, + 0, + 25450, + 0, + 0, + 0, + 0, + 25452, + 0, + 25453, + 25454, + 25455, + 0, + 0, + 0, + 25456, + 0, + 25457, 
+ 0, + 0, + 0, + 25459, + 0, + 25461, + 0, + 25468, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25469, + 0, + 0, + 0, + 0, + 0, + 25471, + 0, + 0, + 0, + 0, + 0, + 25474, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25475, + 0, + 0, + 0, + 0, + 25477, + 0, + 0, + 0, + 0, + 25483, + 0, + 0, + 0, + 0, + 0, + 25484, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25485, + 0, + 25497, + 0, + 0, + 25498, + 0, + 25504, + 0, + 25510, + 0, + 25512, + 0, + 0, + 25513, + 25514, + 0, + 0, + 0, + 0, + 0, + 0, + 25517, + 25518, + 25519, + 0, + 25520, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25521, + 0, + 25522, + 25527, + 25534, + 0, + 25536, + 0, + 25537, + 0, + 0, + 25548, + 25550, + 0, + 0, + 25551, + 0, + 25552, + 0, + 0, + 0, + 0, + 0, + 25554, + 0, + 25555, + 0, + 25556, + 25557, + 25568, + 0, + 0, + 0, + 25570, + 25571, + 0, + 0, + 0, + 0, + 0, + 0, + 25574, + 0, + 0, + 0, + 0, + 25579, + 0, + 0, + 0, + 25581, + 0, + 0, + 0, + 25582, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25588, + 0, + 0, + 0, + 0, + 25589, + 0, + 0, + 0, + 0, + 25590, + 0, + 25591, + 25592, + 25593, + 0, + 25594, + 0, + 0, + 0, + 25596, + 0, + 25597, + 25615, + 0, + 0, + 0, + 0, + 0, + 25618, + 0, + 0, + 0, + 0, + 25619, + 25623, + 0, + 0, + 25629, + 0, + 0, + 25631, + 0, + 0, + 0, + 25635, + 25636, + 0, + 0, + 25649, + 0, + 0, + 0, + 0, + 25654, + 0, + 0, + 0, + 25661, + 25663, + 0, + 0, + 25671, + 0, + 0, + 25678, + 25698, + 0, + 25699, + 25702, + 25703, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25704, + 0, + 0, + 0, + 0, + 0, + 25706, + 0, + 0, + 25710, + 0, + 25711, + 0, + 25712, + 0, + 25715, + 25716, + 25717, + 0, + 0, + 25718, + 25728, + 25732, + 0, + 0, + 0, + 25734, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25737, + 0, + 0, + 25739, + 0, + 0, + 0, + 25740, + 0, + 25741, + 25745, + 0, + 25746, + 0, + 25748, + 25772, + 25778, + 0, + 0, + 0, + 0, + 0, + 25780, + 0, + 0, + 0, + 0, + 25781, + 0, + 25782, + 25784, + 25785, + 0, + 0, + 0, + 25789, + 0, + 0, + 0, + 0, + 0, + 0, + 
25797, + 25801, + 0, + 0, + 0, + 25808, + 25809, + 0, + 0, + 25811, + 25814, + 25815, + 0, + 0, + 25817, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25820, + 0, + 0, + 0, + 0, + 25832, + 25833, + 0, + 0, + 0, + 25846, + 0, + 0, + 0, + 25847, + 25848, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25849, + 25850, + 0, + 0, + 25851, + 0, + 0, + 25852, + 0, + 25862, + 0, + 0, + 0, + 25863, + 25865, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25867, + 25868, + 0, + 25869, + 25874, + 0, + 25875, + 0, + 25876, + 25877, + 0, + 0, + 0, + 0, + 25878, + 25902, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25903, + 25904, + 25905, + 0, + 0, + 0, + 25908, + 25909, + 0, + 0, + 0, + 0, + 25910, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25912, + 0, + 25913, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25914, + 0, + 0, + 25916, + 0, + 0, + 0, + 0, + 0, + 25917, + 25927, + 0, + 0, + 0, + 0, + 25928, + 0, + 0, + 25930, + 0, + 0, + 0, + 25933, + 0, + 0, + 25938, + 25942, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25945, + 0, + 25950, + 0, + 25956, + 0, + 0, + 25961, + 25962, + 0, + 0, + 25963, + 0, + 25964, + 25965, + 25966, + 0, + 0, + 0, + 0, + 0, + 25967, + 0, + 0, + 0, + 0, + 25968, + 0, + 0, + 0, + 25969, + 25971, + 0, + 0, + 0, + 0, + 0, + 25973, + 25975, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25978, + 0, + 25981, + 0, + 0, + 0, + 25982, + 0, + 0, + 0, + 25984, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 25993, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26002, + 0, + 0, + 0, + 26005, + 0, + 0, + 0, + 26006, + 26007, + 0, + 0, + 26014, + 26015, + 26016, + 0, + 0, + 0, + 0, + 0, + 0, + 26017, + 26018, + 26020, + 0, + 26022, + 26023, + 0, + 0, + 0, + 26024, + 26028, + 0, + 26029, + 26033, + 26034, + 26044, + 0, + 0, + 0, + 0, + 0, + 26046, + 0, + 0, + 26047, + 0, + 0, + 26049, + 0, + 26050, + 0, + 26051, + 0, + 0, + 0, + 0, + 0, + 26053, + 0, + 0, + 0, + 0, + 26054, + 26059, + 0, + 0, + 0, + 0, + 0, + 0, + 26060, + 0, + 26066, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26067, + 
0, + 26069, + 0, + 0, + 26071, + 0, + 0, + 0, + 26073, + 0, + 26074, + 26077, + 0, + 0, + 0, + 0, + 26078, + 0, + 0, + 0, + 26079, + 0, + 26090, + 0, + 0, + 26094, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26095, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26096, + 26101, + 0, + 26107, + 26122, + 0, + 26124, + 0, + 0, + 26125, + 0, + 0, + 0, + 0, + 0, + 0, + 26136, + 26141, + 26155, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26164, + 26166, + 0, + 0, + 0, + 26167, + 0, + 26170, + 26171, + 0, + 0, + 26172, + 0, + 0, + 26174, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26175, + 0, + 0, + 0, + 26176, + 26177, + 0, + 26321, + 26322, + 0, + 26323, + 0, + 0, + 26324, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26325, + 0, + 26331, + 0, + 0, + 0, + 0, + 0, + 0, + 26335, + 0, + 0, + 0, + 26350, + 0, + 0, + 0, + 26379, + 0, + 0, + 26382, + 26383, + 26385, + 0, + 0, + 26392, + 26406, + 0, + 0, + 0, + 0, + 26411, + 0, + 0, + 0, + 0, + 0, + 26412, + 0, + 0, + 26420, + 0, + 0, + 26423, + 0, + 26424, + 26426, + 26432, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26435, + 0, + 26436, + 0, + 0, + 0, + 0, + 0, + 26441, + 0, + 26444, + 0, + 0, + 0, + 26446, + 0, + 0, + 0, + 0, + 26447, + 0, + 0, + 0, + 0, + 26449, + 0, + 26450, + 26452, + 0, + 26453, + 26454, + 0, + 0, + 0, + 26455, + 0, + 0, + 0, + 26456, + 0, + 0, + 26458, + 0, + 0, + 26460, + 0, + 26463, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26464, + 26470, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26473, + 0, + 0, + 26474, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26475, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26477, + 0, + 26485, + 0, + 0, + 26486, + 0, + 26487, + 0, + 0, + 26488, + 26493, + 26494, + 0, + 0, + 26495, + 0, + 26497, + 26504, + 26506, + 0, + 0, + 0, + 0, + 0, + 26507, + 0, + 0, + 0, + 0, + 0, + 26509, + 0, + 0, + 26510, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26512, + 0, + 26513, + 26515, + 0, + 0, + 0, + 26518, + 0, 
+ 0, + 0, + 26519, + 0, + 26524, + 26526, + 0, + 0, + 0, + 26527, + 0, + 26532, + 0, + 26533, + 26537, + 26558, + 0, + 0, + 0, + 26559, + 0, + 0, + 0, + 26571, + 0, + 0, + 26573, + 0, + 26588, + 0, + 26593, + 0, + 0, + 0, + 0, + 0, + 0, + 26603, + 0, + 26604, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26606, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26607, + 26609, + 26611, + 26614, + 0, + 0, + 0, + 26616, + 26620, + 0, + 26621, + 0, + 0, + 0, + 0, + 0, + 26627, + 0, + 26629, + 0, + 0, + 26630, + 0, + 0, + 26632, + 26643, + 0, + 0, + 0, + 26644, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26646, + 26647, + 0, + 0, + 0, + 26650, + 0, + 0, + 26656, + 0, + 0, + 0, + 0, + 26663, + 26670, + 26671, + 0, + 0, + 0, + 26685, + 26686, + 26687, + 0, + 26689, + 0, + 0, + 0, + 0, + 26744, + 0, + 26745, + 0, + 26747, + 26748, + 0, + 26749, + 26750, + 26751, + 0, + 0, + 0, + 0, + 26752, + 26755, + 0, + 0, + 0, + 26756, + 26769, + 0, + 0, + 0, + 26774, + 0, + 0, + 0, + 0, + 0, + 26775, + 0, + 26777, + 26778, + 0, + 26786, + 0, + 0, + 0, + 26787, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26788, + 0, + 0, + 26789, + 0, + 0, + 0, + 0, + 0, + 26791, + 0, + 26792, + 26793, + 0, + 0, + 0, + 26794, + 0, + 26797, + 26798, + 0, + 0, + 0, + 26800, + 0, + 0, + 26803, + 0, + 26804, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26805, + 0, + 0, + 26808, + 0, + 0, + 26809, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26812, + 0, + 26825, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26826, + 0, + 0, + 26827, + 26829, + 26834, + 0, + 0, + 0, + 0, + 26835, + 0, + 0, + 26849, + 0, + 26851, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26852, + 0, + 26853, + 26857, + 0, + 26858, + 0, + 26859, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26876, + 0, + 26878, + 26882, + 26883, + 0, + 0, + 0, + 0, + 26890, + 26894, + 0, + 0, + 0, + 0, + 26895, + 26896, + 0, + 0, + 0, + 0, + 0, + 26900, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26911, + 26913, + 26914, + 26915, + 26916, + 26919, + 0, + 0, + 0, + 
26921, + 26922, + 0, + 0, + 26925, + 0, + 0, + 0, + 26928, + 0, + 0, + 26929, + 26930, + 0, + 0, + 0, + 26931, + 0, + 26932, + 0, + 0, + 0, + 0, + 0, + 26933, + 0, + 0, + 0, + 0, + 0, + 0, + 26937, + 0, + 0, + 26943, + 0, + 0, + 26944, + 0, + 0, + 0, + 26946, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26956, + 0, + 26958, + 0, + 0, + 26963, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 26965, + 0, + 26969, + 26970, + 26972, + 0, + 0, + 0, + 0, + 0, + 26973, + 0, + 26974, + 0, + 26978, + 0, + 26980, + 0, + 0, + 0, + 0, + 0, + 0, + 26982, + 0, + 26986, + 26987, + 0, + 26990, + 0, + 0, + 0, + 0, + 27003, + 27006, + 0, + 0, + 27007, + 27010, + 27012, + 27013, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27014, + 27015, + 27018, + 0, + 27019, + 0, + 0, + 0, + 0, + 0, + 27025, + 0, + 0, + 0, + 27026, + 0, + 0, + 0, + 0, + 27029, + 27030, + 27031, + 27034, + 0, + 0, + 27036, + 27037, + 0, + 0, + 0, + 27038, + 27042, + 0, + 0, + 0, + 27044, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27045, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27046, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27047, + 27049, + 0, + 27050, + 0, + 0, + 0, + 27051, + 27052, + 0, + 27055, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27056, + 27058, + 27059, + 0, + 27061, + 0, + 27064, + 0, + 0, + 0, + 0, + 0, + 27069, + 0, + 0, + 27070, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27072, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27076, + 0, + 0, + 0, + 0, + 0, + 27078, + 0, + 27079, + 0, + 0, + 0, + 27081, + 0, + 0, + 0, + 0, + 0, + 0, + 27082, + 0, + 27083, + 27086, + 0, + 0, + 0, + 0, + 27087, + 0, + 0, + 0, + 0, + 0, + 27088, + 27090, + 0, + 27094, + 0, + 0, + 27095, + 0, + 27099, + 27102, + 0, + 0, + 0, + 27103, + 0, + 0, + 0, + 0, + 27105, + 0, + 0, + 0, + 27106, + 0, + 0, + 0, + 0, + 0, + 0, + 27107, + 0, + 0, + 0, + 0, + 27108, + 27117, + 0, + 0, + 0, + 0, + 27118, + 0, + 0, + 27124, + 0, + 27126, + 0, + 0, + 27130, + 27131, + 0, + 0, + 0, + 0, + 0, + 0, + 
27147, + 0, + 0, + 0, + 0, + 27148, + 27149, + 0, + 0, + 0, + 0, + 27150, + 27151, + 0, + 27152, + 0, + 27159, + 0, + 0, + 0, + 27164, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27175, + 0, + 27189, + 0, + 0, + 27191, + 0, + 27193, + 0, + 27195, + 0, + 27198, + 0, + 0, + 0, + 0, + 0, + 27200, + 0, + 0, + 0, + 0, + 27202, + 0, + 0, + 0, + 0, + 27203, + 0, + 0, + 27204, + 0, + 0, + 27206, + 0, + 27207, + 0, + 0, + 0, + 0, + 27209, + 0, + 0, + 0, + 27213, + 0, + 0, + 27216, + 27219, + 27220, + 27222, + 27223, + 0, + 27224, + 0, + 27225, + 27226, + 0, + 0, + 27233, + 0, + 0, + 0, + 0, + 27235, + 0, + 27237, + 0, + 27238, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27239, + 0, + 27242, + 27243, + 0, + 27250, + 0, + 0, + 0, + 27251, + 0, + 27253, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27254, + 27255, + 27258, + 0, + 0, + 0, + 27259, + 0, + 0, + 0, + 0, + 0, + 0, + 27267, + 0, + 27276, + 27278, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27296, + 27297, + 27301, + 0, + 0, + 0, + 0, + 0, + 0, + 27302, + 0, + 0, + 0, + 0, + 0, + 0, + 27312, + 27313, + 0, + 0, + 0, + 0, + 0, + 27318, + 0, + 27320, + 0, + 27329, + 0, + 27330, + 27331, + 0, + 27332, + 0, + 0, + 0, + 0, + 27340, + 0, + 0, + 0, + 27348, + 0, + 0, + 0, + 0, + 0, + 0, + 27350, + 0, + 27351, + 0, + 0, + 0, + 0, + 27355, + 0, + 0, + 27358, + 27359, + 27361, + 0, + 0, + 0, + 27365, + 0, + 27367, + 0, + 27376, + 27378, + 0, + 0, + 27379, + 0, + 0, + 0, + 0, + 0, + 0, + 27396, + 0, + 27397, + 27404, + 0, + 0, + 0, + 0, + 0, + 27408, + 0, + 0, + 0, + 0, + 27453, + 0, + 0, + 0, + 27456, + 0, + 0, + 0, + 27458, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27459, + 0, + 0, + 0, + 27460, + 0, + 0, + 27461, + 0, + 27465, + 27467, + 0, + 0, + 27469, + 0, + 27470, + 0, + 27471, + 0, + 27477, + 27482, + 0, + 0, + 0, + 0, + 0, + 0, + 27484, + 0, + 0, + 0, + 0, + 0, + 0, + 27485, + 0, + 0, + 0, + 0, + 0, + 27493, + 0, + 27494, + 27502, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27511, + 27532, + 0, + 0, + 0, + 27533, + 27545, + 0, + 0, + 0, + 27546, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27547, + 0, + 0, + 27549, + 27550, + 0, + 27551, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27555, + 0, + 0, + 27571, + 0, + 27573, + 27574, + 27575, + 27577, + 0, + 27578, + 0, + 0, + 27579, + 27585, + 0, + 0, + 0, + 0, + 0, + 27586, + 0, + 0, + 27588, + 27589, + 0, + 0, + 0, + 0, + 27596, + 0, + 0, + 27600, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27608, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27610, + 0, + 0, + 0, + 27618, + 0, + 0, + 27620, + 0, + 0, + 0, + 27631, + 0, + 0, + 27632, + 27634, + 0, + 27636, + 27638, + 0, + 0, + 0, + 27643, + 0, + 27644, + 27649, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27651, + 27660, + 0, + 27661, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27662, + 0, + 0, + 27664, + 0, + 27665, + 0, + 0, + 0, + 27669, + 0, + 27671, + 0, + 0, + 0, + 27673, + 27674, + 0, + 0, + 0, + 27682, + 0, + 0, + 0, + 27711, + 0, + 27712, + 27713, + 27719, + 27720, + 0, + 0, + 27728, + 0, + 27729, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27731, + 0, + 0, + 27732, + 0, + 27733, + 0, + 27738, + 0, + 0, + 0, + 27742, + 0, + 0, + 0, + 27743, + 27744, + 0, + 0, + 0, + 0, + 0, + 0, + 27745, + 27746, + 0, + 0, + 0, + 27747, + 27748, + 27751, + 27752, + 0, + 0, + 0, + 27768, + 27770, + 0, + 0, + 0, + 27774, + 27775, + 0, + 27776, + 27777, + 0, + 0, + 27781, + 0, + 27784, + 0, + 27786, + 0, + 0, + 27791, + 0, + 27792, + 27793, + 27804, + 0, + 27812, + 27813, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27814, + 0, + 27825, + 0, + 27827, + 0, + 0, + 0, + 0, + 27828, + 27861, + 27862, + 0, + 0, + 0, + 27864, + 0, + 0, + 0, + 27865, + 27884, + 0, + 27889, + 0, + 0, + 0, + 0, + 0, + 27890, + 0, + 27891, + 0, + 0, + 0, + 27892, + 0, + 0, + 0, + 0, + 0, + 27897, + 27898, + 0, + 0, + 27899, + 0, + 0, + 0, + 27901, + 27905, + 0, + 0, + 27920, + 0, + 0, 
+ 27921, + 0, + 27922, + 0, + 0, + 0, + 27931, + 27934, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27941, + 0, + 27942, + 0, + 27945, + 0, + 27947, + 27954, + 0, + 0, + 0, + 0, + 27960, + 27963, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 27964, + 27965, + 0, + 0, + 0, + 27967, + 0, + 27969, + 27975, + 0, + 27976, + 27977, + 0, + 27981, + 0, + 27983, + 28051, + 28052, + 0, + 0, + 0, + 0, + 0, + 28056, + 0, + 0, + 0, + 0, + 0, + 0, + 28058, + 28059, + 0, + 0, + 28061, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28063, + 0, + 0, + 0, + 0, + 0, + 0, + 28066, + 0, + 0, + 0, + 0, + 0, + 0, + 28069, + 28070, + 28072, + 0, + 28073, + 0, + 0, + 28074, + 0, + 0, + 0, + 0, + 28075, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28078, + 0, + 0, + 0, + 0, + 28085, + 0, + 0, + 0, + 0, + 28086, + 0, + 0, + 0, + 0, + 0, + 0, + 28088, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28090, + 0, + 28097, + 28114, + 28115, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28116, + 0, + 0, + 0, + 0, + 0, + 28118, + 0, + 28129, + 0, + 28131, + 0, + 0, + 28135, + 0, + 0, + 0, + 28140, + 28141, + 0, + 0, + 0, + 28146, + 0, + 0, + 0, + 0, + 28152, + 0, + 0, + 0, + 0, + 28155, + 28157, + 28161, + 0, + 0, + 0, + 0, + 28166, + 0, + 28167, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28172, + 0, + 0, + 0, + 0, + 0, + 0, + 28173, + 0, + 0, + 28175, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28178, + 28188, + 0, + 28190, + 0, + 0, + 0, + 0, + 0, + 28191, + 0, + 28193, + 28206, + 0, + 0, + 28207, + 28209, + 0, + 28211, + 0, + 28213, + 0, + 0, + 0, + 28215, + 28216, + 28217, + 0, + 28222, + 0, + 28223, + 28225, + 0, + 0, + 0, + 28226, + 0, + 28227, + 28229, + 28232, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28235, + 0, + 28241, + 0, + 0, + 28242, + 0, + 0, + 0, + 0, + 28243, + 0, + 0, + 0, + 28245, + 0, + 0, + 0, + 28248, + 28250, + 0, + 28251, + 28252, + 0, + 0, + 0, + 0, + 0, + 0, + 28253, + 0, + 0, + 28254, + 28255, + 0, + 0, + 28256, + 0, + 0, + 28258, + 0, + 0, + 0, + 0, + 0, + 28259, + 0, + 0, + 
28260, + 0, + 0, + 28261, + 0, + 0, + 0, + 0, + 28262, + 28263, + 0, + 0, + 28264, + 0, + 0, + 0, + 28266, + 0, + 28268, + 28269, + 0, + 28270, + 28272, + 28274, + 0, + 28277, + 28278, + 0, + 0, + 0, + 28279, + 0, + 28280, + 28281, + 28283, + 0, + 28292, + 0, + 28294, + 0, + 28297, + 0, + 0, + 0, + 0, + 28299, + 0, + 0, + 0, + 0, + 0, + 28300, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28301, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28302, + 28303, + 0, + 0, + 0, + 0, + 28304, + 0, + 0, + 28305, + 0, + 28312, + 0, + 28313, + 28314, + 0, + 0, + 0, + 0, + 0, + 0, + 28315, + 0, + 0, + 0, + 28320, + 28321, + 0, + 0, + 28328, + 0, + 0, + 0, + 28329, + 28338, + 0, + 28339, + 0, + 0, + 28344, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28347, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28348, + 0, + 0, + 0, + 0, + 0, + 28411, + 0, + 28412, + 28413, + 0, + 28416, + 0, + 0, + 0, + 28420, + 0, + 0, + 0, + 0, + 0, + 28421, + 0, + 0, + 0, + 0, + 28423, + 0, + 0, + 0, + 28424, + 0, + 0, + 28428, + 0, + 0, + 0, + 0, + 0, + 28429, + 0, + 0, + 0, + 28431, + 28434, + 0, + 28458, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28464, + 0, + 0, + 0, + 0, + 28465, + 0, + 28467, + 0, + 0, + 0, + 0, + 0, + 0, + 28471, + 0, + 0, + 0, + 0, + 28474, + 0, + 28480, + 0, + 28481, + 0, + 0, + 28485, + 0, + 0, + 0, + 0, + 28486, + 28488, + 0, + 0, + 28489, + 0, + 0, + 0, + 0, + 28492, + 0, + 0, + 0, + 28495, + 0, + 28497, + 0, + 28499, + 0, + 0, + 0, + 0, + 28500, + 0, + 0, + 28502, + 28503, + 0, + 0, + 0, + 28508, + 0, + 0, + 0, + 28510, + 0, + 0, + 28512, + 28513, + 28514, + 28521, + 0, + 28526, + 0, + 28527, + 28528, + 0, + 0, + 0, + 0, + 28529, + 0, + 0, + 28532, + 0, + 0, + 28537, + 28538, + 0, + 0, + 0, + 28539, + 0, + 28548, + 0, + 28553, + 28554, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28560, + 28563, + 0, + 0, + 28564, + 0, + 0, + 0, + 0, + 28565, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28566, + 28568, + 0, + 0, + 0, + 0, + 0, + 0, + 28569, + 
0, + 0, + 0, + 28570, + 0, + 28572, + 28573, + 0, + 0, + 0, + 0, + 28575, + 0, + 0, + 0, + 0, + 28576, + 28581, + 28588, + 0, + 0, + 28589, + 0, + 0, + 0, + 28590, + 28595, + 0, + 28598, + 0, + 0, + 28601, + 0, + 0, + 28605, + 0, + 0, + 0, + 0, + 28614, + 28615, + 28619, + 0, + 0, + 0, + 0, + 0, + 0, + 28620, + 0, + 28626, + 0, + 0, + 28628, + 0, + 28631, + 0, + 28632, + 0, + 0, + 0, + 0, + 0, + 0, + 28635, + 0, + 0, + 0, + 28637, + 28638, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28639, + 0, + 28643, + 0, + 0, + 28652, + 0, + 0, + 0, + 28662, + 0, + 28670, + 28671, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28672, + 28673, + 28675, + 28676, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28691, + 0, + 0, + 0, + 28695, + 0, + 0, + 0, + 28696, + 0, + 28697, + 28698, + 0, + 28705, + 0, + 28707, + 28708, + 28710, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28711, + 28728, + 0, + 0, + 0, + 28736, + 0, + 0, + 0, + 28737, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28738, + 0, + 28739, + 0, + 28741, + 0, + 0, + 28742, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28745, + 0, + 0, + 0, + 0, + 0, + 0, + 28749, + 28750, + 28752, + 28754, + 28756, + 0, + 28757, + 0, + 0, + 0, + 0, + 28759, + 28760, + 0, + 0, + 0, + 0, + 0, + 0, + 28762, + 0, + 0, + 0, + 28764, + 0, + 0, + 0, + 0, + 0, + 0, + 28766, + 0, + 28767, + 28768, + 0, + 0, + 0, + 0, + 28769, + 28770, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28772, + 0, + 28773, + 0, + 28782, + 0, + 0, + 0, + 0, + 0, + 0, + 28784, + 0, + 28785, + 0, + 28786, + 0, + 0, + 0, + 28787, + 0, + 0, + 0, + 28797, + 0, + 0, + 0, + 0, + 0, + 0, + 28799, + 0, + 0, + 28801, + 0, + 0, + 0, + 0, + 28802, + 0, + 28805, + 0, + 0, + 28806, + 0, + 0, + 28807, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28808, + 0, + 0, + 0, + 0, + 0, + 28810, + 28812, + 0, + 0, + 28816, + 28819, + 0, + 
0, + 28821, + 0, + 28826, + 0, + 0, + 0, + 28842, + 28852, + 0, + 0, + 28853, + 0, + 28854, + 28855, + 0, + 0, + 0, + 28857, + 0, + 0, + 0, + 28858, + 0, + 28867, + 28868, + 28869, + 0, + 0, + 0, + 28874, + 28880, + 28882, + 28890, + 28892, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28895, + 0, + 0, + 0, + 28898, + 28899, + 0, + 0, + 0, + 28900, + 0, + 0, + 28904, + 0, + 28906, + 0, + 0, + 0, + 0, + 28907, + 0, + 0, + 0, + 0, + 0, + 0, + 28908, + 0, + 0, + 0, + 28910, + 0, + 28914, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28915, + 28916, + 28919, + 0, + 0, + 28920, + 0, + 28921, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28924, + 0, + 0, + 0, + 0, + 28926, + 28929, + 0, + 0, + 0, + 28930, + 0, + 28936, + 0, + 28939, + 0, + 0, + 0, + 0, + 28942, + 0, + 0, + 0, + 0, + 0, + 0, + 28956, + 0, + 0, + 0, + 28966, + 0, + 0, + 0, + 0, + 28967, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 28968, + 0, + 28971, + 0, + 28975, + 28976, + 0, + 28982, + 28983, + 0, + 0, + 28984, + 28989, + 28996, + 28997, + 28998, + 0, + 0, + 0, + 0, + 0, + 0, + 28999, + 0, + 0, + 0, + 0, + 0, + 29000, + 0, + 29001, + 0, + 0, + 0, + 29009, + 0, + 0, + 29011, + 0, + 0, + 29021, + 0, + 0, + 0, + 0, + 29024, + 0, + 29025, + 0, + 0, + 0, + 0, + 0, + 29026, + 0, + 0, + 0, + 29036, + 0, + 0, + 0, + 29037, + 0, + 0, + 0, + 0, + 29038, + 0, + 29045, + 0, + 29047, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29051, + 0, + 0, + 0, + 29054, + 29056, + 29062, + 0, + 29070, + 29082, + 0, + 0, + 0, + 29083, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29084, + 0, + 0, + 0, + 0, + 29085, + 29088, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29090, + 29097, + 0, + 0, + 0, + 29103, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29105, + 0, + 0, + 0, + 0, + 0, + 29107, + 0, + 29109, + 0, + 0, + 0, + 29115, + 0, + 0, + 29120, + 0, + 0, + 29138, + 29140, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29152, + 0, + 29160, + 29174, + 0, + 29176, + 0, + 0, + 29180, + 0, + 29181, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 29228, + 0, + 0, + 29229, + 0, + 0, + 29230, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29234, + 0, + 0, + 0, + 29241, + 0, + 29245, + 0, + 29248, + 0, + 29250, + 29256, + 29280, + 0, + 29282, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29285, + 0, + 0, + 29286, + 29291, + 29292, + 0, + 0, + 0, + 0, + 29294, + 0, + 29295, + 0, + 0, + 0, + 0, + 0, + 29296, + 29297, + 29298, + 29300, + 0, + 29302, + 0, + 0, + 29304, + 29307, + 0, + 29312, + 0, + 0, + 0, + 29322, + 0, + 0, + 29323, + 0, + 0, + 29324, + 29326, + 29328, + 0, + 29335, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29338, + 29339, + 0, + 0, + 0, + 0, + 0, + 29341, + 29343, + 0, + 0, + 0, + 0, + 29344, + 0, + 0, + 0, + 0, + 0, + 29345, + 0, + 0, + 0, + 0, + 29346, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29347, + 29348, + 29349, + 0, + 0, + 29354, + 0, + 0, + 29355, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29357, + 0, + 0, + 0, + 0, + 29364, + 0, + 29365, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29366, + 0, + 0, + 29368, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29378, + 0, + 29381, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29386, + 0, + 0, + 0, + 0, + 0, + 0, + 29389, + 0, + 0, + 0, + 29390, + 0, + 0, + 29391, + 29397, + 0, + 29398, + 29412, + 29414, + 29418, + 29419, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29420, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29423, + 0, + 0, + 0, + 29435, + 0, + 0, + 0, + 29437, + 0, + 0, + 29439, + 0, + 29441, + 0, + 0, + 0, + 0, + 29443, + 0, + 29446, + 29450, + 29452, + 0, + 0, + 0, + 0, + 0, + 29456, + 0, + 0, + 0, + 0, + 0, + 29461, + 0, + 0, + 0, + 29464, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29468, + 0, + 29473, + 0, + 0, + 0, + 29486, + 0, + 0, + 0, + 29490, + 0, + 0, + 0, + 29491, + 29492, + 0, + 0, + 29497, + 0, + 0, + 0, + 29498, + 0, + 29499, + 0, + 29502, + 29505, + 0, + 29509, + 0, + 0, + 0, + 29510, + 0, + 0, + 0, + 29512, + 0, + 0, + 0, + 29516, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 29518, + 0, + 29519, + 0, + 0, + 0, + 0, + 0, + 29520, + 29521, + 29529, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29530, + 0, + 0, + 29531, + 29538, + 0, + 29540, + 0, + 0, + 0, + 29542, + 0, + 29543, + 29544, + 29547, + 0, + 0, + 29548, + 0, + 0, + 0, + 29549, + 0, + 0, + 0, + 29550, + 0, + 0, + 29552, + 0, + 0, + 0, + 0, + 29558, + 29561, + 0, + 29562, + 29564, + 0, + 0, + 29565, + 0, + 0, + 29566, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29578, + 29584, + 29586, + 29591, + 0, + 0, + 0, + 0, + 29593, + 29594, + 0, + 0, + 29597, + 0, + 0, + 29613, + 0, + 29614, + 0, + 29615, + 0, + 0, + 0, + 0, + 29616, + 29617, + 0, + 0, + 29625, + 0, + 0, + 0, + 29632, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29633, + 0, + 0, + 0, + 0, + 0, + 29634, + 29635, + 29637, + 0, + 29638, + 0, + 29641, + 29643, + 0, + 0, + 0, + 0, + 0, + 0, + 29644, + 0, + 29645, + 0, + 29649, + 0, + 0, + 0, + 29650, + 0, + 29653, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29656, + 29659, + 0, + 0, + 29660, + 0, + 0, + 0, + 29661, + 0, + 0, + 0, + 0, + 0, + 29664, + 0, + 0, + 0, + 29671, + 29673, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29675, + 0, + 29677, + 29679, + 0, + 0, + 29684, + 0, + 0, + 0, + 0, + 0, + 29685, + 0, + 0, + 0, + 29687, + 0, + 0, + 0, + 29688, + 0, + 29689, + 29690, + 29700, + 0, + 29701, + 0, + 0, + 0, + 29702, + 0, + 29706, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29720, + 0, + 29721, + 0, + 29727, + 0, + 29733, + 29734, + 0, + 29750, + 29761, + 0, + 29763, + 0, + 0, + 0, + 0, + 0, + 29764, + 0, + 0, + 29765, + 0, + 0, + 0, + 29771, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29772, + 0, + 0, + 0, + 29773, + 29774, + 29775, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29822, + 0, + 0, + 0, + 29824, + 0, + 29825, + 0, + 0, + 0, + 0, + 0, + 29827, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29829, + 0, + 29832, + 29834, + 0, + 0, + 29835, + 0, + 0, + 29837, + 29838, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29843, + 0, + 0, + 0, + 0, + 29844, + 29845, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29849, + 0, + 0, + 29869, + 29872, + 29890, + 29905, + 0, + 0, + 0, + 0, + 0, + 29907, + 29921, + 0, + 29922, + 0, + 0, + 29923, + 29926, + 29944, + 29946, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 29947, + 29948, + 0, + 0, + 0, + 29951, + 0, + 0, + 0, + 0, + 0, + 29953, + 0, + 0, + 29956, + 0, + 29957, + 0, + 0, + 29962, + 0, + 0, + 0, + 0, + 29971, + 0, + 0, + 0, + 29972, + 0, + 0, + 0, + 0, + 0, + 29978, + 0, + 29979, + 29992, + 30007, + 30008, + 30010, + 0, + 0, + 0, + 30013, + 0, + 0, + 0, + 0, + 30014, + 30016, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30017, + 0, + 0, + 0, + 0, + 0, + 30023, + 30031, + 0, + 0, + 30033, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30034, + 0, + 30038, + 0, + 30039, + 0, + 30040, + 0, + 0, + 0, + 0, + 0, + 0, + 30067, + 30068, + 0, + 0, + 0, + 30069, + 0, + 30072, + 0, + 0, + 0, + 30073, + 0, + 0, + 0, + 0, + 30075, + 0, + 0, + 0, + 0, + 0, + 0, + 30079, + 0, + 0, + 30080, + 0, + 0, + 0, + 0, + 0, + 30082, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30084, + 30090, + 0, + 0, + 30091, + 0, + 0, + 0, + 0, + 30098, + 30118, + 0, + 30119, + 0, + 30121, + 30130, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30131, + 30132, + 30133, + 0, + 0, + 0, + 0, + 0, + 0, + 30135, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30136, + 0, + 0, + 30137, + 30138, + 0, + 0, + 0, + 30139, + 30146, + 0, + 0, + 0, + 0, + 0, + 30147, + 0, + 0, + 30148, + 30151, + 0, + 0, + 0, + 30168, + 0, + 30172, + 30173, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30180, + 30181, + 0, + 30192, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30194, + 30196, + 0, + 0, + 30199, + 0, + 0, + 30202, + 0, + 0, + 0, + 0, + 30203, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30213, + 0, + 0, + 0, + 30216, + 0, + 0, + 30217, + 0, + 0, + 0, + 30218, + 0, + 0, + 0, + 0, + 30219, + 0, + 
30220, + 0, + 30222, + 30227, + 0, + 0, + 0, + 0, + 0, + 30231, + 0, + 0, + 30233, + 30235, + 0, + 0, + 0, + 0, + 30238, + 0, + 30240, + 30243, + 30245, + 0, + 30250, + 30252, + 0, + 0, + 0, + 30269, + 0, + 0, + 30271, + 30272, + 0, + 0, + 0, + 30278, + 30280, + 0, + 0, + 30282, + 0, + 30284, + 0, + 30294, + 0, + 0, + 0, + 0, + 30295, + 30296, + 0, + 0, + 0, + 0, + 0, + 30298, + 30299, + 30302, + 30304, + 30306, + 0, + 0, + 0, + 0, + 0, + 0, + 30316, + 30317, + 0, + 0, + 0, + 30318, + 0, + 0, + 0, + 30319, + 0, + 30320, + 30322, + 30326, + 0, + 0, + 0, + 0, + 0, + 30327, + 0, + 30332, + 30348, + 30349, + 0, + 0, + 30356, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30357, + 0, + 30358, + 0, + 30359, + 30360, + 0, + 0, + 30365, + 30366, + 30378, + 0, + 0, + 0, + 0, + 30379, + 0, + 0, + 30381, + 0, + 30385, + 0, + 30388, + 30397, + 0, + 0, + 0, + 30401, + 0, + 0, + 0, + 0, + 30403, + 0, + 0, + 0, + 0, + 0, + 30404, + 0, + 0, + 30405, + 0, + 30406, + 30408, + 0, + 30409, + 0, + 30410, + 0, + 0, + 0, + 30417, + 0, + 0, + 30418, + 30419, + 0, + 30420, + 0, + 30424, + 0, + 0, + 0, + 30427, + 30430, + 30432, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30433, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30436, + 0, + 30437, + 30438, + 0, + 30441, + 30442, + 0, + 0, + 0, + 30445, + 0, + 0, + 0, + 0, + 30452, + 30456, + 30457, + 0, + 0, + 0, + 30458, + 0, + 30464, + 0, + 0, + 0, + 0, + 0, + 0, + 30467, + 0, + 30469, + 0, + 0, + 0, + 0, + 0, + 30477, + 0, + 0, + 30484, + 0, + 0, + 0, + 0, + 0, + 30485, + 0, + 0, + 0, + 0, + 0, + 30486, + 30487, + 30497, + 30498, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30505, + 0, + 30508, + 0, + 0, + 0, + 30509, + 30510, + 0, + 30514, + 30516, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30523, + 0, + 30524, + 0, + 30525, + 0, + 0, + 0, + 0, + 30537, + 0, + 0, + 30538, + 0, + 0, + 0, + 0, + 0, + 30553, + 0, + 0, + 30555, + 30556, + 30558, + 30559, + 30560, + 0, + 0, + 30561, + 
0, + 30562, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30563, + 30570, + 30571, + 0, + 30586, + 30587, + 0, + 0, + 30590, + 0, + 0, + 30594, + 0, + 0, + 0, + 0, + 30611, + 30612, + 30623, + 30634, + 0, + 0, + 30636, + 30640, + 30655, + 30656, + 0, + 30657, + 0, + 0, + 30658, + 30669, + 0, + 30670, + 0, + 30676, + 30678, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30679, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30695, + 0, + 0, + 30698, + 0, + 0, + 0, + 0, + 30700, + 0, + 0, + 0, + 0, + 30701, + 0, + 30702, + 30703, + 0, + 0, + 0, + 0, + 30707, + 0, + 0, + 0, + 30709, + 0, + 0, + 30710, + 30719, + 30729, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30731, + 0, + 0, + 30733, + 0, + 0, + 0, + 30734, + 0, + 0, + 0, + 0, + 0, + 30736, + 30737, + 0, + 0, + 0, + 30740, + 0, + 0, + 0, + 30743, + 0, + 30746, + 0, + 30747, + 30748, + 0, + 0, + 30751, + 30752, + 30753, + 0, + 0, + 0, + 30754, + 0, + 0, + 30760, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30763, + 0, + 30764, + 0, + 0, + 30766, + 0, + 30769, + 30770, + 30771, + 30774, + 30777, + 0, + 0, + 30779, + 30780, + 30781, + 0, + 0, + 0, + 0, + 30790, + 0, + 0, + 0, + 30792, + 0, + 0, + 0, + 0, + 30810, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30812, + 30819, + 0, + 0, + 30823, + 30824, + 0, + 30825, + 0, + 30827, + 0, + 0, + 0, + 0, + 0, + 0, + 30828, + 0, + 0, + 30830, + 0, + 0, + 0, + 30834, + 0, + 30835, + 0, + 30837, + 30838, + 0, + 30845, + 0, + 0, + 0, + 0, + 0, + 30846, + 30847, + 0, + 0, + 30849, + 0, + 30851, + 0, + 0, + 0, + 0, + 0, + 30852, + 30858, + 0, + 0, + 30859, + 0, + 30865, + 0, + 0, + 30866, + 0, + 0, + 30868, + 0, + 0, + 30869, + 0, + 0, + 0, + 30881, + 30883, + 0, + 0, + 0, + 0, + 0, + 30889, + 0, + 30891, + 0, + 0, + 0, + 0, + 30894, + 0, + 30895, + 0, + 30897, + 0, + 30898, + 0, + 0, + 0, + 30904, + 30906, + 0, + 30909, + 0, + 0, + 0, + 0, + 0, + 0, + 30910, + 0, + 0, + 0, + 30915, + 30933, + 30942, + 0, + 0, + 0, + 0, + 30943, + 0, + 0, + 30945, + 0, + 0, + 0, + 0, + 0, + 0, + 30946, + 0, + 0, + 
30947, + 0, + 0, + 30955, + 30956, + 0, + 0, + 30960, + 0, + 0, + 30961, + 30962, + 30966, + 0, + 0, + 30969, + 30974, + 0, + 0, + 0, + 30976, + 0, + 0, + 30977, + 0, + 30978, + 30982, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 30994, + 30995, + 30998, + 0, + 31000, + 0, + 0, + 31001, + 0, + 0, + 31003, + 31005, + 0, + 0, + 31006, + 31011, + 0, + 0, + 31014, + 0, + 31016, + 0, + 0, + 0, + 0, + 31018, + 0, + 0, + 31020, + 31023, + 31024, + 31025, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31027, + 31028, + 31029, + 0, + 0, + 0, + 0, + 0, + 0, + 31032, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31036, + 31037, + 31038, + 0, + 0, + 0, + 31041, + 31043, + 31045, + 0, + 31047, + 0, + 0, + 0, + 31048, + 0, + 31049, + 0, + 0, + 0, + 31053, + 31054, + 31055, + 0, + 0, + 31063, + 0, + 0, + 0, + 0, + 0, + 31066, + 0, + 31068, + 31071, + 0, + 0, + 0, + 31072, + 31073, + 0, + 0, + 0, + 0, + 31075, + 0, + 0, + 31076, + 0, + 0, + 0, + 31077, + 31079, + 0, + 31080, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31087, + 0, + 31142, + 0, + 31144, + 0, + 0, + 31145, + 31146, + 31147, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31149, + 0, + 31151, + 31152, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31162, + 31171, + 31174, + 31175, + 0, + 0, + 0, + 31176, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31179, + 0, + 0, + 0, + 31186, + 0, + 0, + 0, + 31192, + 31195, + 0, + 0, + 31196, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31198, + 0, + 0, + 0, + 0, + 0, + 31199, + 0, + 0, + 0, + 31205, + 0, + 0, + 0, + 0, + 31211, + 31215, + 0, + 0, + 0, + 0, + 31231, + 0, + 31232, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31233, + 31236, + 31253, + 0, + 31254, + 0, + 0, + 0, + 0, + 0, + 0, + 31255, + 0, + 0, + 31257, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31258, + 31259, + 0, + 0, + 31260, + 0, + 31261, + 0, + 0, + 0, + 0, + 0, + 31262, + 31263, + 0, + 0, + 31264, + 0, + 31266, + 0, + 31267, + 0, + 0, + 0, + 0, + 0, + 31281, 
+ 0, + 31282, + 0, + 31284, + 0, + 0, + 31285, + 31287, + 31288, + 0, + 0, + 31290, + 0, + 0, + 0, + 31292, + 31295, + 0, + 31299, + 0, + 31300, + 0, + 0, + 0, + 0, + 0, + 31302, + 0, + 0, + 0, + 0, + 31303, + 0, + 0, + 0, + 0, + 0, + 0, + 31304, + 0, + 0, + 0, + 0, + 0, + 31305, + 31308, + 31309, + 31315, + 0, + 31317, + 0, + 0, + 0, + 0, + 0, + 31323, + 0, + 31324, + 0, + 0, + 0, + 0, + 0, + 31325, + 31327, + 0, + 0, + 31331, + 0, + 0, + 0, + 0, + 0, + 31333, + 0, + 0, + 0, + 0, + 0, + 31336, + 0, + 0, + 31337, + 0, + 0, + 0, + 0, + 0, + 0, + 31338, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31339, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31342, + 0, + 0, + 0, + 0, + 31345, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31347, + 0, + 0, + 0, + 0, + 0, + 0, + 31348, + 0, + 0, + 31350, + 31351, + 0, + 31352, + 0, + 0, + 31354, + 0, + 0, + 0, + 0, + 31355, + 0, + 0, + 31356, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31363, + 0, + 31372, + 0, + 0, + 31373, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31376, + 0, + 31388, + 0, + 31389, + 0, + 31392, + 0, + 31401, + 0, + 31405, + 31407, + 31408, + 0, + 31409, + 0, + 0, + 0, + 0, + 0, + 0, + 31413, + 31415, + 0, + 0, + 0, + 31416, + 31418, + 0, + 0, + 0, + 0, + 0, + 0, + 31422, + 31423, + 0, + 0, + 31424, + 0, + 31425, + 31432, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31433, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31434, + 0, + 0, + 0, + 0, + 0, + 0, + 31435, + 0, + 0, + 0, + 0, + 31438, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31442, + 0, + 31444, + 0, + 31448, + 0, + 0, + 31451, + 0, + 0, + 0, + 0, + 31452, + 0, + 31461, + 31465, + 0, + 0, + 31466, + 0, + 0, + 31467, + 0, + 0, + 31468, + 0, + 0, + 0, + 31469, + 31473, + 0, + 31476, + 0, + 0, + 0, + 0, + 31489, + 31490, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31492, + 31493, + 31494, + 0, + 0, + 0, + 0, + 31501, + 31504, + 31505, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31509, + 0, + 0, 
+ 0, + 0, + 31510, + 0, + 0, + 31511, + 0, + 0, + 31513, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31514, + 0, + 31522, + 31536, + 31539, + 31540, + 0, + 31541, + 0, + 0, + 0, + 0, + 0, + 0, + 31546, + 31553, + 31559, + 0, + 0, + 0, + 31560, + 31561, + 31562, + 0, + 0, + 31564, + 31567, + 0, + 31569, + 0, + 0, + 0, + 31570, + 0, + 0, + 0, + 0, + 31571, + 0, + 0, + 0, + 0, + 0, + 0, + 31572, + 31574, + 31580, + 31581, + 0, + 0, + 31582, + 31584, + 31585, + 31586, + 31595, + 0, + 31596, + 0, + 0, + 0, + 0, + 31597, + 0, + 31599, + 0, + 31600, + 31601, + 0, + 0, + 31603, + 31604, + 0, + 0, + 31608, + 31610, + 0, + 0, + 0, + 31611, + 0, + 31615, + 0, + 0, + 0, + 0, + 31616, + 0, + 0, + 0, + 0, + 0, + 0, + 31617, + 0, + 0, + 0, + 0, + 0, + 31618, + 0, + 0, + 0, + 0, + 0, + 0, + 31621, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31622, + 31625, + 0, + 0, + 0, + 0, + 31627, + 0, + 31641, + 0, + 0, + 31642, + 0, + 0, + 31643, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31644, + 0, + 31646, + 0, + 0, + 0, + 0, + 31648, + 0, + 0, + 0, + 31652, + 0, + 0, + 0, + 31657, + 0, + 0, + 31676, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 31689, + 31691, + 31692, + 0, + 31694, + 0, + 0, + 0, + 31696, + 0, + 31702, + 0, + 31703, + 0, +} + +var kStaticDictionaryWords = [31705]dictWord{ + dictWord{0, 0, 0}, + dictWord{8, 0, 1002}, + dictWord{136, 0, 1015}, + dictWord{4, 0, 683}, + dictWord{4, 10, 325}, + dictWord{138, 10, 125}, + dictWord{7, 11, 572}, + dictWord{ + 9, + 11, + 592, + }, + dictWord{11, 11, 680}, + dictWord{11, 11, 842}, + dictWord{11, 11, 924}, + dictWord{12, 11, 356}, + dictWord{12, 11, 550}, + dictWord{13, 11, 317}, + dictWord{13, 11, 370}, + dictWord{13, 11, 469}, + dictWord{13, 11, 471}, + dictWord{14, 11, 397}, + dictWord{18, 11, 69}, + dictWord{146, 11, 145}, + dictWord{ + 134, + 0, + 1265, + }, + dictWord{136, 11, 534}, + dictWord{134, 0, 1431}, + dictWord{11, 0, 138}, + dictWord{140, 0, 40}, + dictWord{4, 0, 155}, + dictWord{7, 0, 1689}, + dictWord{ + 4, + 10, 
+ 718, + }, + dictWord{135, 10, 1216}, + dictWord{4, 0, 245}, + dictWord{5, 0, 151}, + dictWord{5, 0, 741}, + dictWord{6, 0, 1147}, + dictWord{7, 0, 498}, + dictWord{7, 0, 870}, + dictWord{7, 0, 1542}, + dictWord{12, 0, 213}, + dictWord{14, 0, 36}, + dictWord{14, 0, 391}, + dictWord{17, 0, 111}, + dictWord{18, 0, 6}, + dictWord{18, 0, 46}, + dictWord{ + 18, + 0, + 151, + }, + dictWord{19, 0, 36}, + dictWord{20, 0, 32}, + dictWord{20, 0, 56}, + dictWord{20, 0, 69}, + dictWord{20, 0, 102}, + dictWord{21, 0, 4}, + dictWord{22, 0, 8}, + dictWord{ + 22, + 0, + 10, + }, + dictWord{22, 0, 14}, + dictWord{150, 0, 31}, + dictWord{4, 0, 624}, + dictWord{135, 0, 1752}, + dictWord{5, 10, 124}, + dictWord{5, 10, 144}, + dictWord{6, 10, 548}, + dictWord{7, 10, 15}, + dictWord{7, 10, 153}, + dictWord{137, 10, 629}, + dictWord{6, 0, 503}, + dictWord{9, 0, 586}, + dictWord{13, 0, 468}, + dictWord{14, 0, 66}, + dictWord{ + 16, + 0, + 58, + }, + dictWord{7, 10, 1531}, + dictWord{8, 10, 416}, + dictWord{9, 10, 275}, + dictWord{10, 10, 100}, + dictWord{11, 10, 658}, + dictWord{11, 10, 979}, + dictWord{ + 12, + 10, + 86, + }, + dictWord{14, 10, 207}, + dictWord{15, 10, 20}, + dictWord{143, 10, 25}, + dictWord{5, 0, 603}, + dictWord{7, 0, 1212}, + dictWord{9, 0, 565}, + dictWord{ + 14, + 0, + 301, + }, + dictWord{5, 10, 915}, + dictWord{6, 10, 1783}, + dictWord{7, 10, 211}, + dictWord{7, 10, 1353}, + dictWord{9, 10, 83}, + dictWord{10, 10, 376}, + dictWord{ + 10, + 10, + 431, + }, + dictWord{11, 10, 543}, + dictWord{12, 10, 664}, + dictWord{13, 10, 280}, + dictWord{13, 10, 428}, + dictWord{14, 10, 128}, + dictWord{17, 10, 52}, + dictWord{ + 145, + 10, + 81, + }, + dictWord{4, 0, 492}, + dictWord{133, 0, 451}, + dictWord{135, 0, 835}, + dictWord{141, 0, 70}, + dictWord{132, 0, 539}, + dictWord{7, 11, 748}, + dictWord{ + 139, + 11, + 700, + }, + dictWord{7, 11, 1517}, + dictWord{11, 11, 597}, + dictWord{14, 11, 76}, + dictWord{14, 11, 335}, + dictWord{148, 11, 33}, + dictWord{6, 0, 113}, + 
dictWord{135, 0, 436}, + dictWord{4, 10, 338}, + dictWord{133, 10, 400}, + dictWord{136, 0, 718}, + dictWord{133, 11, 127}, + dictWord{133, 11, 418}, + dictWord{ + 6, + 0, + 1505, + }, + dictWord{7, 0, 520}, + dictWord{6, 11, 198}, + dictWord{11, 10, 892}, + dictWord{140, 11, 83}, + dictWord{4, 10, 221}, + dictWord{5, 10, 659}, + dictWord{ + 5, + 10, + 989, + }, + dictWord{7, 10, 697}, + dictWord{7, 10, 1211}, + dictWord{138, 10, 284}, + dictWord{135, 0, 1070}, + dictWord{5, 11, 276}, + dictWord{6, 11, 55}, + dictWord{ + 135, + 11, + 1369, + }, + dictWord{134, 0, 1515}, + dictWord{6, 11, 1752}, + dictWord{136, 11, 726}, + dictWord{138, 10, 507}, + dictWord{15, 0, 78}, + dictWord{4, 10, 188}, + dictWord{135, 10, 805}, + dictWord{5, 10, 884}, + dictWord{139, 10, 991}, + dictWord{133, 11, 764}, + dictWord{134, 10, 1653}, + dictWord{6, 11, 309}, + dictWord{ + 7, + 11, + 331, + }, + dictWord{138, 11, 550}, + dictWord{135, 11, 1861}, + dictWord{132, 11, 348}, + dictWord{135, 11, 986}, + dictWord{135, 11, 1573}, + dictWord{ + 12, + 0, + 610, + }, + dictWord{13, 0, 431}, + dictWord{144, 0, 59}, + dictWord{9, 11, 799}, + dictWord{140, 10, 166}, + dictWord{134, 0, 1530}, + dictWord{132, 0, 750}, + dictWord{132, 0, 307}, + dictWord{133, 0, 964}, + dictWord{6, 11, 194}, + dictWord{7, 11, 133}, + dictWord{10, 11, 493}, + dictWord{10, 11, 570}, + dictWord{139, 11, 664}, + dictWord{5, 11, 24}, + dictWord{5, 11, 569}, + dictWord{6, 11, 3}, + dictWord{6, 11, 119}, + dictWord{6, 11, 143}, + dictWord{6, 11, 440}, + dictWord{7, 11, 295}, + dictWord{ + 7, + 11, + 599, + }, + dictWord{7, 11, 1686}, + dictWord{7, 11, 1854}, + dictWord{8, 11, 424}, + dictWord{9, 11, 43}, + dictWord{9, 11, 584}, + dictWord{9, 11, 760}, + dictWord{ + 10, + 11, + 148, + }, + dictWord{10, 11, 328}, + dictWord{11, 11, 159}, + dictWord{11, 11, 253}, + dictWord{11, 11, 506}, + dictWord{12, 11, 487}, + dictWord{12, 11, 531}, + dictWord{144, 11, 33}, + dictWord{136, 10, 760}, + dictWord{5, 11, 14}, + dictWord{5, 
11, 892}, + dictWord{6, 11, 283}, + dictWord{7, 11, 234}, + dictWord{136, 11, 537}, + dictWord{135, 11, 1251}, + dictWord{4, 11, 126}, + dictWord{8, 11, 635}, + dictWord{147, 11, 34}, + dictWord{4, 11, 316}, + dictWord{135, 11, 1561}, + dictWord{ + 6, + 0, + 999, + }, + dictWord{6, 0, 1310}, + dictWord{137, 11, 861}, + dictWord{4, 11, 64}, + dictWord{5, 11, 352}, + dictWord{5, 11, 720}, + dictWord{6, 11, 368}, + dictWord{ + 139, + 11, + 359, + }, + dictWord{4, 0, 75}, + dictWord{5, 0, 180}, + dictWord{6, 0, 500}, + dictWord{7, 0, 58}, + dictWord{7, 0, 710}, + dictWord{10, 0, 645}, + dictWord{136, 10, 770}, + dictWord{133, 0, 649}, + dictWord{6, 0, 276}, + dictWord{7, 0, 282}, + dictWord{7, 0, 879}, + dictWord{7, 0, 924}, + dictWord{8, 0, 459}, + dictWord{9, 0, 599}, + dictWord{9, 0, 754}, + dictWord{11, 0, 574}, + dictWord{12, 0, 128}, + dictWord{12, 0, 494}, + dictWord{13, 0, 52}, + dictWord{13, 0, 301}, + dictWord{15, 0, 30}, + dictWord{143, 0, 132}, + dictWord{132, 0, 200}, + dictWord{4, 10, 89}, + dictWord{5, 10, 489}, + dictWord{6, 10, 315}, + dictWord{7, 10, 553}, + dictWord{7, 10, 1745}, + dictWord{138, 10, 243}, + dictWord{135, 11, 1050}, + dictWord{7, 0, 1621}, + dictWord{6, 10, 1658}, + dictWord{9, 10, 3}, + dictWord{10, 10, 154}, + dictWord{11, 10, 641}, + dictWord{13, 10, 85}, + dictWord{13, 10, 201}, + dictWord{141, 10, 346}, + dictWord{6, 11, 175}, + dictWord{137, 11, 289}, + dictWord{5, 11, 432}, + dictWord{133, 11, 913}, + dictWord{ + 6, + 0, + 225, + }, + dictWord{137, 0, 211}, + dictWord{7, 0, 718}, + dictWord{8, 0, 687}, + dictWord{139, 0, 374}, + dictWord{4, 10, 166}, + dictWord{133, 10, 505}, + dictWord{ + 9, + 0, + 110, + }, + dictWord{134, 10, 1670}, + dictWord{8, 0, 58}, + dictWord{9, 0, 724}, + dictWord{11, 0, 809}, + dictWord{13, 0, 113}, + dictWord{145, 0, 72}, + dictWord{6, 0, 345}, + dictWord{7, 0, 1247}, + dictWord{144, 11, 82}, + dictWord{5, 11, 931}, + dictWord{134, 11, 1698}, + dictWord{8, 0, 767}, + dictWord{8, 0, 803}, + 
dictWord{9, 0, 301}, + dictWord{137, 0, 903}, + dictWord{139, 0, 203}, + dictWord{134, 0, 1154}, + dictWord{7, 0, 1949}, + dictWord{136, 0, 674}, + dictWord{134, 0, 259}, + dictWord{ + 135, + 0, + 1275, + }, + dictWord{5, 11, 774}, + dictWord{6, 11, 1637}, + dictWord{6, 11, 1686}, + dictWord{134, 11, 1751}, + dictWord{134, 0, 1231}, + dictWord{7, 10, 445}, + dictWord{8, 10, 307}, + dictWord{8, 10, 704}, + dictWord{10, 10, 41}, + dictWord{10, 10, 439}, + dictWord{11, 10, 237}, + dictWord{11, 10, 622}, + dictWord{140, 10, 201}, + dictWord{136, 0, 254}, + dictWord{6, 11, 260}, + dictWord{135, 11, 1484}, + dictWord{139, 0, 277}, + dictWord{135, 10, 1977}, + dictWord{4, 10, 189}, + dictWord{ + 5, + 10, + 713, + }, + dictWord{6, 11, 573}, + dictWord{136, 10, 57}, + dictWord{138, 10, 371}, + dictWord{132, 10, 552}, + dictWord{134, 11, 344}, + dictWord{133, 0, 248}, + dictWord{9, 0, 800}, + dictWord{10, 0, 693}, + dictWord{11, 0, 482}, + dictWord{11, 0, 734}, + dictWord{11, 0, 789}, + dictWord{134, 11, 240}, + dictWord{4, 0, 116}, + dictWord{ + 5, + 0, + 95, + }, + dictWord{5, 0, 445}, + dictWord{7, 0, 1688}, + dictWord{8, 0, 29}, + dictWord{9, 0, 272}, + dictWord{11, 0, 509}, + dictWord{11, 0, 915}, + dictWord{4, 11, 292}, + dictWord{4, 11, 736}, + dictWord{5, 11, 871}, + dictWord{6, 11, 171}, + dictWord{6, 11, 1689}, + dictWord{7, 11, 1324}, + dictWord{7, 11, 1944}, + dictWord{9, 11, 415}, + dictWord{9, 11, 580}, + dictWord{14, 11, 230}, + dictWord{146, 11, 68}, + dictWord{7, 0, 490}, + dictWord{13, 0, 100}, + dictWord{143, 0, 75}, + dictWord{135, 0, 1641}, + dictWord{133, 0, 543}, + dictWord{7, 11, 209}, + dictWord{8, 11, 661}, + dictWord{10, 11, 42}, + dictWord{11, 11, 58}, + dictWord{12, 11, 58}, + dictWord{12, 11, 118}, + dictWord{141, 11, 32}, + dictWord{5, 0, 181}, + dictWord{8, 0, 41}, + dictWord{6, 11, 63}, + dictWord{135, 11, 920}, + dictWord{133, 0, 657}, + dictWord{133, 11, 793}, + dictWord{138, 0, 709}, + dictWord{7, 0, 25}, + dictWord{8, 0, 202}, + 
dictWord{138, 0, 536}, + dictWord{5, 11, 665}, + dictWord{135, 10, 1788}, + dictWord{145, 10, 49}, + dictWord{9, 0, 423}, + dictWord{140, 0, 89}, + dictWord{5, 11, 67}, + dictWord{6, 11, 62}, + dictWord{6, 11, 374}, + dictWord{135, 11, 1391}, + dictWord{8, 0, 113}, + dictWord{ + 9, + 0, + 877, + }, + dictWord{10, 0, 554}, + dictWord{11, 0, 83}, + dictWord{12, 0, 136}, + dictWord{19, 0, 109}, + dictWord{9, 11, 790}, + dictWord{140, 11, 47}, + dictWord{ + 138, + 10, + 661, + }, + dictWord{4, 0, 963}, + dictWord{10, 0, 927}, + dictWord{14, 0, 442}, + dictWord{135, 10, 1945}, + dictWord{133, 0, 976}, + dictWord{132, 0, 206}, + dictWord{ + 4, + 11, + 391, + }, + dictWord{135, 11, 1169}, + dictWord{134, 0, 2002}, + dictWord{6, 0, 696}, + dictWord{134, 0, 1008}, + dictWord{134, 0, 1170}, + dictWord{132, 11, 271}, + dictWord{7, 0, 13}, + dictWord{8, 0, 226}, + dictWord{10, 0, 537}, + dictWord{11, 0, 570}, + dictWord{11, 0, 605}, + dictWord{11, 0, 799}, + dictWord{11, 0, 804}, + dictWord{ + 12, + 0, + 85, + }, + dictWord{12, 0, 516}, + dictWord{12, 0, 623}, + dictWord{13, 0, 112}, + dictWord{13, 0, 361}, + dictWord{14, 0, 77}, + dictWord{14, 0, 78}, + dictWord{17, 0, 28}, + dictWord{19, 0, 110}, + dictWord{140, 11, 314}, + dictWord{132, 0, 769}, + dictWord{134, 0, 1544}, + dictWord{4, 0, 551}, + dictWord{137, 0, 678}, + dictWord{5, 10, 84}, + dictWord{134, 10, 163}, + dictWord{9, 0, 57}, + dictWord{9, 0, 459}, + dictWord{10, 0, 425}, + dictWord{11, 0, 119}, + dictWord{12, 0, 184}, + dictWord{12, 0, 371}, + dictWord{ + 13, + 0, + 358, + }, + dictWord{145, 0, 51}, + dictWord{5, 0, 188}, + dictWord{5, 0, 814}, + dictWord{8, 0, 10}, + dictWord{9, 0, 421}, + dictWord{9, 0, 729}, + dictWord{10, 0, 609}, + dictWord{11, 0, 689}, + dictWord{4, 11, 253}, + dictWord{5, 10, 410}, + dictWord{5, 11, 544}, + dictWord{7, 11, 300}, + dictWord{137, 11, 340}, + dictWord{134, 0, 624}, + dictWord{138, 11, 321}, + dictWord{135, 0, 1941}, + dictWord{18, 0, 130}, + dictWord{5, 10, 322}, + 
dictWord{8, 10, 186}, + dictWord{9, 10, 262}, + dictWord{10, 10, 187}, + dictWord{142, 10, 208}, + dictWord{5, 11, 53}, + dictWord{5, 11, 541}, + dictWord{6, 11, 94}, + dictWord{6, 11, 499}, + dictWord{7, 11, 230}, + dictWord{139, 11, 321}, + dictWord{133, 10, 227}, + dictWord{4, 0, 378}, + dictWord{4, 11, 920}, + dictWord{5, 11, 25}, + dictWord{5, 11, 790}, + dictWord{6, 11, 457}, + dictWord{135, 11, 853}, + dictWord{137, 0, 269}, + dictWord{132, 0, 528}, + dictWord{134, 0, 1146}, + dictWord{7, 10, 1395}, + dictWord{8, 10, 486}, + dictWord{9, 10, 236}, + dictWord{9, 10, 878}, + dictWord{10, 10, 218}, + dictWord{11, 10, 95}, + dictWord{19, 10, 17}, + dictWord{147, 10, 31}, + dictWord{7, 10, 2043}, + dictWord{8, 10, 672}, + dictWord{ + 141, + 10, + 448, + }, + dictWord{134, 0, 1105}, + dictWord{134, 0, 1616}, + dictWord{134, 11, 1765}, + dictWord{140, 11, 163}, + dictWord{5, 10, 412}, + dictWord{133, 11, 822}, + dictWord{132, 11, 634}, + dictWord{6, 0, 656}, + dictWord{134, 11, 1730}, + dictWord{134, 0, 1940}, + dictWord{5, 0, 104}, + dictWord{6, 0, 173}, + dictWord{ + 135, + 0, + 1631, + }, + dictWord{136, 10, 562}, + dictWord{6, 11, 36}, + dictWord{7, 11, 658}, + dictWord{8, 11, 454}, + dictWord{147, 11, 86}, + dictWord{5, 0, 457}, + dictWord{ + 134, + 10, + 1771, + }, + dictWord{7, 0, 810}, + dictWord{8, 0, 138}, + dictWord{8, 0, 342}, + dictWord{9, 0, 84}, + dictWord{10, 0, 193}, + dictWord{11, 0, 883}, + dictWord{140, 0, 359}, + dictWord{9, 0, 620}, + dictWord{135, 10, 1190}, + dictWord{137, 10, 132}, + dictWord{7, 11, 975}, + dictWord{137, 11, 789}, + dictWord{6, 0, 95}, + dictWord{6, 0, 1934}, + dictWord{136, 0, 967}, + dictWord{141, 11, 335}, + dictWord{6, 0, 406}, + dictWord{10, 0, 409}, + dictWord{10, 0, 447}, + dictWord{11, 0, 44}, + dictWord{140, 0, 100}, + dictWord{4, 10, 317}, + dictWord{135, 10, 1279}, + dictWord{132, 0, 477}, + dictWord{134, 0, 1268}, + dictWord{6, 0, 1941}, + dictWord{8, 0, 944}, + dictWord{5, 10, 63}, + dictWord{133, 10, 509}, + 
dictWord{132, 0, 629}, + dictWord{132, 11, 104}, + dictWord{4, 0, 246}, + dictWord{133, 0, 375}, + dictWord{6, 0, 1636}, + dictWord{ + 132, + 10, + 288, + }, + dictWord{135, 11, 1614}, + dictWord{9, 0, 49}, + dictWord{10, 0, 774}, + dictWord{8, 10, 89}, + dictWord{8, 10, 620}, + dictWord{11, 10, 628}, + dictWord{ + 12, + 10, + 322, + }, + dictWord{143, 10, 124}, + dictWord{4, 0, 282}, + dictWord{7, 0, 1034}, + dictWord{11, 0, 398}, + dictWord{11, 0, 634}, + dictWord{12, 0, 1}, + dictWord{12, 0, 79}, + dictWord{12, 0, 544}, + dictWord{14, 0, 237}, + dictWord{17, 0, 10}, + dictWord{146, 0, 20}, + dictWord{132, 0, 824}, + dictWord{7, 11, 45}, + dictWord{9, 11, 542}, + dictWord{ + 9, + 11, + 566, + }, + dictWord{138, 11, 728}, + dictWord{5, 0, 118}, + dictWord{5, 0, 499}, + dictWord{6, 0, 476}, + dictWord{6, 0, 665}, + dictWord{6, 0, 1176}, + dictWord{ + 6, + 0, + 1196, + }, + dictWord{7, 0, 600}, + dictWord{7, 0, 888}, + dictWord{135, 0, 1096}, + dictWord{7, 0, 296}, + dictWord{7, 0, 596}, + dictWord{8, 0, 560}, + dictWord{8, 0, 586}, + dictWord{9, 0, 612}, + dictWord{11, 0, 304}, + dictWord{12, 0, 46}, + dictWord{13, 0, 89}, + dictWord{14, 0, 112}, + dictWord{145, 0, 122}, + dictWord{5, 0, 894}, + dictWord{ + 6, + 0, + 1772, + }, + dictWord{9, 0, 1009}, + dictWord{138, 10, 120}, + dictWord{5, 11, 533}, + dictWord{7, 11, 755}, + dictWord{138, 11, 780}, + dictWord{151, 10, 1}, + dictWord{ + 6, + 0, + 1474, + }, + dictWord{7, 11, 87}, + dictWord{142, 11, 288}, + dictWord{139, 0, 366}, + dictWord{137, 10, 461}, + dictWord{7, 11, 988}, + dictWord{7, 11, 1939}, + dictWord{ + 9, + 11, + 64, + }, + dictWord{9, 11, 502}, + dictWord{12, 11, 7}, + dictWord{12, 11, 34}, + dictWord{13, 11, 12}, + dictWord{13, 11, 234}, + dictWord{147, 11, 77}, + dictWord{ + 7, + 0, + 1599, + }, + dictWord{7, 0, 1723}, + dictWord{8, 0, 79}, + dictWord{8, 0, 106}, + dictWord{8, 0, 190}, + dictWord{8, 0, 302}, + dictWord{8, 0, 383}, + dictWord{8, 0, 713}, + dictWord{ + 9, + 0, + 119, + }, + 
dictWord{9, 0, 233}, + dictWord{9, 0, 419}, + dictWord{9, 0, 471}, + dictWord{10, 0, 181}, + dictWord{10, 0, 406}, + dictWord{11, 0, 57}, + dictWord{11, 0, 85}, + dictWord{11, 0, 120}, + dictWord{11, 0, 177}, + dictWord{11, 0, 296}, + dictWord{11, 0, 382}, + dictWord{11, 0, 454}, + dictWord{11, 0, 758}, + dictWord{11, 0, 999}, + dictWord{ + 12, + 0, + 27, + }, + dictWord{12, 0, 98}, + dictWord{12, 0, 131}, + dictWord{12, 0, 245}, + dictWord{12, 0, 312}, + dictWord{12, 0, 446}, + dictWord{12, 0, 454}, + dictWord{13, 0, 25}, + dictWord{13, 0, 98}, + dictWord{13, 0, 426}, + dictWord{13, 0, 508}, + dictWord{14, 0, 70}, + dictWord{14, 0, 163}, + dictWord{14, 0, 272}, + dictWord{14, 0, 277}, + dictWord{ + 14, + 0, + 370, + }, + dictWord{15, 0, 95}, + dictWord{15, 0, 138}, + dictWord{15, 0, 167}, + dictWord{17, 0, 38}, + dictWord{148, 0, 96}, + dictWord{135, 10, 1346}, + dictWord{ + 10, + 0, + 200, + }, + dictWord{19, 0, 2}, + dictWord{151, 0, 22}, + dictWord{135, 11, 141}, + dictWord{134, 10, 85}, + dictWord{134, 0, 1759}, + dictWord{138, 0, 372}, + dictWord{ + 145, + 0, + 16, + }, + dictWord{8, 0, 943}, + dictWord{132, 11, 619}, + dictWord{139, 11, 88}, + dictWord{5, 11, 246}, + dictWord{8, 11, 189}, + dictWord{9, 11, 355}, + dictWord{ + 9, + 11, + 512, + }, + dictWord{10, 11, 124}, + dictWord{10, 11, 453}, + dictWord{11, 11, 143}, + dictWord{11, 11, 416}, + dictWord{11, 11, 859}, + dictWord{141, 11, 341}, + dictWord{ + 5, + 0, + 258, + }, + dictWord{134, 0, 719}, + dictWord{6, 0, 1798}, + dictWord{6, 0, 1839}, + dictWord{8, 0, 900}, + dictWord{10, 0, 874}, + dictWord{10, 0, 886}, + dictWord{ + 12, + 0, + 698, + }, + dictWord{12, 0, 732}, + dictWord{12, 0, 770}, + dictWord{16, 0, 106}, + dictWord{18, 0, 163}, + dictWord{18, 0, 170}, + dictWord{18, 0, 171}, + dictWord{152, 0, 20}, + dictWord{9, 0, 707}, + dictWord{11, 0, 326}, + dictWord{11, 0, 339}, + dictWord{12, 0, 423}, + dictWord{12, 0, 502}, + dictWord{20, 0, 62}, + dictWord{9, 11, 707}, + dictWord{ + 11, + 11, + 
326, + }, + dictWord{11, 11, 339}, + dictWord{12, 11, 423}, + dictWord{12, 11, 502}, + dictWord{148, 11, 62}, + dictWord{5, 0, 30}, + dictWord{7, 0, 495}, + dictWord{ + 8, + 0, + 134, + }, + dictWord{9, 0, 788}, + dictWord{140, 0, 438}, + dictWord{133, 11, 678}, + dictWord{5, 10, 279}, + dictWord{6, 10, 235}, + dictWord{7, 10, 468}, + dictWord{ + 8, + 10, + 446, + }, + dictWord{9, 10, 637}, + dictWord{10, 10, 717}, + dictWord{11, 10, 738}, + dictWord{140, 10, 514}, + dictWord{5, 11, 35}, + dictWord{6, 11, 287}, + dictWord{ + 7, + 11, + 862, + }, + dictWord{7, 11, 1886}, + dictWord{138, 11, 179}, + dictWord{7, 0, 1948}, + dictWord{7, 0, 2004}, + dictWord{132, 11, 517}, + dictWord{5, 10, 17}, + dictWord{ + 6, + 10, + 371, + }, + dictWord{137, 10, 528}, + dictWord{4, 0, 115}, + dictWord{5, 0, 669}, + dictWord{6, 0, 407}, + dictWord{8, 0, 311}, + dictWord{11, 0, 10}, + dictWord{141, 0, 5}, + dictWord{137, 0, 381}, + dictWord{5, 0, 50}, + dictWord{6, 0, 439}, + dictWord{7, 0, 780}, + dictWord{135, 0, 1040}, + dictWord{136, 11, 667}, + dictWord{11, 11, 403}, + dictWord{146, 11, 83}, + dictWord{5, 0, 1}, + dictWord{6, 0, 81}, + dictWord{138, 0, 520}, + dictWord{134, 0, 738}, + dictWord{5, 0, 482}, + dictWord{8, 0, 98}, + dictWord{9, 0, 172}, + dictWord{10, 0, 360}, + dictWord{10, 0, 700}, + dictWord{10, 0, 822}, + dictWord{11, 0, 302}, + dictWord{11, 0, 778}, + dictWord{12, 0, 50}, + dictWord{12, 0, 127}, + dictWord{ + 12, + 0, + 396, + }, + dictWord{13, 0, 62}, + dictWord{13, 0, 328}, + dictWord{14, 0, 122}, + dictWord{147, 0, 72}, + dictWord{9, 11, 157}, + dictWord{10, 11, 131}, + dictWord{ + 140, + 11, + 72, + }, + dictWord{135, 11, 714}, + dictWord{135, 11, 539}, + dictWord{5, 0, 2}, + dictWord{6, 0, 512}, + dictWord{7, 0, 797}, + dictWord{7, 0, 1494}, + dictWord{8, 0, 253}, + dictWord{8, 0, 589}, + dictWord{9, 0, 77}, + dictWord{10, 0, 1}, + dictWord{10, 0, 129}, + dictWord{10, 0, 225}, + dictWord{11, 0, 118}, + dictWord{11, 0, 226}, + dictWord{ + 11, + 0, + 251, + 
}, + dictWord{11, 0, 430}, + dictWord{11, 0, 701}, + dictWord{11, 0, 974}, + dictWord{11, 0, 982}, + dictWord{12, 0, 64}, + dictWord{12, 0, 260}, + dictWord{12, 0, 488}, + dictWord{140, 0, 690}, + dictWord{5, 11, 394}, + dictWord{7, 11, 367}, + dictWord{7, 11, 487}, + dictWord{7, 11, 857}, + dictWord{7, 11, 1713}, + dictWord{8, 11, 246}, + dictWord{9, 11, 537}, + dictWord{10, 11, 165}, + dictWord{12, 11, 219}, + dictWord{140, 11, 561}, + dictWord{136, 0, 557}, + dictWord{5, 10, 779}, + dictWord{5, 10, 807}, + dictWord{6, 10, 1655}, + dictWord{134, 10, 1676}, + dictWord{4, 10, 196}, + dictWord{5, 10, 558}, + dictWord{133, 10, 949}, + dictWord{11, 11, 827}, + dictWord{ + 12, + 11, + 56, + }, + dictWord{14, 11, 34}, + dictWord{143, 11, 148}, + dictWord{137, 0, 347}, + dictWord{133, 0, 572}, + dictWord{134, 0, 832}, + dictWord{4, 0, 12}, + dictWord{ + 7, + 0, + 504, + }, + dictWord{7, 0, 522}, + dictWord{7, 0, 809}, + dictWord{8, 0, 797}, + dictWord{141, 0, 88}, + dictWord{4, 10, 752}, + dictWord{133, 11, 449}, + dictWord{7, 11, 86}, + dictWord{8, 11, 103}, + dictWord{145, 11, 69}, + dictWord{7, 11, 2028}, + dictWord{138, 11, 641}, + dictWord{5, 0, 528}, + dictWord{6, 11, 1}, + dictWord{142, 11, 2}, + dictWord{134, 0, 861}, + dictWord{10, 0, 294}, + dictWord{4, 10, 227}, + dictWord{5, 10, 159}, + dictWord{5, 10, 409}, + dictWord{7, 10, 80}, + dictWord{10, 10, 479}, + dictWord{ + 12, + 10, + 418, + }, + dictWord{14, 10, 50}, + dictWord{14, 10, 249}, + dictWord{142, 10, 295}, + dictWord{7, 10, 1470}, + dictWord{8, 10, 66}, + dictWord{8, 10, 137}, + dictWord{ + 8, + 10, + 761, + }, + dictWord{9, 10, 638}, + dictWord{11, 10, 80}, + dictWord{11, 10, 212}, + dictWord{11, 10, 368}, + dictWord{11, 10, 418}, + dictWord{12, 10, 8}, + dictWord{ + 13, + 10, + 15, + }, + dictWord{16, 10, 61}, + dictWord{17, 10, 59}, + dictWord{19, 10, 28}, + dictWord{148, 10, 84}, + dictWord{20, 0, 109}, + dictWord{135, 11, 1148}, + dictWord{ + 6, + 11, + 277, + }, + dictWord{7, 11, 1274}, + 
dictWord{7, 11, 1386}, + dictWord{7, 11, 1392}, + dictWord{12, 11, 129}, + dictWord{146, 11, 87}, + dictWord{6, 11, 187}, + dictWord{7, 11, 39}, + dictWord{7, 11, 1203}, + dictWord{8, 11, 380}, + dictWord{8, 11, 542}, + dictWord{14, 11, 117}, + dictWord{149, 11, 28}, + dictWord{134, 0, 1187}, + dictWord{5, 0, 266}, + dictWord{9, 0, 290}, + dictWord{9, 0, 364}, + dictWord{10, 0, 293}, + dictWord{11, 0, 606}, + dictWord{142, 0, 45}, + dictWord{6, 11, 297}, + dictWord{ + 7, + 11, + 793, + }, + dictWord{139, 11, 938}, + dictWord{4, 0, 50}, + dictWord{6, 0, 594}, + dictWord{9, 0, 121}, + dictWord{10, 0, 49}, + dictWord{10, 0, 412}, + dictWord{139, 0, 834}, + dictWord{136, 0, 748}, + dictWord{7, 11, 464}, + dictWord{8, 11, 438}, + dictWord{11, 11, 105}, + dictWord{11, 11, 363}, + dictWord{12, 11, 231}, + dictWord{ + 14, + 11, + 386, + }, + dictWord{15, 11, 102}, + dictWord{148, 11, 75}, + dictWord{132, 0, 466}, + dictWord{13, 0, 399}, + dictWord{14, 0, 337}, + dictWord{6, 10, 38}, + dictWord{ + 7, + 10, + 1220, + }, + dictWord{8, 10, 185}, + dictWord{8, 10, 256}, + dictWord{9, 10, 22}, + dictWord{9, 10, 331}, + dictWord{10, 10, 738}, + dictWord{11, 10, 205}, + dictWord{ + 11, + 10, + 540, + }, + dictWord{11, 10, 746}, + dictWord{13, 10, 465}, + dictWord{142, 10, 194}, + dictWord{9, 0, 378}, + dictWord{141, 0, 162}, + dictWord{137, 0, 519}, + dictWord{ + 4, + 10, + 159, + }, + dictWord{6, 10, 115}, + dictWord{7, 10, 252}, + dictWord{7, 10, 257}, + dictWord{7, 10, 1928}, + dictWord{8, 10, 69}, + dictWord{9, 10, 384}, + dictWord{ + 10, + 10, + 91, + }, + dictWord{10, 10, 615}, + dictWord{12, 10, 375}, + dictWord{14, 10, 235}, + dictWord{18, 10, 117}, + dictWord{147, 10, 123}, + dictWord{5, 11, 604}, + dictWord{ + 5, + 10, + 911, + }, + dictWord{136, 10, 278}, + dictWord{132, 0, 667}, + dictWord{8, 0, 351}, + dictWord{9, 0, 322}, + dictWord{4, 10, 151}, + dictWord{135, 10, 1567}, + dictWord{134, 0, 902}, + dictWord{133, 10, 990}, + dictWord{12, 0, 180}, + dictWord{5, 10, 
194}, + dictWord{7, 10, 1662}, + dictWord{137, 10, 90}, + dictWord{4, 0, 869}, + dictWord{134, 0, 1996}, + dictWord{134, 0, 813}, + dictWord{133, 10, 425}, + dictWord{137, 11, 761}, + dictWord{132, 0, 260}, + dictWord{133, 10, 971}, + dictWord{ + 5, + 11, + 20, + }, + dictWord{6, 11, 298}, + dictWord{7, 11, 659}, + dictWord{7, 11, 1366}, + dictWord{137, 11, 219}, + dictWord{4, 0, 39}, + dictWord{5, 0, 36}, + dictWord{ + 7, + 0, + 1843, + }, + dictWord{8, 0, 407}, + dictWord{11, 0, 144}, + dictWord{140, 0, 523}, + dictWord{4, 0, 510}, + dictWord{10, 0, 587}, + dictWord{139, 10, 752}, + dictWord{7, 0, 29}, + dictWord{7, 0, 66}, + dictWord{7, 0, 1980}, + dictWord{10, 0, 487}, + dictWord{138, 0, 809}, + dictWord{13, 0, 260}, + dictWord{14, 0, 82}, + dictWord{18, 0, 63}, + dictWord{ + 137, + 10, + 662, + }, + dictWord{5, 10, 72}, + dictWord{6, 10, 264}, + dictWord{7, 10, 21}, + dictWord{7, 10, 46}, + dictWord{7, 10, 2013}, + dictWord{8, 10, 215}, + dictWord{ + 8, + 10, + 513, + }, + dictWord{10, 10, 266}, + dictWord{139, 10, 22}, + dictWord{134, 0, 570}, + dictWord{6, 0, 565}, + dictWord{7, 0, 1667}, + dictWord{4, 11, 439}, + dictWord{ + 10, + 10, + 95, + }, + dictWord{11, 10, 603}, + dictWord{12, 11, 242}, + dictWord{13, 10, 443}, + dictWord{14, 10, 160}, + dictWord{143, 10, 4}, + dictWord{134, 0, 1464}, + dictWord{ + 134, + 10, + 431, + }, + dictWord{9, 0, 372}, + dictWord{15, 0, 2}, + dictWord{19, 0, 10}, + dictWord{19, 0, 18}, + dictWord{5, 10, 874}, + dictWord{6, 10, 1677}, + dictWord{143, 10, 0}, + dictWord{132, 0, 787}, + dictWord{6, 0, 380}, + dictWord{12, 0, 399}, + dictWord{21, 0, 19}, + dictWord{7, 10, 939}, + dictWord{7, 10, 1172}, + dictWord{7, 10, 1671}, + dictWord{9, 10, 540}, + dictWord{10, 10, 696}, + dictWord{11, 10, 265}, + dictWord{11, 10, 732}, + dictWord{11, 10, 928}, + dictWord{11, 10, 937}, + dictWord{ + 141, + 10, + 438, + }, + dictWord{137, 0, 200}, + dictWord{132, 11, 233}, + dictWord{132, 0, 516}, + dictWord{134, 11, 577}, + dictWord{132, 0, 
844}, + dictWord{11, 0, 887}, + dictWord{14, 0, 365}, + dictWord{142, 0, 375}, + dictWord{132, 11, 482}, + dictWord{8, 0, 821}, + dictWord{140, 0, 44}, + dictWord{7, 0, 1655}, + dictWord{136, 0, 305}, + dictWord{5, 10, 682}, + dictWord{135, 10, 1887}, + dictWord{135, 11, 346}, + dictWord{132, 10, 696}, + dictWord{4, 0, 10}, + dictWord{7, 0, 917}, + dictWord{139, 0, 786}, + dictWord{5, 11, 795}, + dictWord{6, 11, 1741}, + dictWord{8, 11, 417}, + dictWord{137, 11, 782}, + dictWord{4, 0, 1016}, + dictWord{134, 0, 2031}, + dictWord{5, 0, 684}, + dictWord{4, 10, 726}, + dictWord{133, 10, 630}, + dictWord{6, 0, 1021}, + dictWord{134, 0, 1480}, + dictWord{8, 10, 802}, + dictWord{136, 10, 838}, + dictWord{ + 134, + 0, + 27, + }, + dictWord{134, 0, 395}, + dictWord{135, 11, 622}, + dictWord{7, 11, 625}, + dictWord{135, 11, 1750}, + dictWord{4, 11, 203}, + dictWord{135, 11, 1936}, + dictWord{6, 10, 118}, + dictWord{7, 10, 215}, + dictWord{7, 10, 1521}, + dictWord{140, 10, 11}, + dictWord{132, 0, 813}, + dictWord{136, 0, 511}, + dictWord{7, 10, 615}, + dictWord{138, 10, 251}, + dictWord{135, 10, 1044}, + dictWord{145, 0, 56}, + dictWord{133, 10, 225}, + dictWord{6, 0, 342}, + dictWord{6, 0, 496}, + dictWord{8, 0, 275}, + dictWord{137, 0, 206}, + dictWord{4, 0, 909}, + dictWord{133, 0, 940}, + dictWord{132, 0, 891}, + dictWord{7, 11, 311}, + dictWord{9, 11, 308}, + dictWord{ + 140, + 11, + 255, + }, + dictWord{4, 10, 370}, + dictWord{5, 10, 756}, + dictWord{135, 10, 1326}, + dictWord{4, 0, 687}, + dictWord{134, 0, 1596}, + dictWord{134, 0, 1342}, + dictWord{ + 6, + 10, + 1662, + }, + dictWord{7, 10, 48}, + dictWord{8, 10, 771}, + dictWord{10, 10, 116}, + dictWord{13, 10, 104}, + dictWord{14, 10, 105}, + dictWord{14, 10, 184}, + dictWord{15, 10, 168}, + dictWord{19, 10, 92}, + dictWord{148, 10, 68}, + dictWord{138, 10, 209}, + dictWord{4, 11, 400}, + dictWord{5, 11, 267}, + dictWord{135, 11, 232}, + dictWord{151, 11, 12}, + dictWord{6, 0, 41}, + dictWord{141, 0, 160}, + 
dictWord{141, 11, 314}, + dictWord{134, 0, 1718}, + dictWord{136, 0, 778}, + dictWord{ + 142, + 11, + 261, + }, + dictWord{134, 0, 1610}, + dictWord{133, 0, 115}, + dictWord{132, 0, 294}, + dictWord{14, 0, 314}, + dictWord{132, 10, 120}, + dictWord{132, 0, 983}, + dictWord{5, 0, 193}, + dictWord{140, 0, 178}, + dictWord{138, 10, 429}, + dictWord{5, 10, 820}, + dictWord{135, 10, 931}, + dictWord{6, 0, 994}, + dictWord{6, 0, 1051}, + dictWord{6, 0, 1439}, + dictWord{7, 0, 174}, + dictWord{133, 11, 732}, + dictWord{4, 11, 100}, + dictWord{7, 11, 679}, + dictWord{8, 11, 313}, + dictWord{138, 10, 199}, + dictWord{6, 10, 151}, + dictWord{6, 10, 1675}, + dictWord{7, 10, 383}, + dictWord{151, 10, 10}, + dictWord{6, 0, 1796}, + dictWord{8, 0, 848}, + dictWord{8, 0, 867}, + dictWord{ + 8, + 0, + 907, + }, + dictWord{10, 0, 855}, + dictWord{140, 0, 703}, + dictWord{140, 0, 221}, + dictWord{4, 0, 122}, + dictWord{5, 0, 796}, + dictWord{5, 0, 952}, + dictWord{6, 0, 1660}, + dictWord{6, 0, 1671}, + dictWord{8, 0, 567}, + dictWord{9, 0, 687}, + dictWord{9, 0, 742}, + dictWord{10, 0, 686}, + dictWord{11, 0, 682}, + dictWord{11, 0, 909}, + dictWord{ + 140, + 0, + 281, + }, + dictWord{5, 11, 362}, + dictWord{5, 11, 443}, + dictWord{6, 11, 318}, + dictWord{7, 11, 1019}, + dictWord{139, 11, 623}, + dictWord{5, 11, 463}, + dictWord{136, 11, 296}, + dictWord{11, 0, 583}, + dictWord{13, 0, 262}, + dictWord{6, 10, 1624}, + dictWord{12, 10, 422}, + dictWord{142, 10, 360}, + dictWord{5, 0, 179}, + dictWord{7, 0, 1095}, + dictWord{135, 0, 1213}, + dictWord{4, 10, 43}, + dictWord{4, 11, 454}, + dictWord{5, 10, 344}, + dictWord{133, 10, 357}, + dictWord{4, 0, 66}, + dictWord{7, 0, 722}, + dictWord{135, 0, 904}, + dictWord{134, 0, 773}, + dictWord{7, 0, 352}, + dictWord{133, 10, 888}, + dictWord{5, 11, 48}, + dictWord{5, 11, 404}, + dictWord{ + 6, + 11, + 557, + }, + dictWord{7, 11, 458}, + dictWord{8, 11, 597}, + dictWord{10, 11, 455}, + dictWord{10, 11, 606}, + dictWord{11, 11, 49}, + 
dictWord{11, 11, 548}, + dictWord{ + 12, + 11, + 476, + }, + dictWord{13, 11, 18}, + dictWord{141, 11, 450}, + dictWord{134, 11, 418}, + dictWord{132, 10, 711}, + dictWord{5, 11, 442}, + dictWord{ + 135, + 11, + 1984, + }, + dictWord{141, 0, 35}, + dictWord{137, 0, 152}, + dictWord{134, 0, 1197}, + dictWord{135, 11, 1093}, + dictWord{137, 11, 203}, + dictWord{137, 10, 440}, + dictWord{10, 0, 592}, + dictWord{10, 0, 753}, + dictWord{12, 0, 317}, + dictWord{12, 0, 355}, + dictWord{12, 0, 465}, + dictWord{12, 0, 469}, + dictWord{12, 0, 560}, + dictWord{12, 0, 578}, + dictWord{141, 0, 243}, + dictWord{133, 0, 564}, + dictWord{134, 0, 797}, + dictWord{5, 10, 958}, + dictWord{133, 10, 987}, + dictWord{5, 11, 55}, + dictWord{7, 11, 376}, + dictWord{140, 11, 161}, + dictWord{133, 11, 450}, + dictWord{134, 0, 556}, + dictWord{134, 0, 819}, + dictWord{11, 10, 276}, + dictWord{ + 142, + 10, + 293, + }, + dictWord{7, 0, 544}, + dictWord{138, 0, 61}, + dictWord{8, 0, 719}, + dictWord{4, 10, 65}, + dictWord{5, 10, 479}, + dictWord{5, 10, 1004}, + dictWord{7, 10, 1913}, + dictWord{8, 10, 317}, + dictWord{9, 10, 302}, + dictWord{10, 10, 612}, + dictWord{141, 10, 22}, + dictWord{4, 0, 5}, + dictWord{5, 0, 498}, + dictWord{8, 0, 637}, + dictWord{ + 9, + 0, + 521, + }, + dictWord{4, 11, 213}, + dictWord{4, 10, 261}, + dictWord{7, 11, 223}, + dictWord{7, 10, 510}, + dictWord{136, 11, 80}, + dictWord{5, 0, 927}, + dictWord{7, 0, 101}, + dictWord{4, 10, 291}, + dictWord{7, 11, 381}, + dictWord{7, 11, 806}, + dictWord{7, 11, 820}, + dictWord{8, 11, 354}, + dictWord{8, 11, 437}, + dictWord{8, 11, 787}, + dictWord{9, 10, 515}, + dictWord{9, 11, 657}, + dictWord{10, 11, 58}, + dictWord{10, 11, 339}, + dictWord{10, 11, 749}, + dictWord{11, 11, 914}, + dictWord{12, 10, 152}, + dictWord{12, 11, 162}, + dictWord{12, 10, 443}, + dictWord{13, 11, 75}, + dictWord{13, 10, 392}, + dictWord{14, 11, 106}, + dictWord{14, 11, 198}, + dictWord{ + 14, + 11, + 320, + }, + dictWord{14, 10, 357}, + 
dictWord{14, 11, 413}, + dictWord{146, 11, 43}, + dictWord{6, 0, 1153}, + dictWord{7, 0, 1441}, + dictWord{136, 11, 747}, + dictWord{ + 4, + 0, + 893, + }, + dictWord{5, 0, 780}, + dictWord{133, 0, 893}, + dictWord{138, 11, 654}, + dictWord{133, 11, 692}, + dictWord{133, 0, 238}, + dictWord{134, 11, 191}, + dictWord{4, 10, 130}, + dictWord{135, 10, 843}, + dictWord{6, 0, 1296}, + dictWord{5, 10, 42}, + dictWord{5, 10, 879}, + dictWord{7, 10, 245}, + dictWord{7, 10, 324}, + dictWord{ + 7, + 10, + 1532, + }, + dictWord{11, 10, 463}, + dictWord{11, 10, 472}, + dictWord{13, 10, 363}, + dictWord{144, 10, 52}, + dictWord{134, 0, 1729}, + dictWord{6, 0, 1999}, + dictWord{136, 0, 969}, + dictWord{4, 10, 134}, + dictWord{133, 10, 372}, + dictWord{4, 0, 60}, + dictWord{7, 0, 941}, + dictWord{7, 0, 1800}, + dictWord{8, 0, 314}, + dictWord{ + 9, + 0, + 700, + }, + dictWord{139, 0, 487}, + dictWord{134, 0, 1144}, + dictWord{6, 11, 162}, + dictWord{7, 11, 1960}, + dictWord{136, 11, 831}, + dictWord{132, 11, 706}, + dictWord{135, 0, 1147}, + dictWord{138, 11, 426}, + dictWord{138, 11, 89}, + dictWord{7, 0, 1853}, + dictWord{138, 0, 437}, + dictWord{136, 0, 419}, + dictWord{ + 135, + 10, + 1634, + }, + dictWord{133, 0, 828}, + dictWord{5, 0, 806}, + dictWord{7, 0, 176}, + dictWord{7, 0, 178}, + dictWord{7, 0, 1240}, + dictWord{7, 0, 1976}, + dictWord{ + 132, + 10, + 644, + }, + dictWord{135, 11, 1877}, + dictWord{5, 11, 420}, + dictWord{135, 11, 1449}, + dictWord{4, 0, 51}, + dictWord{5, 0, 39}, + dictWord{6, 0, 4}, + dictWord{7, 0, 591}, + dictWord{7, 0, 849}, + dictWord{7, 0, 951}, + dictWord{7, 0, 1613}, + dictWord{7, 0, 1760}, + dictWord{7, 0, 1988}, + dictWord{9, 0, 434}, + dictWord{10, 0, 754}, + dictWord{ + 11, + 0, + 25, + }, + dictWord{139, 0, 37}, + dictWord{10, 11, 57}, + dictWord{138, 11, 277}, + dictWord{135, 10, 540}, + dictWord{132, 11, 204}, + dictWord{135, 0, 159}, + dictWord{139, 11, 231}, + dictWord{133, 0, 902}, + dictWord{7, 0, 928}, + dictWord{7, 11, 366}, + 
dictWord{9, 11, 287}, + dictWord{12, 11, 199}, + dictWord{12, 11, 556}, + dictWord{140, 11, 577}, + dictWord{6, 10, 623}, + dictWord{136, 10, 789}, + dictWord{4, 10, 908}, + dictWord{5, 10, 359}, + dictWord{5, 10, 508}, + dictWord{6, 10, 1723}, + dictWord{7, 10, 343}, + dictWord{7, 10, 1996}, + dictWord{135, 10, 2026}, + dictWord{134, 0, 270}, + dictWord{4, 10, 341}, + dictWord{135, 10, 480}, + dictWord{ + 5, + 11, + 356, + }, + dictWord{135, 11, 224}, + dictWord{11, 11, 588}, + dictWord{11, 11, 864}, + dictWord{11, 11, 968}, + dictWord{143, 11, 160}, + dictWord{132, 0, 556}, + dictWord{137, 0, 801}, + dictWord{132, 0, 416}, + dictWord{142, 0, 372}, + dictWord{5, 0, 152}, + dictWord{5, 0, 197}, + dictWord{7, 0, 340}, + dictWord{7, 0, 867}, + dictWord{ + 10, + 0, + 548, + }, + dictWord{10, 0, 581}, + dictWord{11, 0, 6}, + dictWord{12, 0, 3}, + dictWord{12, 0, 19}, + dictWord{14, 0, 110}, + dictWord{142, 0, 289}, + dictWord{139, 0, 369}, + dictWord{7, 11, 630}, + dictWord{9, 11, 567}, + dictWord{11, 11, 150}, + dictWord{11, 11, 444}, + dictWord{141, 11, 119}, + dictWord{134, 11, 539}, + dictWord{ + 7, + 10, + 1995, + }, + dictWord{8, 10, 299}, + dictWord{11, 10, 890}, + dictWord{140, 10, 674}, + dictWord{7, 0, 34}, + dictWord{7, 0, 190}, + dictWord{8, 0, 28}, + dictWord{8, 0, 141}, + dictWord{8, 0, 444}, + dictWord{8, 0, 811}, + dictWord{9, 0, 468}, + dictWord{11, 0, 334}, + dictWord{12, 0, 24}, + dictWord{12, 0, 386}, + dictWord{140, 0, 576}, + dictWord{ + 133, + 0, + 757, + }, + dictWord{7, 0, 1553}, + dictWord{136, 0, 898}, + dictWord{133, 0, 721}, + dictWord{136, 0, 1012}, + dictWord{4, 0, 789}, + dictWord{5, 0, 647}, + dictWord{ + 135, + 0, + 1102, + }, + dictWord{132, 0, 898}, + dictWord{10, 0, 183}, + dictWord{4, 10, 238}, + dictWord{5, 10, 503}, + dictWord{6, 10, 179}, + dictWord{7, 10, 2003}, + dictWord{ + 8, + 10, + 381, + }, + dictWord{8, 10, 473}, + dictWord{9, 10, 149}, + dictWord{10, 10, 788}, + dictWord{15, 10, 45}, + dictWord{15, 10, 86}, + 
dictWord{20, 10, 110}, + dictWord{ + 150, + 10, + 57, + }, + dictWord{9, 0, 136}, + dictWord{19, 0, 107}, + dictWord{4, 10, 121}, + dictWord{5, 10, 156}, + dictWord{5, 10, 349}, + dictWord{10, 10, 605}, + dictWord{ + 142, + 10, + 342, + }, + dictWord{4, 11, 235}, + dictWord{135, 11, 255}, + dictWord{4, 11, 194}, + dictWord{5, 11, 584}, + dictWord{6, 11, 384}, + dictWord{7, 11, 583}, + dictWord{ + 10, + 11, + 761, + }, + dictWord{11, 11, 760}, + dictWord{139, 11, 851}, + dictWord{6, 10, 80}, + dictWord{6, 10, 1694}, + dictWord{7, 10, 173}, + dictWord{7, 10, 1974}, + dictWord{ + 9, + 10, + 547, + }, + dictWord{10, 10, 730}, + dictWord{14, 10, 18}, + dictWord{150, 10, 39}, + dictWord{4, 10, 923}, + dictWord{134, 10, 1711}, + dictWord{5, 0, 277}, + dictWord{141, 0, 247}, + dictWord{132, 0, 435}, + dictWord{133, 11, 562}, + dictWord{134, 0, 1311}, + dictWord{5, 11, 191}, + dictWord{137, 11, 271}, + dictWord{ + 132, + 10, + 595, + }, + dictWord{7, 11, 1537}, + dictWord{14, 11, 96}, + dictWord{143, 11, 73}, + dictWord{5, 0, 437}, + dictWord{7, 0, 502}, + dictWord{7, 0, 519}, + dictWord{7, 0, 1122}, + dictWord{7, 0, 1751}, + dictWord{14, 0, 211}, + dictWord{6, 10, 459}, + dictWord{7, 10, 1753}, + dictWord{7, 10, 1805}, + dictWord{8, 10, 658}, + dictWord{9, 10, 1}, + dictWord{11, 10, 959}, + dictWord{141, 10, 446}, + dictWord{6, 0, 814}, + dictWord{4, 11, 470}, + dictWord{5, 11, 473}, + dictWord{6, 11, 153}, + dictWord{7, 11, 1503}, + dictWord{7, 11, 1923}, + dictWord{10, 11, 701}, + dictWord{11, 11, 132}, + dictWord{11, 11, 168}, + dictWord{11, 11, 227}, + dictWord{11, 11, 320}, + dictWord{ + 11, + 11, + 436, + }, + dictWord{11, 11, 525}, + dictWord{11, 11, 855}, + dictWord{12, 11, 41}, + dictWord{12, 11, 286}, + dictWord{13, 11, 103}, + dictWord{13, 11, 284}, + dictWord{ + 14, + 11, + 255, + }, + dictWord{14, 11, 262}, + dictWord{15, 11, 117}, + dictWord{143, 11, 127}, + dictWord{5, 0, 265}, + dictWord{6, 0, 212}, + dictWord{135, 0, 28}, + dictWord{ + 138, + 0, + 750, + 
}, + dictWord{133, 11, 327}, + dictWord{6, 11, 552}, + dictWord{7, 11, 1754}, + dictWord{137, 11, 604}, + dictWord{134, 0, 2012}, + dictWord{132, 0, 702}, + dictWord{5, 11, 80}, + dictWord{6, 11, 405}, + dictWord{7, 11, 403}, + dictWord{7, 11, 1502}, + dictWord{7, 11, 1626}, + dictWord{8, 11, 456}, + dictWord{9, 11, 487}, + dictWord{9, 11, 853}, + dictWord{9, 11, 889}, + dictWord{10, 11, 309}, + dictWord{11, 11, 721}, + dictWord{11, 11, 994}, + dictWord{12, 11, 430}, + dictWord{ + 141, + 11, + 165, + }, + dictWord{5, 0, 808}, + dictWord{135, 0, 2045}, + dictWord{5, 0, 166}, + dictWord{8, 0, 739}, + dictWord{140, 0, 511}, + dictWord{134, 10, 490}, + dictWord{ + 4, + 11, + 453, + }, + dictWord{5, 11, 887}, + dictWord{6, 11, 535}, + dictWord{8, 11, 6}, + dictWord{136, 11, 543}, + dictWord{4, 0, 119}, + dictWord{5, 0, 170}, + dictWord{5, 0, 447}, + dictWord{7, 0, 1708}, + dictWord{7, 0, 1889}, + dictWord{9, 0, 357}, + dictWord{9, 0, 719}, + dictWord{12, 0, 486}, + dictWord{140, 0, 596}, + dictWord{137, 0, 500}, + dictWord{ + 7, + 10, + 250, + }, + dictWord{136, 10, 507}, + dictWord{132, 10, 158}, + dictWord{6, 0, 809}, + dictWord{134, 0, 1500}, + dictWord{9, 0, 327}, + dictWord{11, 0, 350}, + dictWord{11, 0, 831}, + dictWord{13, 0, 352}, + dictWord{4, 10, 140}, + dictWord{7, 10, 362}, + dictWord{8, 10, 209}, + dictWord{9, 10, 10}, + dictWord{9, 10, 503}, + dictWord{ + 9, + 10, + 614, + }, + dictWord{10, 10, 689}, + dictWord{11, 10, 327}, + dictWord{11, 10, 725}, + dictWord{12, 10, 252}, + dictWord{12, 10, 583}, + dictWord{13, 10, 192}, + dictWord{14, 10, 269}, + dictWord{14, 10, 356}, + dictWord{148, 10, 50}, + dictWord{135, 11, 741}, + dictWord{4, 0, 450}, + dictWord{7, 0, 1158}, + dictWord{19, 10, 1}, + dictWord{19, 10, 26}, + dictWord{150, 10, 9}, + dictWord{6, 0, 597}, + dictWord{135, 0, 1318}, + dictWord{134, 0, 1602}, + dictWord{6, 10, 228}, + dictWord{7, 10, 1341}, + dictWord{9, 10, 408}, + dictWord{138, 10, 343}, + dictWord{7, 0, 1375}, + dictWord{7, 0, 1466}, 
+ dictWord{138, 0, 331}, + dictWord{132, 0, 754}, + dictWord{ + 132, + 10, + 557, + }, + dictWord{5, 11, 101}, + dictWord{6, 11, 88}, + dictWord{6, 11, 543}, + dictWord{7, 11, 1677}, + dictWord{9, 11, 100}, + dictWord{10, 11, 677}, + dictWord{ + 14, + 11, + 169, + }, + dictWord{14, 11, 302}, + dictWord{14, 11, 313}, + dictWord{15, 11, 48}, + dictWord{143, 11, 84}, + dictWord{134, 0, 1368}, + dictWord{4, 11, 310}, + dictWord{ + 9, + 11, + 795, + }, + dictWord{10, 11, 733}, + dictWord{11, 11, 451}, + dictWord{12, 11, 249}, + dictWord{14, 11, 115}, + dictWord{14, 11, 286}, + dictWord{143, 11, 100}, + dictWord{132, 10, 548}, + dictWord{10, 0, 557}, + dictWord{7, 10, 197}, + dictWord{8, 10, 142}, + dictWord{8, 10, 325}, + dictWord{9, 10, 150}, + dictWord{9, 10, 596}, + dictWord{10, 10, 353}, + dictWord{11, 10, 74}, + dictWord{11, 10, 315}, + dictWord{12, 10, 662}, + dictWord{12, 10, 681}, + dictWord{14, 10, 423}, + dictWord{ + 143, + 10, + 141, + }, + dictWord{133, 11, 587}, + dictWord{5, 0, 850}, + dictWord{136, 0, 799}, + dictWord{10, 0, 908}, + dictWord{12, 0, 701}, + dictWord{12, 0, 757}, + dictWord{ + 142, + 0, + 466, + }, + dictWord{4, 0, 62}, + dictWord{5, 0, 275}, + dictWord{18, 0, 19}, + dictWord{6, 10, 399}, + dictWord{6, 10, 579}, + dictWord{7, 10, 692}, + dictWord{7, 10, 846}, + dictWord{ + 7, + 10, + 1015, + }, + dictWord{7, 10, 1799}, + dictWord{8, 10, 403}, + dictWord{9, 10, 394}, + dictWord{10, 10, 133}, + dictWord{12, 10, 4}, + dictWord{12, 10, 297}, + dictWord{12, 10, 452}, + dictWord{16, 10, 81}, + dictWord{18, 10, 25}, + dictWord{21, 10, 14}, + dictWord{22, 10, 12}, + dictWord{151, 10, 18}, + dictWord{12, 0, 459}, + dictWord{ + 7, + 10, + 1546, + }, + dictWord{11, 10, 299}, + dictWord{142, 10, 407}, + dictWord{132, 10, 177}, + dictWord{132, 11, 498}, + dictWord{7, 11, 217}, + dictWord{ + 8, + 11, + 140, + }, + dictWord{138, 11, 610}, + dictWord{5, 10, 411}, + dictWord{135, 10, 653}, + dictWord{134, 0, 1802}, + dictWord{7, 10, 439}, + dictWord{10, 10, 
727}, + dictWord{11, 10, 260}, + dictWord{139, 10, 684}, + dictWord{133, 11, 905}, + dictWord{11, 11, 580}, + dictWord{142, 11, 201}, + dictWord{134, 0, 1397}, + dictWord{ + 5, + 10, + 208, + }, + dictWord{7, 10, 753}, + dictWord{135, 10, 1528}, + dictWord{7, 0, 238}, + dictWord{7, 0, 2033}, + dictWord{8, 0, 120}, + dictWord{8, 0, 188}, + dictWord{8, 0, 659}, + dictWord{9, 0, 598}, + dictWord{10, 0, 466}, + dictWord{12, 0, 342}, + dictWord{12, 0, 588}, + dictWord{13, 0, 503}, + dictWord{14, 0, 246}, + dictWord{143, 0, 92}, + dictWord{135, 11, 1041}, + dictWord{4, 11, 456}, + dictWord{7, 11, 105}, + dictWord{7, 11, 358}, + dictWord{7, 11, 1637}, + dictWord{8, 11, 643}, + dictWord{139, 11, 483}, + dictWord{6, 0, 1318}, + dictWord{134, 0, 1324}, + dictWord{4, 0, 201}, + dictWord{7, 0, 1744}, + dictWord{8, 0, 602}, + dictWord{11, 0, 247}, + dictWord{11, 0, 826}, + dictWord{17, 0, 65}, + dictWord{133, 10, 242}, + dictWord{8, 0, 164}, + dictWord{146, 0, 62}, + dictWord{133, 10, 953}, + dictWord{139, 10, 802}, + dictWord{133, 0, 615}, + dictWord{7, 11, 1566}, + dictWord{8, 11, 269}, + dictWord{9, 11, 212}, + dictWord{9, 11, 718}, + dictWord{14, 11, 15}, + dictWord{14, 11, 132}, + dictWord{142, 11, 227}, + dictWord{133, 10, 290}, + dictWord{132, 10, 380}, + dictWord{5, 10, 52}, + dictWord{7, 10, 277}, + dictWord{9, 10, 368}, + dictWord{139, 10, 791}, + dictWord{ + 135, + 0, + 1243, + }, + dictWord{133, 11, 539}, + dictWord{11, 11, 919}, + dictWord{141, 11, 409}, + dictWord{136, 0, 968}, + dictWord{133, 11, 470}, + dictWord{134, 0, 882}, + dictWord{132, 0, 907}, + dictWord{5, 0, 100}, + dictWord{10, 0, 329}, + dictWord{12, 0, 416}, + dictWord{149, 0, 29}, + dictWord{10, 10, 138}, + dictWord{139, 10, 476}, + dictWord{5, 10, 725}, + dictWord{5, 10, 727}, + dictWord{6, 11, 91}, + dictWord{7, 11, 435}, + dictWord{135, 10, 1811}, + dictWord{4, 11, 16}, + dictWord{5, 11, 316}, + dictWord{5, 11, 842}, + dictWord{6, 11, 370}, + dictWord{6, 11, 1778}, + dictWord{8, 11, 166}, + 
dictWord{11, 11, 812}, + dictWord{12, 11, 206}, + dictWord{12, 11, 351}, + dictWord{14, 11, 418}, + dictWord{16, 11, 15}, + dictWord{16, 11, 34}, + dictWord{18, 11, 3}, + dictWord{19, 11, 3}, + dictWord{19, 11, 7}, + dictWord{20, 11, 4}, + dictWord{ + 149, + 11, + 21, + }, + dictWord{132, 0, 176}, + dictWord{5, 0, 636}, + dictWord{5, 0, 998}, + dictWord{7, 0, 9}, + dictWord{7, 0, 1508}, + dictWord{8, 0, 26}, + dictWord{9, 0, 317}, + dictWord{ + 9, + 0, + 358, + }, + dictWord{10, 0, 210}, + dictWord{10, 0, 292}, + dictWord{10, 0, 533}, + dictWord{11, 0, 555}, + dictWord{12, 0, 526}, + dictWord{12, 0, 607}, + dictWord{ + 13, + 0, + 263, + }, + dictWord{13, 0, 459}, + dictWord{142, 0, 271}, + dictWord{6, 0, 256}, + dictWord{8, 0, 265}, + dictWord{4, 10, 38}, + dictWord{7, 10, 307}, + dictWord{7, 10, 999}, + dictWord{7, 10, 1481}, + dictWord{7, 10, 1732}, + dictWord{7, 10, 1738}, + dictWord{9, 10, 414}, + dictWord{11, 10, 316}, + dictWord{12, 10, 52}, + dictWord{13, 10, 420}, + dictWord{147, 10, 100}, + dictWord{135, 10, 1296}, + dictWord{4, 11, 611}, + dictWord{133, 11, 606}, + dictWord{4, 0, 643}, + dictWord{142, 11, 21}, + dictWord{ + 133, + 11, + 715, + }, + dictWord{133, 10, 723}, + dictWord{6, 0, 610}, + dictWord{135, 11, 597}, + dictWord{10, 0, 127}, + dictWord{141, 0, 27}, + dictWord{6, 0, 1995}, + dictWord{ + 6, + 0, + 2001, + }, + dictWord{8, 0, 119}, + dictWord{136, 0, 973}, + dictWord{4, 11, 149}, + dictWord{138, 11, 368}, + dictWord{12, 0, 522}, + dictWord{4, 11, 154}, + dictWord{ + 5, + 10, + 109, + }, + dictWord{6, 10, 1784}, + dictWord{7, 11, 1134}, + dictWord{7, 10, 1895}, + dictWord{8, 11, 105}, + dictWord{12, 10, 296}, + dictWord{140, 10, 302}, + dictWord{4, 11, 31}, + dictWord{6, 11, 429}, + dictWord{7, 11, 962}, + dictWord{9, 11, 458}, + dictWord{139, 11, 691}, + dictWord{10, 0, 553}, + dictWord{11, 0, 876}, + dictWord{13, 0, 193}, + dictWord{13, 0, 423}, + dictWord{14, 0, 166}, + dictWord{19, 0, 84}, + dictWord{4, 11, 312}, + dictWord{5, 10, 216}, 
+ dictWord{7, 10, 1879}, + dictWord{ + 9, + 10, + 141, + }, + dictWord{9, 10, 270}, + dictWord{9, 10, 679}, + dictWord{10, 10, 159}, + dictWord{11, 10, 197}, + dictWord{12, 10, 538}, + dictWord{12, 10, 559}, + dictWord{14, 10, 144}, + dictWord{14, 10, 167}, + dictWord{143, 10, 67}, + dictWord{134, 0, 1582}, + dictWord{7, 0, 1578}, + dictWord{135, 11, 1578}, + dictWord{ + 137, + 10, + 81, + }, + dictWord{132, 11, 236}, + dictWord{134, 10, 391}, + dictWord{134, 0, 795}, + dictWord{7, 10, 322}, + dictWord{136, 10, 249}, + dictWord{5, 11, 836}, + dictWord{ + 5, + 11, + 857, + }, + dictWord{6, 11, 1680}, + dictWord{7, 11, 59}, + dictWord{147, 11, 53}, + dictWord{135, 0, 432}, + dictWord{10, 11, 68}, + dictWord{139, 11, 494}, + dictWord{4, 11, 81}, + dictWord{139, 11, 867}, + dictWord{7, 0, 126}, + dictWord{136, 0, 84}, + dictWord{142, 11, 280}, + dictWord{5, 11, 282}, + dictWord{8, 11, 650}, + dictWord{ + 9, + 11, + 295, + }, + dictWord{9, 11, 907}, + dictWord{138, 11, 443}, + dictWord{136, 0, 790}, + dictWord{5, 10, 632}, + dictWord{138, 10, 526}, + dictWord{6, 0, 64}, + dictWord{12, 0, 377}, + dictWord{13, 0, 309}, + dictWord{14, 0, 141}, + dictWord{14, 0, 429}, + dictWord{14, 11, 141}, + dictWord{142, 11, 429}, + dictWord{134, 0, 1529}, + dictWord{6, 0, 321}, + dictWord{7, 0, 1857}, + dictWord{9, 0, 530}, + dictWord{19, 0, 99}, + dictWord{7, 10, 948}, + dictWord{7, 10, 1042}, + dictWord{8, 10, 235}, + dictWord{ + 8, + 10, + 461, + }, + dictWord{9, 10, 453}, + dictWord{10, 10, 354}, + dictWord{145, 10, 77}, + dictWord{7, 0, 1104}, + dictWord{11, 0, 269}, + dictWord{11, 0, 539}, + dictWord{ + 11, + 0, + 627, + }, + dictWord{11, 0, 706}, + dictWord{11, 0, 975}, + dictWord{12, 0, 248}, + dictWord{12, 0, 434}, + dictWord{12, 0, 600}, + dictWord{12, 0, 622}, + dictWord{ + 13, + 0, + 297, + }, + dictWord{13, 0, 485}, + dictWord{14, 0, 69}, + dictWord{14, 0, 409}, + dictWord{143, 0, 108}, + dictWord{4, 10, 362}, + dictWord{7, 10, 52}, + dictWord{7, 10, 303}, + dictWord{10, 
11, 70}, + dictWord{12, 11, 26}, + dictWord{14, 11, 17}, + dictWord{14, 11, 178}, + dictWord{15, 11, 34}, + dictWord{149, 11, 12}, + dictWord{11, 0, 977}, + dictWord{141, 0, 507}, + dictWord{9, 0, 34}, + dictWord{139, 0, 484}, + dictWord{5, 10, 196}, + dictWord{6, 10, 486}, + dictWord{7, 10, 212}, + dictWord{8, 10, 309}, + dictWord{136, 10, 346}, + dictWord{6, 0, 1700}, + dictWord{7, 0, 26}, + dictWord{7, 0, 293}, + dictWord{7, 0, 382}, + dictWord{7, 0, 1026}, + dictWord{7, 0, 1087}, + dictWord{ + 7, + 0, + 2027, + }, + dictWord{8, 0, 24}, + dictWord{8, 0, 114}, + dictWord{8, 0, 252}, + dictWord{8, 0, 727}, + dictWord{8, 0, 729}, + dictWord{9, 0, 30}, + dictWord{9, 0, 199}, + dictWord{ + 9, + 0, + 231, + }, + dictWord{9, 0, 251}, + dictWord{9, 0, 334}, + dictWord{9, 0, 361}, + dictWord{9, 0, 712}, + dictWord{10, 0, 55}, + dictWord{10, 0, 60}, + dictWord{10, 0, 232}, + dictWord{ + 10, + 0, + 332, + }, + dictWord{10, 0, 384}, + dictWord{10, 0, 396}, + dictWord{10, 0, 504}, + dictWord{10, 0, 542}, + dictWord{10, 0, 652}, + dictWord{11, 0, 20}, + dictWord{11, 0, 48}, + dictWord{11, 0, 207}, + dictWord{11, 0, 291}, + dictWord{11, 0, 298}, + dictWord{11, 0, 342}, + dictWord{11, 0, 365}, + dictWord{11, 0, 394}, + dictWord{11, 0, 620}, + dictWord{11, 0, 705}, + dictWord{11, 0, 1017}, + dictWord{12, 0, 123}, + dictWord{12, 0, 340}, + dictWord{12, 0, 406}, + dictWord{12, 0, 643}, + dictWord{13, 0, 61}, + dictWord{ + 13, + 0, + 269, + }, + dictWord{13, 0, 311}, + dictWord{13, 0, 319}, + dictWord{13, 0, 486}, + dictWord{14, 0, 234}, + dictWord{15, 0, 62}, + dictWord{15, 0, 85}, + dictWord{16, 0, 71}, + dictWord{18, 0, 119}, + dictWord{20, 0, 105}, + dictWord{135, 10, 1912}, + dictWord{4, 11, 71}, + dictWord{5, 11, 376}, + dictWord{7, 11, 119}, + dictWord{138, 11, 665}, + dictWord{10, 0, 918}, + dictWord{10, 0, 926}, + dictWord{4, 10, 686}, + dictWord{136, 11, 55}, + dictWord{138, 10, 625}, + dictWord{136, 10, 706}, + dictWord{ + 132, + 11, + 479, + }, + dictWord{4, 10, 30}, + 
dictWord{133, 10, 43}, + dictWord{6, 0, 379}, + dictWord{7, 0, 270}, + dictWord{8, 0, 176}, + dictWord{8, 0, 183}, + dictWord{9, 0, 432}, + dictWord{ + 9, + 0, + 661, + }, + dictWord{12, 0, 247}, + dictWord{12, 0, 617}, + dictWord{18, 0, 125}, + dictWord{7, 11, 607}, + dictWord{8, 11, 99}, + dictWord{152, 11, 4}, + dictWord{ + 5, + 0, + 792, + }, + dictWord{133, 0, 900}, + dictWord{4, 11, 612}, + dictWord{133, 11, 561}, + dictWord{4, 11, 41}, + dictWord{4, 10, 220}, + dictWord{5, 11, 74}, + dictWord{ + 7, + 10, + 1535, + }, + dictWord{7, 11, 1627}, + dictWord{11, 11, 871}, + dictWord{140, 11, 619}, + dictWord{135, 0, 1920}, + dictWord{7, 11, 94}, + dictWord{11, 11, 329}, + dictWord{11, 11, 965}, + dictWord{12, 11, 241}, + dictWord{14, 11, 354}, + dictWord{15, 11, 22}, + dictWord{148, 11, 63}, + dictWord{9, 11, 209}, + dictWord{137, 11, 300}, + dictWord{134, 0, 771}, + dictWord{135, 0, 1979}, + dictWord{4, 0, 901}, + dictWord{133, 0, 776}, + dictWord{142, 0, 254}, + dictWord{133, 11, 98}, + dictWord{ + 9, + 11, + 16, + }, + dictWord{141, 11, 386}, + dictWord{133, 11, 984}, + dictWord{4, 11, 182}, + dictWord{6, 11, 205}, + dictWord{135, 11, 220}, + dictWord{7, 10, 1725}, + dictWord{ + 7, + 10, + 1774, + }, + dictWord{138, 10, 393}, + dictWord{5, 10, 263}, + dictWord{134, 10, 414}, + dictWord{4, 11, 42}, + dictWord{9, 11, 205}, + dictWord{9, 11, 786}, + dictWord{138, 11, 659}, + dictWord{14, 0, 140}, + dictWord{148, 0, 41}, + dictWord{8, 0, 440}, + dictWord{10, 0, 359}, + dictWord{6, 10, 178}, + dictWord{6, 11, 289}, + dictWord{ + 6, + 10, + 1750, + }, + dictWord{7, 11, 1670}, + dictWord{9, 10, 690}, + dictWord{10, 10, 155}, + dictWord{10, 10, 373}, + dictWord{11, 10, 698}, + dictWord{12, 11, 57}, + dictWord{13, 10, 155}, + dictWord{20, 10, 93}, + dictWord{151, 11, 4}, + dictWord{4, 0, 37}, + dictWord{5, 0, 334}, + dictWord{7, 0, 1253}, + dictWord{151, 11, 25}, + dictWord{ + 4, + 0, + 508, + }, + dictWord{4, 11, 635}, + dictWord{5, 10, 97}, + dictWord{137, 10, 393}, + 
dictWord{139, 11, 533}, + dictWord{4, 0, 640}, + dictWord{133, 0, 513}, + dictWord{ + 134, + 10, + 1639, + }, + dictWord{132, 11, 371}, + dictWord{4, 11, 272}, + dictWord{7, 11, 836}, + dictWord{7, 11, 1651}, + dictWord{145, 11, 89}, + dictWord{5, 11, 825}, + dictWord{6, 11, 444}, + dictWord{6, 11, 1640}, + dictWord{136, 11, 308}, + dictWord{4, 10, 191}, + dictWord{7, 10, 934}, + dictWord{8, 10, 647}, + dictWord{145, 10, 97}, + dictWord{12, 0, 246}, + dictWord{15, 0, 162}, + dictWord{19, 0, 64}, + dictWord{20, 0, 8}, + dictWord{20, 0, 95}, + dictWord{22, 0, 24}, + dictWord{152, 0, 17}, + dictWord{4, 0, 533}, + dictWord{5, 10, 165}, + dictWord{9, 10, 346}, + dictWord{138, 10, 655}, + dictWord{5, 11, 737}, + dictWord{139, 10, 885}, + dictWord{133, 10, 877}, + dictWord{ + 8, + 10, + 128, + }, + dictWord{139, 10, 179}, + dictWord{137, 11, 307}, + dictWord{140, 0, 752}, + dictWord{133, 0, 920}, + dictWord{135, 0, 1048}, + dictWord{5, 0, 153}, + dictWord{ + 6, + 0, + 580, + }, + dictWord{6, 10, 1663}, + dictWord{7, 10, 132}, + dictWord{7, 10, 1154}, + dictWord{7, 10, 1415}, + dictWord{7, 10, 1507}, + dictWord{12, 10, 493}, + dictWord{15, 10, 105}, + dictWord{151, 10, 15}, + dictWord{5, 10, 459}, + dictWord{7, 10, 1073}, + dictWord{8, 10, 241}, + dictWord{136, 10, 334}, + dictWord{138, 0, 391}, + dictWord{135, 0, 1952}, + dictWord{133, 11, 525}, + dictWord{8, 11, 641}, + dictWord{11, 11, 388}, + dictWord{140, 11, 580}, + dictWord{142, 0, 126}, + dictWord{ + 134, + 0, + 640, + }, + dictWord{132, 0, 483}, + dictWord{7, 0, 1616}, + dictWord{9, 0, 69}, + dictWord{6, 10, 324}, + dictWord{6, 10, 520}, + dictWord{7, 10, 338}, + dictWord{ + 7, + 10, + 1729, + }, + dictWord{8, 10, 228}, + dictWord{139, 10, 750}, + dictWord{5, 11, 493}, + dictWord{134, 11, 528}, + dictWord{135, 0, 734}, + dictWord{4, 11, 174}, + dictWord{135, 11, 911}, + dictWord{138, 0, 480}, + dictWord{9, 0, 495}, + dictWord{146, 0, 104}, + dictWord{135, 10, 705}, + dictWord{9, 0, 472}, + dictWord{4, 10, 73}, + 
dictWord{6, 10, 612}, + dictWord{7, 10, 927}, + dictWord{7, 10, 1330}, + dictWord{7, 10, 1822}, + dictWord{8, 10, 217}, + dictWord{9, 10, 765}, + dictWord{9, 10, 766}, + dictWord{10, 10, 408}, + dictWord{11, 10, 51}, + dictWord{11, 10, 793}, + dictWord{12, 10, 266}, + dictWord{15, 10, 158}, + dictWord{20, 10, 89}, + dictWord{150, 10, 32}, + dictWord{7, 11, 548}, + dictWord{137, 11, 58}, + dictWord{4, 11, 32}, + dictWord{5, 11, 215}, + dictWord{6, 11, 269}, + dictWord{7, 11, 1782}, + dictWord{7, 11, 1892}, + dictWord{10, 11, 16}, + dictWord{11, 11, 822}, + dictWord{11, 11, 954}, + dictWord{141, 11, 481}, + dictWord{132, 0, 874}, + dictWord{9, 0, 229}, + dictWord{5, 10, 389}, + dictWord{136, 10, 636}, + dictWord{7, 11, 1749}, + dictWord{136, 11, 477}, + dictWord{134, 0, 948}, + dictWord{5, 11, 308}, + dictWord{135, 11, 1088}, + dictWord{ + 4, + 0, + 748, + }, + dictWord{139, 0, 1009}, + dictWord{136, 10, 21}, + dictWord{6, 0, 555}, + dictWord{135, 0, 485}, + dictWord{5, 11, 126}, + dictWord{8, 11, 297}, + dictWord{ + 9, + 11, + 366, + }, + dictWord{9, 11, 445}, + dictWord{12, 11, 53}, + dictWord{12, 11, 374}, + dictWord{141, 11, 492}, + dictWord{7, 11, 1551}, + dictWord{139, 11, 361}, + dictWord{136, 0, 193}, + dictWord{136, 0, 472}, + dictWord{8, 0, 653}, + dictWord{13, 0, 93}, + dictWord{147, 0, 14}, + dictWord{132, 0, 984}, + dictWord{132, 11, 175}, + dictWord{5, 0, 172}, + dictWord{6, 0, 1971}, + dictWord{132, 11, 685}, + dictWord{149, 11, 8}, + dictWord{133, 11, 797}, + dictWord{13, 0, 83}, + dictWord{5, 10, 189}, + dictWord{ + 7, + 10, + 442, + }, + dictWord{7, 10, 443}, + dictWord{8, 10, 281}, + dictWord{12, 10, 174}, + dictWord{141, 10, 261}, + dictWord{134, 0, 1568}, + dictWord{133, 11, 565}, + dictWord{139, 0, 384}, + dictWord{133, 0, 260}, + dictWord{7, 0, 758}, + dictWord{7, 0, 880}, + dictWord{7, 0, 1359}, + dictWord{9, 0, 164}, + dictWord{9, 0, 167}, + dictWord{ + 10, + 0, + 156, + }, + dictWord{10, 0, 588}, + dictWord{12, 0, 101}, + dictWord{14, 0, 
48}, + dictWord{15, 0, 70}, + dictWord{6, 10, 2}, + dictWord{7, 10, 1262}, + dictWord{ + 7, + 10, + 1737, + }, + dictWord{8, 10, 22}, + dictWord{8, 10, 270}, + dictWord{8, 10, 612}, + dictWord{9, 10, 312}, + dictWord{9, 10, 436}, + dictWord{10, 10, 311}, + dictWord{ + 10, + 10, + 623, + }, + dictWord{11, 10, 72}, + dictWord{11, 10, 330}, + dictWord{11, 10, 455}, + dictWord{12, 10, 321}, + dictWord{12, 10, 504}, + dictWord{12, 10, 530}, + dictWord{ + 12, + 10, + 543, + }, + dictWord{13, 10, 17}, + dictWord{13, 10, 156}, + dictWord{13, 10, 334}, + dictWord{17, 10, 60}, + dictWord{148, 10, 64}, + dictWord{4, 11, 252}, + dictWord{ + 7, + 11, + 1068, + }, + dictWord{10, 11, 434}, + dictWord{11, 11, 228}, + dictWord{11, 11, 426}, + dictWord{13, 11, 231}, + dictWord{18, 11, 106}, + dictWord{148, 11, 87}, + dictWord{7, 10, 354}, + dictWord{10, 10, 410}, + dictWord{139, 10, 815}, + dictWord{6, 0, 367}, + dictWord{7, 10, 670}, + dictWord{7, 10, 1327}, + dictWord{8, 10, 411}, + dictWord{8, 10, 435}, + dictWord{9, 10, 653}, + dictWord{9, 10, 740}, + dictWord{10, 10, 385}, + dictWord{11, 10, 222}, + dictWord{11, 10, 324}, + dictWord{11, 10, 829}, + dictWord{140, 10, 611}, + dictWord{7, 0, 1174}, + dictWord{6, 10, 166}, + dictWord{135, 10, 374}, + dictWord{146, 0, 121}, + dictWord{132, 0, 828}, + dictWord{ + 5, + 11, + 231, + }, + dictWord{138, 11, 509}, + dictWord{7, 11, 601}, + dictWord{9, 11, 277}, + dictWord{9, 11, 674}, + dictWord{10, 11, 178}, + dictWord{10, 11, 257}, + dictWord{ + 10, + 11, + 418, + }, + dictWord{11, 11, 531}, + dictWord{11, 11, 544}, + dictWord{11, 11, 585}, + dictWord{12, 11, 113}, + dictWord{12, 11, 475}, + dictWord{13, 11, 99}, + dictWord{142, 11, 428}, + dictWord{134, 0, 1541}, + dictWord{135, 11, 1779}, + dictWord{5, 0, 343}, + dictWord{134, 10, 398}, + dictWord{135, 10, 50}, + dictWord{ + 135, + 11, + 1683, + }, + dictWord{4, 0, 440}, + dictWord{7, 0, 57}, + dictWord{8, 0, 167}, + dictWord{8, 0, 375}, + dictWord{9, 0, 82}, + dictWord{9, 0, 561}, + 
dictWord{9, 0, 744}, + dictWord{ + 10, + 0, + 620, + }, + dictWord{137, 11, 744}, + dictWord{134, 0, 926}, + dictWord{6, 10, 517}, + dictWord{7, 10, 1159}, + dictWord{10, 10, 621}, + dictWord{139, 10, 192}, + dictWord{137, 0, 827}, + dictWord{8, 0, 194}, + dictWord{136, 0, 756}, + dictWord{10, 10, 223}, + dictWord{139, 10, 645}, + dictWord{7, 10, 64}, + dictWord{ + 136, + 10, + 245, + }, + dictWord{4, 11, 399}, + dictWord{5, 11, 119}, + dictWord{5, 11, 494}, + dictWord{7, 11, 751}, + dictWord{137, 11, 556}, + dictWord{132, 0, 808}, + dictWord{ + 135, + 0, + 22, + }, + dictWord{7, 10, 1763}, + dictWord{140, 10, 310}, + dictWord{5, 0, 639}, + dictWord{7, 0, 1249}, + dictWord{11, 0, 896}, + dictWord{134, 11, 584}, + dictWord{ + 134, + 0, + 1614, + }, + dictWord{135, 0, 860}, + dictWord{135, 11, 1121}, + dictWord{5, 10, 129}, + dictWord{6, 10, 61}, + dictWord{135, 10, 947}, + dictWord{4, 0, 102}, + dictWord{ + 7, + 0, + 815, + }, + dictWord{7, 0, 1699}, + dictWord{139, 0, 964}, + dictWord{13, 10, 505}, + dictWord{141, 10, 506}, + dictWord{139, 10, 1000}, + dictWord{ + 132, + 11, + 679, + }, + dictWord{132, 0, 899}, + dictWord{132, 0, 569}, + dictWord{5, 11, 694}, + dictWord{137, 11, 714}, + dictWord{136, 0, 795}, + dictWord{6, 0, 2045}, + dictWord{ + 139, + 11, + 7, + }, + dictWord{6, 0, 52}, + dictWord{9, 0, 104}, + dictWord{9, 0, 559}, + dictWord{12, 0, 308}, + dictWord{147, 0, 87}, + dictWord{4, 0, 301}, + dictWord{132, 0, 604}, + dictWord{133, 10, 637}, + dictWord{136, 0, 779}, + dictWord{5, 11, 143}, + dictWord{5, 11, 769}, + dictWord{6, 11, 1760}, + dictWord{7, 11, 682}, + dictWord{7, 11, 1992}, + dictWord{136, 11, 736}, + dictWord{137, 10, 590}, + dictWord{147, 0, 32}, + dictWord{137, 11, 527}, + dictWord{5, 10, 280}, + dictWord{135, 10, 1226}, + dictWord{134, 0, 494}, + dictWord{6, 0, 677}, + dictWord{6, 0, 682}, + dictWord{134, 0, 1044}, + dictWord{133, 10, 281}, + dictWord{135, 10, 1064}, + dictWord{7, 0, 508}, + dictWord{133, 11, 860}, + dictWord{6, 11, 
422}, + dictWord{7, 11, 0}, + dictWord{7, 11, 1544}, + dictWord{9, 11, 577}, + dictWord{11, 11, 990}, + dictWord{12, 11, 141}, + dictWord{12, 11, 453}, + dictWord{13, 11, 47}, + dictWord{141, 11, 266}, + dictWord{134, 0, 1014}, + dictWord{5, 11, 515}, + dictWord{137, 11, 131}, + dictWord{ + 134, + 0, + 957, + }, + dictWord{132, 11, 646}, + dictWord{6, 0, 310}, + dictWord{7, 0, 1849}, + dictWord{8, 0, 72}, + dictWord{8, 0, 272}, + dictWord{8, 0, 431}, + dictWord{9, 0, 12}, + dictWord{ + 9, + 0, + 376, + }, + dictWord{10, 0, 563}, + dictWord{10, 0, 630}, + dictWord{10, 0, 796}, + dictWord{10, 0, 810}, + dictWord{11, 0, 367}, + dictWord{11, 0, 599}, + dictWord{ + 11, + 0, + 686, + }, + dictWord{140, 0, 672}, + dictWord{7, 0, 570}, + dictWord{4, 11, 396}, + dictWord{7, 10, 120}, + dictWord{7, 11, 728}, + dictWord{8, 10, 489}, + dictWord{9, 11, 117}, + dictWord{9, 10, 319}, + dictWord{10, 10, 820}, + dictWord{11, 10, 1004}, + dictWord{12, 10, 379}, + dictWord{12, 10, 679}, + dictWord{13, 10, 117}, + dictWord{ + 13, + 11, + 202, + }, + dictWord{13, 10, 412}, + dictWord{14, 10, 25}, + dictWord{15, 10, 52}, + dictWord{15, 10, 161}, + dictWord{16, 10, 47}, + dictWord{20, 11, 51}, + dictWord{ + 149, + 10, + 2, + }, + dictWord{6, 11, 121}, + dictWord{6, 11, 124}, + dictWord{6, 11, 357}, + dictWord{7, 11, 1138}, + dictWord{7, 11, 1295}, + dictWord{8, 11, 162}, + dictWord{ + 139, + 11, + 655, + }, + dictWord{8, 0, 449}, + dictWord{4, 10, 937}, + dictWord{5, 10, 801}, + dictWord{136, 11, 449}, + dictWord{139, 11, 958}, + dictWord{6, 0, 181}, + dictWord{ + 7, + 0, + 537, + }, + dictWord{8, 0, 64}, + dictWord{9, 0, 127}, + dictWord{10, 0, 496}, + dictWord{12, 0, 510}, + dictWord{141, 0, 384}, + dictWord{138, 11, 253}, + dictWord{4, 0, 244}, + dictWord{135, 0, 233}, + dictWord{133, 11, 237}, + dictWord{132, 10, 365}, + dictWord{6, 0, 1650}, + dictWord{10, 0, 702}, + dictWord{139, 0, 245}, + dictWord{ + 5, + 10, + 7, + }, + dictWord{139, 10, 774}, + dictWord{13, 0, 463}, + 
dictWord{20, 0, 49}, + dictWord{13, 11, 463}, + dictWord{148, 11, 49}, + dictWord{4, 10, 734}, + dictWord{ + 5, + 10, + 662, + }, + dictWord{134, 10, 430}, + dictWord{4, 10, 746}, + dictWord{135, 10, 1090}, + dictWord{5, 10, 360}, + dictWord{136, 10, 237}, + dictWord{137, 0, 338}, + dictWord{143, 11, 10}, + dictWord{7, 11, 571}, + dictWord{138, 11, 366}, + dictWord{134, 0, 1279}, + dictWord{9, 11, 513}, + dictWord{10, 11, 22}, + dictWord{10, 11, 39}, + dictWord{12, 11, 122}, + dictWord{140, 11, 187}, + dictWord{133, 0, 896}, + dictWord{146, 0, 178}, + dictWord{134, 0, 695}, + dictWord{137, 0, 808}, + dictWord{ + 134, + 11, + 587, + }, + dictWord{7, 11, 107}, + dictWord{7, 11, 838}, + dictWord{8, 11, 550}, + dictWord{138, 11, 401}, + dictWord{7, 0, 1117}, + dictWord{136, 0, 539}, + dictWord{ + 4, + 10, + 277, + }, + dictWord{5, 10, 608}, + dictWord{6, 10, 493}, + dictWord{7, 10, 457}, + dictWord{140, 10, 384}, + dictWord{133, 11, 768}, + dictWord{12, 0, 257}, + dictWord{ + 7, + 10, + 27, + }, + dictWord{135, 10, 316}, + dictWord{140, 0, 1003}, + dictWord{4, 0, 207}, + dictWord{5, 0, 586}, + dictWord{5, 0, 676}, + dictWord{6, 0, 448}, + dictWord{ + 8, + 0, + 244, + }, + dictWord{11, 0, 1}, + dictWord{13, 0, 3}, + dictWord{16, 0, 54}, + dictWord{17, 0, 4}, + dictWord{18, 0, 13}, + dictWord{133, 10, 552}, + dictWord{4, 10, 401}, + dictWord{ + 137, + 10, + 264, + }, + dictWord{5, 0, 516}, + dictWord{7, 0, 1883}, + dictWord{135, 11, 1883}, + dictWord{12, 0, 960}, + dictWord{132, 11, 894}, + dictWord{5, 0, 4}, + dictWord{ + 5, + 0, + 810, + }, + dictWord{6, 0, 13}, + dictWord{6, 0, 538}, + dictWord{6, 0, 1690}, + dictWord{6, 0, 1726}, + dictWord{7, 0, 499}, + dictWord{7, 0, 1819}, + dictWord{8, 0, 148}, + dictWord{ + 8, + 0, + 696, + }, + dictWord{8, 0, 791}, + dictWord{12, 0, 125}, + dictWord{143, 0, 9}, + dictWord{135, 0, 1268}, + dictWord{11, 0, 30}, + dictWord{14, 0, 315}, + dictWord{ + 9, + 10, + 543, + }, + dictWord{10, 10, 524}, + dictWord{12, 10, 524}, + 
dictWord{16, 10, 18}, + dictWord{20, 10, 26}, + dictWord{148, 10, 65}, + dictWord{6, 0, 748}, + dictWord{ + 4, + 10, + 205, + }, + dictWord{5, 10, 623}, + dictWord{7, 10, 104}, + dictWord{136, 10, 519}, + dictWord{11, 0, 542}, + dictWord{139, 0, 852}, + dictWord{140, 0, 6}, + dictWord{ + 132, + 0, + 848, + }, + dictWord{7, 0, 1385}, + dictWord{11, 0, 582}, + dictWord{11, 0, 650}, + dictWord{11, 0, 901}, + dictWord{11, 0, 949}, + dictWord{12, 0, 232}, + dictWord{12, 0, 236}, + dictWord{13, 0, 413}, + dictWord{13, 0, 501}, + dictWord{18, 0, 116}, + dictWord{7, 10, 579}, + dictWord{9, 10, 41}, + dictWord{9, 10, 244}, + dictWord{9, 10, 669}, + dictWord{10, 10, 5}, + dictWord{11, 10, 861}, + dictWord{11, 10, 951}, + dictWord{139, 10, 980}, + dictWord{4, 0, 945}, + dictWord{6, 0, 1811}, + dictWord{6, 0, 1845}, + dictWord{ + 6, + 0, + 1853, + }, + dictWord{6, 0, 1858}, + dictWord{8, 0, 862}, + dictWord{12, 0, 782}, + dictWord{12, 0, 788}, + dictWord{18, 0, 160}, + dictWord{148, 0, 117}, + dictWord{ + 132, + 10, + 717, + }, + dictWord{4, 0, 925}, + dictWord{5, 0, 803}, + dictWord{8, 0, 698}, + dictWord{138, 0, 828}, + dictWord{134, 0, 1416}, + dictWord{132, 0, 610}, + dictWord{ + 139, + 0, + 992, + }, + dictWord{6, 0, 878}, + dictWord{134, 0, 1477}, + dictWord{135, 0, 1847}, + dictWord{138, 11, 531}, + dictWord{137, 11, 539}, + dictWord{134, 11, 272}, + dictWord{133, 0, 383}, + dictWord{134, 0, 1404}, + dictWord{132, 10, 489}, + dictWord{4, 11, 9}, + dictWord{5, 11, 128}, + dictWord{7, 11, 368}, + dictWord{ + 11, + 11, + 480, + }, + dictWord{148, 11, 3}, + dictWord{136, 0, 986}, + dictWord{9, 0, 660}, + dictWord{138, 0, 347}, + dictWord{135, 10, 892}, + dictWord{136, 11, 682}, + dictWord{ + 7, + 0, + 572, + }, + dictWord{9, 0, 592}, + dictWord{11, 0, 680}, + dictWord{12, 0, 356}, + dictWord{140, 0, 550}, + dictWord{7, 0, 1411}, + dictWord{138, 11, 527}, + dictWord{ + 4, + 11, + 2, + }, + dictWord{7, 11, 545}, + dictWord{135, 11, 894}, + dictWord{137, 10, 473}, + 
dictWord{11, 0, 64}, + dictWord{7, 11, 481}, + dictWord{7, 10, 819}, + dictWord{9, 10, 26}, + dictWord{9, 10, 392}, + dictWord{9, 11, 792}, + dictWord{10, 10, 152}, + dictWord{10, 10, 226}, + dictWord{12, 10, 276}, + dictWord{12, 10, 426}, + dictWord{ + 12, + 10, + 589, + }, + dictWord{13, 10, 460}, + dictWord{15, 10, 97}, + dictWord{19, 10, 48}, + dictWord{148, 10, 104}, + dictWord{135, 10, 51}, + dictWord{136, 11, 445}, + dictWord{136, 11, 646}, + dictWord{135, 0, 606}, + dictWord{132, 10, 674}, + dictWord{6, 0, 1829}, + dictWord{134, 0, 1830}, + dictWord{132, 10, 770}, + dictWord{ + 5, + 10, + 79, + }, + dictWord{7, 10, 1027}, + dictWord{7, 10, 1477}, + dictWord{139, 10, 52}, + dictWord{5, 11, 530}, + dictWord{142, 11, 113}, + dictWord{134, 10, 1666}, + dictWord{ + 7, + 0, + 748, + }, + dictWord{139, 0, 700}, + dictWord{134, 10, 195}, + dictWord{133, 10, 789}, + dictWord{9, 0, 87}, + dictWord{10, 0, 365}, + dictWord{4, 10, 251}, + dictWord{ + 4, + 10, + 688, + }, + dictWord{7, 10, 513}, + dictWord{135, 10, 1284}, + dictWord{136, 11, 111}, + dictWord{133, 0, 127}, + dictWord{6, 0, 198}, + dictWord{140, 0, 83}, + dictWord{133, 11, 556}, + dictWord{133, 10, 889}, + dictWord{4, 10, 160}, + dictWord{5, 10, 330}, + dictWord{7, 10, 1434}, + dictWord{136, 10, 174}, + dictWord{5, 0, 276}, + dictWord{6, 0, 55}, + dictWord{7, 0, 1369}, + dictWord{138, 0, 864}, + dictWord{8, 11, 16}, + dictWord{140, 11, 568}, + dictWord{6, 0, 1752}, + dictWord{136, 0, 726}, + dictWord{135, 0, 1066}, + dictWord{133, 0, 764}, + dictWord{6, 11, 186}, + dictWord{137, 11, 426}, + dictWord{11, 0, 683}, + dictWord{139, 11, 683}, + dictWord{ + 6, + 0, + 309, + }, + dictWord{7, 0, 331}, + dictWord{138, 0, 550}, + dictWord{133, 10, 374}, + dictWord{6, 0, 1212}, + dictWord{6, 0, 1852}, + dictWord{7, 0, 1062}, + dictWord{ + 8, + 0, + 874, + }, + dictWord{8, 0, 882}, + dictWord{138, 0, 936}, + dictWord{132, 11, 585}, + dictWord{134, 0, 1364}, + dictWord{7, 0, 986}, + dictWord{133, 10, 731}, + dictWord{ 
+ 6, + 0, + 723, + }, + dictWord{6, 0, 1408}, + dictWord{138, 0, 381}, + dictWord{135, 0, 1573}, + dictWord{134, 0, 1025}, + dictWord{4, 10, 626}, + dictWord{5, 10, 642}, + dictWord{ + 6, + 10, + 425, + }, + dictWord{10, 10, 202}, + dictWord{139, 10, 141}, + dictWord{4, 11, 93}, + dictWord{5, 11, 252}, + dictWord{6, 11, 229}, + dictWord{7, 11, 291}, + dictWord{ + 9, + 11, + 550, + }, + dictWord{139, 11, 644}, + dictWord{137, 11, 749}, + dictWord{137, 11, 162}, + dictWord{132, 11, 381}, + dictWord{135, 0, 1559}, + dictWord{ + 6, + 0, + 194, + }, + dictWord{7, 0, 133}, + dictWord{10, 0, 493}, + dictWord{10, 0, 570}, + dictWord{139, 0, 664}, + dictWord{5, 0, 24}, + dictWord{5, 0, 569}, + dictWord{6, 0, 3}, + dictWord{ + 6, + 0, + 119, + }, + dictWord{6, 0, 143}, + dictWord{6, 0, 440}, + dictWord{7, 0, 295}, + dictWord{7, 0, 599}, + dictWord{7, 0, 1686}, + dictWord{7, 0, 1854}, + dictWord{8, 0, 424}, + dictWord{ + 9, + 0, + 43, + }, + dictWord{9, 0, 584}, + dictWord{9, 0, 760}, + dictWord{10, 0, 148}, + dictWord{10, 0, 328}, + dictWord{11, 0, 159}, + dictWord{11, 0, 253}, + dictWord{11, 0, 506}, + dictWord{12, 0, 487}, + dictWord{140, 0, 531}, + dictWord{6, 0, 661}, + dictWord{134, 0, 1517}, + dictWord{136, 10, 835}, + dictWord{151, 10, 17}, + dictWord{5, 0, 14}, + dictWord{5, 0, 892}, + dictWord{6, 0, 283}, + dictWord{7, 0, 234}, + dictWord{136, 0, 537}, + dictWord{139, 0, 541}, + dictWord{4, 0, 126}, + dictWord{8, 0, 635}, + dictWord{ + 147, + 0, + 34, + }, + dictWord{4, 0, 316}, + dictWord{4, 0, 495}, + dictWord{135, 0, 1561}, + dictWord{4, 11, 187}, + dictWord{5, 11, 184}, + dictWord{5, 11, 690}, + dictWord{ + 7, + 11, + 1869, + }, + dictWord{138, 11, 756}, + dictWord{139, 11, 783}, + dictWord{4, 0, 998}, + dictWord{137, 0, 861}, + dictWord{136, 0, 1009}, + dictWord{139, 11, 292}, + dictWord{5, 11, 21}, + dictWord{6, 11, 77}, + dictWord{6, 11, 157}, + dictWord{7, 11, 974}, + dictWord{7, 11, 1301}, + dictWord{7, 11, 1339}, + dictWord{7, 11, 1490}, + dictWord{ + 7, + 
11, + 1873, + }, + dictWord{137, 11, 628}, + dictWord{7, 11, 1283}, + dictWord{9, 11, 227}, + dictWord{9, 11, 499}, + dictWord{10, 11, 341}, + dictWord{11, 11, 325}, + dictWord{11, 11, 408}, + dictWord{14, 11, 180}, + dictWord{15, 11, 144}, + dictWord{18, 11, 47}, + dictWord{147, 11, 49}, + dictWord{4, 0, 64}, + dictWord{5, 0, 352}, + dictWord{5, 0, 720}, + dictWord{6, 0, 368}, + dictWord{139, 0, 359}, + dictWord{5, 10, 384}, + dictWord{8, 10, 455}, + dictWord{140, 10, 48}, + dictWord{5, 10, 264}, + dictWord{ + 134, + 10, + 184, + }, + dictWord{7, 0, 1577}, + dictWord{10, 0, 304}, + dictWord{10, 0, 549}, + dictWord{12, 0, 365}, + dictWord{13, 0, 220}, + dictWord{13, 0, 240}, + dictWord{ + 142, + 0, + 33, + }, + dictWord{134, 0, 1107}, + dictWord{134, 0, 929}, + dictWord{135, 0, 1142}, + dictWord{6, 0, 175}, + dictWord{137, 0, 289}, + dictWord{5, 0, 432}, + dictWord{ + 133, + 0, + 913, + }, + dictWord{6, 0, 279}, + dictWord{7, 0, 219}, + dictWord{5, 10, 633}, + dictWord{135, 10, 1323}, + dictWord{7, 0, 785}, + dictWord{7, 10, 359}, + dictWord{ + 8, + 10, + 243, + }, + dictWord{140, 10, 175}, + dictWord{139, 0, 595}, + dictWord{132, 10, 105}, + dictWord{8, 11, 398}, + dictWord{9, 11, 681}, + dictWord{139, 11, 632}, + dictWord{140, 0, 80}, + dictWord{5, 0, 931}, + dictWord{134, 0, 1698}, + dictWord{142, 11, 241}, + dictWord{134, 11, 20}, + dictWord{134, 0, 1323}, + dictWord{11, 0, 526}, + dictWord{11, 0, 939}, + dictWord{141, 0, 290}, + dictWord{5, 0, 774}, + dictWord{6, 0, 780}, + dictWord{6, 0, 1637}, + dictWord{6, 0, 1686}, + dictWord{6, 0, 1751}, + dictWord{ + 8, + 0, + 559, + }, + dictWord{141, 0, 109}, + dictWord{141, 0, 127}, + dictWord{7, 0, 1167}, + dictWord{11, 0, 934}, + dictWord{13, 0, 391}, + dictWord{17, 0, 76}, + dictWord{ + 135, + 11, + 709, + }, + dictWord{135, 0, 963}, + dictWord{6, 0, 260}, + dictWord{135, 0, 1484}, + dictWord{134, 0, 573}, + dictWord{4, 10, 758}, + dictWord{139, 11, 941}, + dictWord{135, 10, 1649}, + dictWord{145, 11, 36}, + 
dictWord{4, 0, 292}, + dictWord{137, 0, 580}, + dictWord{4, 0, 736}, + dictWord{5, 0, 871}, + dictWord{6, 0, 1689}, + dictWord{135, 0, 1944}, + dictWord{7, 11, 945}, + dictWord{11, 11, 713}, + dictWord{139, 11, 744}, + dictWord{134, 0, 1164}, + dictWord{135, 11, 937}, + dictWord{ + 6, + 0, + 1922, + }, + dictWord{9, 0, 982}, + dictWord{15, 0, 173}, + dictWord{15, 0, 178}, + dictWord{15, 0, 200}, + dictWord{18, 0, 189}, + dictWord{18, 0, 207}, + dictWord{21, 0, 47}, + dictWord{135, 11, 1652}, + dictWord{7, 0, 1695}, + dictWord{139, 10, 128}, + dictWord{6, 0, 63}, + dictWord{135, 0, 920}, + dictWord{133, 0, 793}, + dictWord{ + 143, + 11, + 134, + }, + dictWord{133, 10, 918}, + dictWord{5, 0, 67}, + dictWord{6, 0, 62}, + dictWord{6, 0, 374}, + dictWord{135, 0, 1391}, + dictWord{9, 0, 790}, + dictWord{12, 0, 47}, + dictWord{4, 11, 579}, + dictWord{5, 11, 226}, + dictWord{5, 11, 323}, + dictWord{135, 11, 960}, + dictWord{10, 11, 784}, + dictWord{141, 11, 191}, + dictWord{4, 0, 391}, + dictWord{135, 0, 1169}, + dictWord{137, 0, 443}, + dictWord{13, 11, 232}, + dictWord{146, 11, 35}, + dictWord{132, 10, 340}, + dictWord{132, 0, 271}, + dictWord{ + 137, + 11, + 313, + }, + dictWord{5, 11, 973}, + dictWord{137, 11, 659}, + dictWord{134, 0, 1140}, + dictWord{6, 11, 135}, + dictWord{135, 11, 1176}, + dictWord{4, 0, 253}, + dictWord{5, 0, 544}, + dictWord{7, 0, 300}, + dictWord{137, 0, 340}, + dictWord{7, 0, 897}, + dictWord{5, 10, 985}, + dictWord{7, 10, 509}, + dictWord{145, 10, 96}, + dictWord{ + 138, + 11, + 735, + }, + dictWord{135, 10, 1919}, + dictWord{138, 0, 890}, + dictWord{5, 0, 818}, + dictWord{134, 0, 1122}, + dictWord{5, 0, 53}, + dictWord{5, 0, 541}, + dictWord{ + 6, + 0, + 94, + }, + dictWord{6, 0, 499}, + dictWord{7, 0, 230}, + dictWord{139, 0, 321}, + dictWord{4, 0, 920}, + dictWord{5, 0, 25}, + dictWord{5, 0, 790}, + dictWord{6, 0, 457}, + dictWord{ + 7, + 0, + 853, + }, + dictWord{8, 0, 788}, + dictWord{142, 11, 31}, + dictWord{132, 10, 247}, + 
dictWord{135, 11, 314}, + dictWord{132, 0, 468}, + dictWord{7, 0, 243}, + dictWord{ + 6, + 10, + 337, + }, + dictWord{7, 10, 494}, + dictWord{8, 10, 27}, + dictWord{8, 10, 599}, + dictWord{138, 10, 153}, + dictWord{4, 10, 184}, + dictWord{5, 10, 390}, + dictWord{ + 7, + 10, + 618, + }, + dictWord{7, 10, 1456}, + dictWord{139, 10, 710}, + dictWord{134, 0, 870}, + dictWord{134, 0, 1238}, + dictWord{134, 0, 1765}, + dictWord{10, 0, 853}, + dictWord{10, 0, 943}, + dictWord{14, 0, 437}, + dictWord{14, 0, 439}, + dictWord{14, 0, 443}, + dictWord{14, 0, 446}, + dictWord{14, 0, 452}, + dictWord{14, 0, 469}, + dictWord{ + 14, + 0, + 471, + }, + dictWord{14, 0, 473}, + dictWord{16, 0, 93}, + dictWord{16, 0, 102}, + dictWord{16, 0, 110}, + dictWord{148, 0, 121}, + dictWord{4, 0, 605}, + dictWord{ + 7, + 0, + 518, + }, + dictWord{7, 0, 1282}, + dictWord{7, 0, 1918}, + dictWord{10, 0, 180}, + dictWord{139, 0, 218}, + dictWord{133, 0, 822}, + dictWord{4, 0, 634}, + dictWord{ + 11, + 0, + 916, + }, + dictWord{142, 0, 419}, + dictWord{6, 11, 281}, + dictWord{7, 11, 6}, + dictWord{8, 11, 282}, + dictWord{8, 11, 480}, + dictWord{8, 11, 499}, + dictWord{9, 11, 198}, + dictWord{10, 11, 143}, + dictWord{10, 11, 169}, + dictWord{10, 11, 211}, + dictWord{10, 11, 417}, + dictWord{10, 11, 574}, + dictWord{11, 11, 147}, + dictWord{ + 11, + 11, + 395, + }, + dictWord{12, 11, 75}, + dictWord{12, 11, 407}, + dictWord{12, 11, 608}, + dictWord{13, 11, 500}, + dictWord{142, 11, 251}, + dictWord{134, 0, 898}, + dictWord{ + 6, + 0, + 36, + }, + dictWord{7, 0, 658}, + dictWord{8, 0, 454}, + dictWord{150, 11, 48}, + dictWord{133, 11, 674}, + dictWord{135, 11, 1776}, + dictWord{4, 11, 419}, + dictWord{ + 10, + 10, + 227, + }, + dictWord{11, 10, 497}, + dictWord{11, 10, 709}, + dictWord{140, 10, 415}, + dictWord{6, 10, 360}, + dictWord{7, 10, 1664}, + dictWord{136, 10, 478}, + dictWord{137, 0, 806}, + dictWord{12, 11, 508}, + dictWord{14, 11, 102}, + dictWord{14, 11, 226}, + dictWord{144, 11, 57}, + 
dictWord{135, 11, 1123}, + dictWord{ + 4, + 11, + 138, + }, + dictWord{7, 11, 1012}, + dictWord{7, 11, 1280}, + dictWord{137, 11, 76}, + dictWord{5, 11, 29}, + dictWord{140, 11, 638}, + dictWord{136, 10, 699}, + dictWord{134, 0, 1326}, + dictWord{132, 0, 104}, + dictWord{135, 11, 735}, + dictWord{132, 10, 739}, + dictWord{134, 0, 1331}, + dictWord{7, 0, 260}, + dictWord{ + 135, + 11, + 260, + }, + dictWord{135, 11, 1063}, + dictWord{7, 0, 45}, + dictWord{9, 0, 542}, + dictWord{9, 0, 566}, + dictWord{10, 0, 728}, + dictWord{137, 10, 869}, + dictWord{ + 4, + 10, + 67, + }, + dictWord{5, 10, 422}, + dictWord{7, 10, 1037}, + dictWord{7, 10, 1289}, + dictWord{7, 10, 1555}, + dictWord{9, 10, 741}, + dictWord{145, 10, 108}, + dictWord{ + 139, + 0, + 263, + }, + dictWord{134, 0, 1516}, + dictWord{14, 0, 146}, + dictWord{15, 0, 42}, + dictWord{16, 0, 23}, + dictWord{17, 0, 86}, + dictWord{146, 0, 17}, + dictWord{ + 138, + 0, + 468, + }, + dictWord{136, 0, 1005}, + dictWord{4, 11, 17}, + dictWord{5, 11, 23}, + dictWord{7, 11, 995}, + dictWord{11, 11, 383}, + dictWord{11, 11, 437}, + dictWord{ + 12, + 11, + 460, + }, + dictWord{140, 11, 532}, + dictWord{7, 0, 87}, + dictWord{142, 0, 288}, + dictWord{138, 10, 96}, + dictWord{135, 11, 626}, + dictWord{144, 10, 26}, + dictWord{ + 7, + 0, + 988, + }, + dictWord{7, 0, 1939}, + dictWord{9, 0, 64}, + dictWord{9, 0, 502}, + dictWord{12, 0, 22}, + dictWord{12, 0, 34}, + dictWord{13, 0, 12}, + dictWord{13, 0, 234}, + dictWord{147, 0, 77}, + dictWord{13, 0, 133}, + dictWord{8, 10, 203}, + dictWord{11, 10, 823}, + dictWord{11, 10, 846}, + dictWord{12, 10, 482}, + dictWord{13, 10, 277}, + dictWord{13, 10, 302}, + dictWord{13, 10, 464}, + dictWord{14, 10, 205}, + dictWord{142, 10, 221}, + dictWord{4, 10, 449}, + dictWord{133, 10, 718}, + dictWord{ + 135, + 0, + 141, + }, + dictWord{6, 0, 1842}, + dictWord{136, 0, 872}, + dictWord{8, 11, 70}, + dictWord{12, 11, 171}, + dictWord{141, 11, 272}, + dictWord{4, 10, 355}, + dictWord{ + 6, + 10, + 
311, + }, + dictWord{9, 10, 256}, + dictWord{138, 10, 404}, + dictWord{132, 0, 619}, + dictWord{137, 0, 261}, + dictWord{10, 11, 233}, + dictWord{10, 10, 758}, + dictWord{139, 11, 76}, + dictWord{5, 0, 246}, + dictWord{8, 0, 189}, + dictWord{9, 0, 355}, + dictWord{9, 0, 512}, + dictWord{10, 0, 124}, + dictWord{10, 0, 453}, + dictWord{ + 11, + 0, + 143, + }, + dictWord{11, 0, 416}, + dictWord{11, 0, 859}, + dictWord{141, 0, 341}, + dictWord{134, 11, 442}, + dictWord{133, 10, 827}, + dictWord{5, 10, 64}, + dictWord{ + 140, + 10, + 581, + }, + dictWord{4, 10, 442}, + dictWord{7, 10, 1047}, + dictWord{7, 10, 1352}, + dictWord{135, 10, 1643}, + dictWord{134, 11, 1709}, + dictWord{5, 0, 678}, + dictWord{6, 0, 305}, + dictWord{7, 0, 775}, + dictWord{7, 0, 1065}, + dictWord{133, 10, 977}, + dictWord{11, 11, 69}, + dictWord{12, 11, 105}, + dictWord{12, 11, 117}, + dictWord{13, 11, 213}, + dictWord{14, 11, 13}, + dictWord{14, 11, 62}, + dictWord{14, 11, 177}, + dictWord{14, 11, 421}, + dictWord{15, 11, 19}, + dictWord{146, 11, 141}, + dictWord{137, 11, 309}, + dictWord{5, 0, 35}, + dictWord{7, 0, 862}, + dictWord{7, 0, 1886}, + dictWord{138, 0, 179}, + dictWord{136, 0, 285}, + dictWord{132, 0, 517}, + dictWord{7, 11, 976}, + dictWord{9, 11, 146}, + dictWord{10, 11, 206}, + dictWord{10, 11, 596}, + dictWord{13, 11, 218}, + dictWord{142, 11, 153}, + dictWord{ + 132, + 10, + 254, + }, + dictWord{6, 0, 214}, + dictWord{12, 0, 540}, + dictWord{4, 10, 275}, + dictWord{7, 10, 1219}, + dictWord{140, 10, 376}, + dictWord{8, 0, 667}, + dictWord{ + 11, + 0, + 403, + }, + dictWord{146, 0, 83}, + dictWord{12, 0, 74}, + dictWord{10, 11, 648}, + dictWord{11, 11, 671}, + dictWord{143, 11, 46}, + dictWord{135, 0, 125}, + dictWord{ + 134, + 10, + 1753, + }, + dictWord{133, 0, 761}, + dictWord{6, 0, 912}, + dictWord{4, 11, 518}, + dictWord{6, 10, 369}, + dictWord{6, 10, 502}, + dictWord{7, 10, 1036}, + dictWord{ + 7, + 11, + 1136, + }, + dictWord{8, 10, 348}, + dictWord{9, 10, 452}, + 
dictWord{10, 10, 26}, + dictWord{11, 10, 224}, + dictWord{11, 10, 387}, + dictWord{11, 10, 772}, + dictWord{12, 10, 95}, + dictWord{12, 10, 629}, + dictWord{13, 10, 195}, + dictWord{13, 10, 207}, + dictWord{13, 10, 241}, + dictWord{14, 10, 260}, + dictWord{14, 10, 270}, + dictWord{143, 10, 140}, + dictWord{10, 0, 131}, + dictWord{140, 0, 72}, + dictWord{132, 10, 269}, + dictWord{5, 10, 480}, + dictWord{7, 10, 532}, + dictWord{ + 7, + 10, + 1197, + }, + dictWord{7, 10, 1358}, + dictWord{8, 10, 291}, + dictWord{11, 10, 349}, + dictWord{142, 10, 396}, + dictWord{8, 11, 689}, + dictWord{137, 11, 863}, + dictWord{ + 8, + 0, + 333, + }, + dictWord{138, 0, 182}, + dictWord{4, 11, 18}, + dictWord{7, 11, 145}, + dictWord{7, 11, 444}, + dictWord{7, 11, 1278}, + dictWord{8, 11, 49}, + dictWord{ + 8, + 11, + 400, + }, + dictWord{9, 11, 71}, + dictWord{9, 11, 250}, + dictWord{10, 11, 459}, + dictWord{12, 11, 160}, + dictWord{144, 11, 24}, + dictWord{14, 11, 35}, + dictWord{ + 142, + 11, + 191, + }, + dictWord{135, 11, 1864}, + dictWord{135, 0, 1338}, + dictWord{148, 10, 15}, + dictWord{14, 0, 94}, + dictWord{15, 0, 65}, + dictWord{16, 0, 4}, + dictWord{ + 16, + 0, + 77, + }, + dictWord{16, 0, 80}, + dictWord{145, 0, 5}, + dictWord{12, 11, 82}, + dictWord{143, 11, 36}, + dictWord{133, 11, 1010}, + dictWord{133, 0, 449}, + dictWord{ + 133, + 0, + 646, + }, + dictWord{7, 0, 86}, + dictWord{8, 0, 103}, + dictWord{135, 10, 657}, + dictWord{7, 0, 2028}, + dictWord{138, 0, 641}, + dictWord{136, 10, 533}, + dictWord{ + 134, + 0, + 1, + }, + dictWord{139, 11, 970}, + dictWord{5, 11, 87}, + dictWord{7, 11, 313}, + dictWord{7, 11, 1103}, + dictWord{10, 11, 112}, + dictWord{10, 11, 582}, + dictWord{ + 11, + 11, + 389, + }, + dictWord{11, 11, 813}, + dictWord{12, 11, 385}, + dictWord{13, 11, 286}, + dictWord{14, 11, 124}, + dictWord{146, 11, 108}, + dictWord{6, 0, 869}, + dictWord{ + 132, + 11, + 267, + }, + dictWord{6, 0, 277}, + dictWord{7, 0, 1274}, + dictWord{7, 0, 1386}, + 
dictWord{146, 0, 87}, + dictWord{6, 0, 187}, + dictWord{7, 0, 39}, + dictWord{7, 0, 1203}, + dictWord{8, 0, 380}, + dictWord{14, 0, 117}, + dictWord{149, 0, 28}, + dictWord{4, 10, 211}, + dictWord{4, 10, 332}, + dictWord{5, 10, 335}, + dictWord{6, 10, 238}, + dictWord{ + 7, + 10, + 269, + }, + dictWord{7, 10, 811}, + dictWord{7, 10, 1797}, + dictWord{8, 10, 836}, + dictWord{9, 10, 507}, + dictWord{141, 10, 242}, + dictWord{4, 0, 785}, + dictWord{ + 5, + 0, + 368, + }, + dictWord{6, 0, 297}, + dictWord{7, 0, 793}, + dictWord{139, 0, 938}, + dictWord{7, 0, 464}, + dictWord{8, 0, 558}, + dictWord{11, 0, 105}, + dictWord{12, 0, 231}, + dictWord{14, 0, 386}, + dictWord{15, 0, 102}, + dictWord{148, 0, 75}, + dictWord{133, 10, 1009}, + dictWord{8, 0, 877}, + dictWord{140, 0, 731}, + dictWord{ + 139, + 11, + 289, + }, + dictWord{10, 11, 249}, + dictWord{139, 11, 209}, + dictWord{132, 11, 561}, + dictWord{134, 0, 1608}, + dictWord{132, 11, 760}, + dictWord{134, 0, 1429}, + dictWord{9, 11, 154}, + dictWord{140, 11, 485}, + dictWord{5, 10, 228}, + dictWord{6, 10, 203}, + dictWord{7, 10, 156}, + dictWord{8, 10, 347}, + dictWord{ + 137, + 10, + 265, + }, + dictWord{7, 0, 1010}, + dictWord{11, 0, 733}, + dictWord{11, 0, 759}, + dictWord{13, 0, 34}, + dictWord{14, 0, 427}, + dictWord{146, 0, 45}, + dictWord{7, 10, 1131}, + dictWord{135, 10, 1468}, + dictWord{136, 11, 255}, + dictWord{7, 0, 1656}, + dictWord{9, 0, 369}, + dictWord{10, 0, 338}, + dictWord{10, 0, 490}, + dictWord{ + 11, + 0, + 154, + }, + dictWord{11, 0, 545}, + dictWord{11, 0, 775}, + dictWord{13, 0, 77}, + dictWord{141, 0, 274}, + dictWord{133, 11, 621}, + dictWord{134, 0, 1038}, + dictWord{ + 4, + 11, + 368, + }, + dictWord{135, 11, 641}, + dictWord{6, 0, 2010}, + dictWord{8, 0, 979}, + dictWord{8, 0, 985}, + dictWord{10, 0, 951}, + dictWord{138, 0, 1011}, + dictWord{ + 134, + 0, + 1005, + }, + dictWord{19, 0, 121}, + dictWord{5, 10, 291}, + dictWord{5, 10, 318}, + dictWord{7, 10, 765}, + dictWord{9, 10, 389}, + 
dictWord{140, 10, 548}, + dictWord{ + 5, + 0, + 20, + }, + dictWord{6, 0, 298}, + dictWord{7, 0, 659}, + dictWord{137, 0, 219}, + dictWord{7, 0, 1440}, + dictWord{11, 0, 854}, + dictWord{11, 0, 872}, + dictWord{11, 0, 921}, + dictWord{12, 0, 551}, + dictWord{13, 0, 472}, + dictWord{142, 0, 367}, + dictWord{5, 0, 490}, + dictWord{6, 0, 615}, + dictWord{6, 0, 620}, + dictWord{135, 0, 683}, + dictWord{ + 6, + 0, + 1070, + }, + dictWord{134, 0, 1597}, + dictWord{139, 0, 522}, + dictWord{132, 0, 439}, + dictWord{136, 0, 669}, + dictWord{6, 0, 766}, + dictWord{6, 0, 1143}, + dictWord{ + 6, + 0, + 1245, + }, + dictWord{10, 10, 525}, + dictWord{139, 10, 82}, + dictWord{9, 11, 92}, + dictWord{147, 11, 91}, + dictWord{6, 0, 668}, + dictWord{134, 0, 1218}, + dictWord{ + 6, + 11, + 525, + }, + dictWord{9, 11, 876}, + dictWord{140, 11, 284}, + dictWord{132, 0, 233}, + dictWord{136, 0, 547}, + dictWord{132, 10, 422}, + dictWord{5, 10, 355}, + dictWord{145, 10, 0}, + dictWord{6, 11, 300}, + dictWord{135, 11, 1515}, + dictWord{4, 0, 482}, + dictWord{137, 10, 905}, + dictWord{4, 0, 886}, + dictWord{7, 0, 346}, + dictWord{133, 11, 594}, + dictWord{133, 10, 865}, + dictWord{5, 10, 914}, + dictWord{134, 10, 1625}, + dictWord{135, 0, 334}, + dictWord{5, 0, 795}, + dictWord{ + 6, + 0, + 1741, + }, + dictWord{133, 10, 234}, + dictWord{135, 10, 1383}, + dictWord{6, 11, 1641}, + dictWord{136, 11, 820}, + dictWord{135, 0, 371}, + dictWord{7, 11, 1313}, + dictWord{138, 11, 660}, + dictWord{135, 10, 1312}, + dictWord{135, 0, 622}, + dictWord{7, 0, 625}, + dictWord{135, 0, 1750}, + dictWord{135, 0, 339}, + dictWord{ + 4, + 0, + 203, + }, + dictWord{135, 0, 1936}, + dictWord{15, 0, 29}, + dictWord{16, 0, 38}, + dictWord{15, 11, 29}, + dictWord{144, 11, 38}, + dictWord{5, 0, 338}, + dictWord{ + 135, + 0, + 1256, + }, + dictWord{135, 10, 1493}, + dictWord{10, 0, 130}, + dictWord{6, 10, 421}, + dictWord{7, 10, 61}, + dictWord{7, 10, 1540}, + dictWord{138, 10, 501}, + dictWord{ + 6, + 11, + 389, + 
}, + dictWord{7, 11, 149}, + dictWord{9, 11, 142}, + dictWord{138, 11, 94}, + dictWord{137, 10, 341}, + dictWord{11, 0, 678}, + dictWord{12, 0, 307}, + dictWord{142, 10, 98}, + dictWord{6, 11, 8}, + dictWord{7, 11, 1881}, + dictWord{136, 11, 91}, + dictWord{135, 0, 2044}, + dictWord{6, 0, 770}, + dictWord{6, 0, 802}, + dictWord{ + 6, + 0, + 812, + }, + dictWord{7, 0, 311}, + dictWord{9, 0, 308}, + dictWord{12, 0, 255}, + dictWord{6, 10, 102}, + dictWord{7, 10, 72}, + dictWord{15, 10, 142}, + dictWord{ + 147, + 10, + 67, + }, + dictWord{151, 10, 30}, + dictWord{135, 10, 823}, + dictWord{135, 0, 1266}, + dictWord{135, 11, 1746}, + dictWord{135, 10, 1870}, + dictWord{4, 0, 400}, + dictWord{5, 0, 267}, + dictWord{135, 0, 232}, + dictWord{7, 11, 24}, + dictWord{11, 11, 542}, + dictWord{139, 11, 852}, + dictWord{135, 11, 1739}, + dictWord{4, 11, 503}, + dictWord{135, 11, 1661}, + dictWord{5, 11, 130}, + dictWord{7, 11, 1314}, + dictWord{9, 11, 610}, + dictWord{10, 11, 718}, + dictWord{11, 11, 601}, + dictWord{ + 11, + 11, + 819, + }, + dictWord{11, 11, 946}, + dictWord{140, 11, 536}, + dictWord{10, 11, 149}, + dictWord{11, 11, 280}, + dictWord{142, 11, 336}, + dictWord{7, 0, 739}, + dictWord{11, 0, 690}, + dictWord{7, 11, 1946}, + dictWord{8, 10, 48}, + dictWord{8, 10, 88}, + dictWord{8, 10, 582}, + dictWord{8, 10, 681}, + dictWord{9, 10, 373}, + dictWord{ + 9, + 10, + 864, + }, + dictWord{11, 10, 157}, + dictWord{11, 10, 843}, + dictWord{148, 10, 27}, + dictWord{134, 0, 990}, + dictWord{4, 10, 88}, + dictWord{5, 10, 137}, + dictWord{ + 5, + 10, + 174, + }, + dictWord{5, 10, 777}, + dictWord{6, 10, 1664}, + dictWord{6, 10, 1725}, + dictWord{7, 10, 77}, + dictWord{7, 10, 426}, + dictWord{7, 10, 1317}, + dictWord{ + 7, + 10, + 1355, + }, + dictWord{8, 10, 126}, + dictWord{8, 10, 563}, + dictWord{9, 10, 523}, + dictWord{9, 10, 750}, + dictWord{10, 10, 310}, + dictWord{10, 10, 836}, + dictWord{ + 11, + 10, + 42, + }, + dictWord{11, 10, 318}, + dictWord{11, 10, 731}, + 
dictWord{12, 10, 68}, + dictWord{12, 10, 92}, + dictWord{12, 10, 507}, + dictWord{12, 10, 692}, + dictWord{ + 13, + 10, + 81, + }, + dictWord{13, 10, 238}, + dictWord{13, 10, 374}, + dictWord{14, 10, 436}, + dictWord{18, 10, 138}, + dictWord{19, 10, 78}, + dictWord{19, 10, 111}, + dictWord{20, 10, 55}, + dictWord{20, 10, 77}, + dictWord{148, 10, 92}, + dictWord{141, 10, 418}, + dictWord{7, 0, 1831}, + dictWord{132, 10, 938}, + dictWord{6, 0, 776}, + dictWord{134, 0, 915}, + dictWord{138, 10, 351}, + dictWord{5, 11, 348}, + dictWord{6, 11, 522}, + dictWord{6, 10, 1668}, + dictWord{7, 10, 1499}, + dictWord{8, 10, 117}, + dictWord{9, 10, 314}, + dictWord{138, 10, 174}, + dictWord{135, 10, 707}, + dictWord{132, 0, 613}, + dictWord{133, 10, 403}, + dictWord{132, 11, 392}, + dictWord{ + 5, + 11, + 433, + }, + dictWord{9, 11, 633}, + dictWord{139, 11, 629}, + dictWord{133, 0, 763}, + dictWord{132, 0, 878}, + dictWord{132, 0, 977}, + dictWord{132, 0, 100}, + dictWord{6, 0, 463}, + dictWord{4, 10, 44}, + dictWord{5, 10, 311}, + dictWord{7, 10, 639}, + dictWord{7, 10, 762}, + dictWord{7, 10, 1827}, + dictWord{9, 10, 8}, + dictWord{ + 9, + 10, + 462, + }, + dictWord{148, 10, 83}, + dictWord{134, 11, 234}, + dictWord{4, 10, 346}, + dictWord{7, 10, 115}, + dictWord{9, 10, 180}, + dictWord{9, 10, 456}, + dictWord{ + 138, + 10, + 363, + }, + dictWord{5, 0, 362}, + dictWord{5, 0, 443}, + dictWord{6, 0, 318}, + dictWord{7, 0, 1019}, + dictWord{139, 0, 623}, + dictWord{5, 0, 463}, + dictWord{8, 0, 296}, + dictWord{7, 11, 140}, + dictWord{7, 11, 1950}, + dictWord{8, 11, 680}, + dictWord{11, 11, 817}, + dictWord{147, 11, 88}, + dictWord{7, 11, 1222}, + dictWord{ + 138, + 11, + 386, + }, + dictWord{142, 0, 137}, + dictWord{132, 0, 454}, + dictWord{7, 0, 1914}, + dictWord{6, 11, 5}, + dictWord{7, 10, 1051}, + dictWord{9, 10, 545}, + dictWord{ + 11, + 11, + 249, + }, + dictWord{12, 11, 313}, + dictWord{16, 11, 66}, + dictWord{145, 11, 26}, + dictWord{135, 0, 1527}, + dictWord{145, 0, 
58}, + dictWord{148, 11, 59}, + dictWord{ + 5, + 0, + 48, + }, + dictWord{5, 0, 404}, + dictWord{6, 0, 557}, + dictWord{7, 0, 458}, + dictWord{8, 0, 597}, + dictWord{10, 0, 455}, + dictWord{10, 0, 606}, + dictWord{11, 0, 49}, + dictWord{ + 11, + 0, + 548, + }, + dictWord{12, 0, 476}, + dictWord{13, 0, 18}, + dictWord{141, 0, 450}, + dictWord{5, 11, 963}, + dictWord{134, 11, 1773}, + dictWord{133, 0, 729}, + dictWord{138, 11, 586}, + dictWord{5, 0, 442}, + dictWord{135, 0, 1984}, + dictWord{134, 0, 449}, + dictWord{144, 0, 40}, + dictWord{4, 0, 853}, + dictWord{7, 11, 180}, + dictWord{8, 11, 509}, + dictWord{136, 11, 792}, + dictWord{6, 10, 185}, + dictWord{7, 10, 1899}, + dictWord{9, 10, 875}, + dictWord{139, 10, 673}, + dictWord{ + 134, + 11, + 524, + }, + dictWord{12, 0, 227}, + dictWord{4, 10, 327}, + dictWord{5, 10, 478}, + dictWord{7, 10, 1332}, + dictWord{136, 10, 753}, + dictWord{6, 0, 1491}, + dictWord{ + 5, + 10, + 1020, + }, + dictWord{133, 10, 1022}, + dictWord{4, 10, 103}, + dictWord{133, 10, 401}, + dictWord{132, 11, 931}, + dictWord{4, 10, 499}, + dictWord{135, 10, 1421}, + dictWord{5, 0, 55}, + dictWord{7, 0, 376}, + dictWord{140, 0, 161}, + dictWord{133, 0, 450}, + dictWord{6, 0, 1174}, + dictWord{134, 0, 1562}, + dictWord{10, 0, 62}, + dictWord{13, 0, 400}, + dictWord{135, 11, 1837}, + dictWord{140, 0, 207}, + dictWord{135, 0, 869}, + dictWord{4, 11, 773}, + dictWord{5, 11, 618}, + dictWord{ + 137, + 11, + 756, + }, + dictWord{132, 10, 96}, + dictWord{4, 0, 213}, + dictWord{7, 0, 223}, + dictWord{8, 0, 80}, + dictWord{135, 10, 968}, + dictWord{4, 11, 90}, + dictWord{5, 11, 337}, + dictWord{5, 11, 545}, + dictWord{7, 11, 754}, + dictWord{9, 11, 186}, + dictWord{10, 11, 72}, + dictWord{10, 11, 782}, + dictWord{11, 11, 513}, + dictWord{11, 11, 577}, + dictWord{11, 11, 610}, + dictWord{11, 11, 889}, + dictWord{11, 11, 961}, + dictWord{12, 11, 354}, + dictWord{12, 11, 362}, + dictWord{12, 11, 461}, + dictWord{ + 12, + 11, + 595, + }, + dictWord{13, 11, 
79}, + dictWord{143, 11, 121}, + dictWord{7, 0, 381}, + dictWord{7, 0, 806}, + dictWord{7, 0, 820}, + dictWord{8, 0, 354}, + dictWord{8, 0, 437}, + dictWord{8, 0, 787}, + dictWord{9, 0, 657}, + dictWord{10, 0, 58}, + dictWord{10, 0, 339}, + dictWord{10, 0, 749}, + dictWord{11, 0, 914}, + dictWord{12, 0, 162}, + dictWord{ + 13, + 0, + 75, + }, + dictWord{14, 0, 106}, + dictWord{14, 0, 198}, + dictWord{14, 0, 320}, + dictWord{14, 0, 413}, + dictWord{146, 0, 43}, + dictWord{136, 0, 747}, + dictWord{ + 136, + 0, + 954, + }, + dictWord{134, 0, 1073}, + dictWord{135, 0, 556}, + dictWord{7, 11, 151}, + dictWord{9, 11, 329}, + dictWord{139, 11, 254}, + dictWord{5, 0, 692}, + dictWord{ + 134, + 0, + 1395, + }, + dictWord{6, 10, 563}, + dictWord{137, 10, 224}, + dictWord{134, 0, 191}, + dictWord{132, 0, 804}, + dictWord{9, 11, 187}, + dictWord{10, 11, 36}, + dictWord{17, 11, 44}, + dictWord{146, 11, 64}, + dictWord{7, 11, 165}, + dictWord{7, 11, 919}, + dictWord{136, 11, 517}, + dictWord{4, 11, 506}, + dictWord{5, 11, 295}, + dictWord{7, 11, 1680}, + dictWord{15, 11, 14}, + dictWord{144, 11, 5}, + dictWord{4, 0, 706}, + dictWord{6, 0, 162}, + dictWord{7, 0, 1960}, + dictWord{136, 0, 831}, + dictWord{ + 135, + 11, + 1376, + }, + dictWord{7, 11, 987}, + dictWord{9, 11, 688}, + dictWord{10, 11, 522}, + dictWord{11, 11, 788}, + dictWord{140, 11, 566}, + dictWord{150, 0, 35}, + dictWord{138, 0, 426}, + dictWord{135, 0, 1235}, + dictWord{135, 11, 1741}, + dictWord{7, 11, 389}, + dictWord{7, 11, 700}, + dictWord{7, 11, 940}, + dictWord{ + 8, + 11, + 514, + }, + dictWord{9, 11, 116}, + dictWord{9, 11, 535}, + dictWord{10, 11, 118}, + dictWord{11, 11, 107}, + dictWord{11, 11, 148}, + dictWord{11, 11, 922}, + dictWord{ + 12, + 11, + 254, + }, + dictWord{12, 11, 421}, + dictWord{142, 11, 238}, + dictWord{134, 0, 1234}, + dictWord{132, 11, 743}, + dictWord{4, 10, 910}, + dictWord{5, 10, 832}, + dictWord{135, 11, 1335}, + dictWord{141, 0, 96}, + dictWord{135, 11, 185}, + dictWord{146, 0, 
149}, + dictWord{4, 0, 204}, + dictWord{137, 0, 902}, + dictWord{ + 4, + 11, + 784, + }, + dictWord{133, 11, 745}, + dictWord{136, 0, 833}, + dictWord{136, 0, 949}, + dictWord{7, 0, 366}, + dictWord{9, 0, 287}, + dictWord{12, 0, 199}, + dictWord{ + 12, + 0, + 556, + }, + dictWord{12, 0, 577}, + dictWord{5, 11, 81}, + dictWord{7, 11, 146}, + dictWord{7, 11, 1342}, + dictWord{7, 11, 1446}, + dictWord{8, 11, 53}, + dictWord{8, 11, 561}, + dictWord{8, 11, 694}, + dictWord{8, 11, 754}, + dictWord{9, 11, 97}, + dictWord{9, 11, 115}, + dictWord{9, 11, 894}, + dictWord{10, 11, 462}, + dictWord{10, 11, 813}, + dictWord{11, 11, 230}, + dictWord{11, 11, 657}, + dictWord{11, 11, 699}, + dictWord{11, 11, 748}, + dictWord{12, 11, 119}, + dictWord{12, 11, 200}, + dictWord{ + 12, + 11, + 283, + }, + dictWord{14, 11, 273}, + dictWord{145, 11, 15}, + dictWord{5, 11, 408}, + dictWord{137, 11, 747}, + dictWord{9, 11, 498}, + dictWord{140, 11, 181}, + dictWord{ + 6, + 0, + 2020, + }, + dictWord{136, 0, 992}, + dictWord{5, 0, 356}, + dictWord{135, 0, 224}, + dictWord{134, 0, 784}, + dictWord{7, 0, 630}, + dictWord{9, 0, 567}, + dictWord{ + 11, + 0, + 150, + }, + dictWord{11, 0, 444}, + dictWord{13, 0, 119}, + dictWord{8, 10, 528}, + dictWord{137, 10, 348}, + dictWord{134, 0, 539}, + dictWord{4, 10, 20}, + dictWord{ + 133, + 10, + 616, + }, + dictWord{142, 0, 27}, + dictWord{7, 11, 30}, + dictWord{8, 11, 86}, + dictWord{8, 11, 315}, + dictWord{8, 11, 700}, + dictWord{9, 11, 576}, + dictWord{9, 11, 858}, + dictWord{11, 11, 310}, + dictWord{11, 11, 888}, + dictWord{11, 11, 904}, + dictWord{12, 11, 361}, + dictWord{141, 11, 248}, + dictWord{138, 11, 839}, + dictWord{ + 134, + 0, + 755, + }, + dictWord{134, 0, 1063}, + dictWord{7, 10, 1091}, + dictWord{135, 10, 1765}, + dictWord{134, 11, 428}, + dictWord{7, 11, 524}, + dictWord{8, 11, 169}, + dictWord{8, 11, 234}, + dictWord{9, 11, 480}, + dictWord{138, 11, 646}, + dictWord{139, 0, 814}, + dictWord{7, 11, 1462}, + dictWord{139, 11, 659}, + 
dictWord{ + 4, + 10, + 26, + }, + dictWord{5, 10, 429}, + dictWord{6, 10, 245}, + dictWord{7, 10, 704}, + dictWord{7, 10, 1379}, + dictWord{135, 10, 1474}, + dictWord{7, 11, 1205}, + dictWord{ + 138, + 11, + 637, + }, + dictWord{139, 11, 803}, + dictWord{132, 10, 621}, + dictWord{136, 0, 987}, + dictWord{4, 11, 266}, + dictWord{8, 11, 4}, + dictWord{9, 11, 39}, + dictWord{ + 10, + 11, + 166, + }, + dictWord{11, 11, 918}, + dictWord{12, 11, 635}, + dictWord{20, 11, 10}, + dictWord{22, 11, 27}, + dictWord{150, 11, 43}, + dictWord{4, 0, 235}, + dictWord{ + 135, + 0, + 255, + }, + dictWord{4, 0, 194}, + dictWord{5, 0, 584}, + dictWord{6, 0, 384}, + dictWord{7, 0, 583}, + dictWord{10, 0, 761}, + dictWord{11, 0, 760}, + dictWord{139, 0, 851}, + dictWord{133, 10, 542}, + dictWord{134, 0, 1086}, + dictWord{133, 10, 868}, + dictWord{8, 0, 1016}, + dictWord{136, 0, 1018}, + dictWord{7, 0, 1396}, + dictWord{ + 7, + 11, + 1396, + }, + dictWord{136, 10, 433}, + dictWord{135, 10, 1495}, + dictWord{138, 10, 215}, + dictWord{141, 10, 124}, + dictWord{7, 11, 157}, + dictWord{ + 8, + 11, + 279, + }, + dictWord{9, 11, 759}, + dictWord{16, 11, 31}, + dictWord{16, 11, 39}, + dictWord{16, 11, 75}, + dictWord{18, 11, 24}, + dictWord{20, 11, 42}, + dictWord{152, 11, 1}, + dictWord{5, 0, 562}, + dictWord{134, 11, 604}, + dictWord{134, 0, 913}, + dictWord{5, 0, 191}, + dictWord{137, 0, 271}, + dictWord{4, 0, 470}, + dictWord{6, 0, 153}, + dictWord{7, 0, 1503}, + dictWord{7, 0, 1923}, + dictWord{10, 0, 701}, + dictWord{11, 0, 132}, + dictWord{11, 0, 227}, + dictWord{11, 0, 320}, + dictWord{11, 0, 436}, + dictWord{ + 11, + 0, + 525, + }, + dictWord{11, 0, 855}, + dictWord{11, 0, 873}, + dictWord{12, 0, 41}, + dictWord{12, 0, 286}, + dictWord{13, 0, 103}, + dictWord{13, 0, 284}, + dictWord{ + 14, + 0, + 255, + }, + dictWord{14, 0, 262}, + dictWord{15, 0, 117}, + dictWord{143, 0, 127}, + dictWord{7, 0, 475}, + dictWord{12, 0, 45}, + dictWord{147, 10, 112}, + dictWord{ + 132, + 11, + 567, + }, + 
dictWord{137, 11, 859}, + dictWord{6, 0, 713}, + dictWord{6, 0, 969}, + dictWord{6, 0, 1290}, + dictWord{134, 0, 1551}, + dictWord{133, 0, 327}, + dictWord{ + 6, + 0, + 552, + }, + dictWord{6, 0, 1292}, + dictWord{7, 0, 1754}, + dictWord{137, 0, 604}, + dictWord{4, 0, 223}, + dictWord{6, 0, 359}, + dictWord{11, 0, 3}, + dictWord{13, 0, 108}, + dictWord{14, 0, 89}, + dictWord{16, 0, 22}, + dictWord{5, 11, 762}, + dictWord{7, 11, 1880}, + dictWord{9, 11, 680}, + dictWord{139, 11, 798}, + dictWord{5, 0, 80}, + dictWord{ + 6, + 0, + 405, + }, + dictWord{7, 0, 403}, + dictWord{7, 0, 1502}, + dictWord{8, 0, 456}, + dictWord{9, 0, 487}, + dictWord{9, 0, 853}, + dictWord{9, 0, 889}, + dictWord{10, 0, 309}, + dictWord{ + 11, + 0, + 721, + }, + dictWord{11, 0, 994}, + dictWord{12, 0, 430}, + dictWord{141, 0, 165}, + dictWord{133, 11, 298}, + dictWord{132, 10, 647}, + dictWord{134, 0, 2016}, + dictWord{18, 10, 10}, + dictWord{146, 11, 10}, + dictWord{4, 0, 453}, + dictWord{5, 0, 887}, + dictWord{6, 0, 535}, + dictWord{8, 0, 6}, + dictWord{8, 0, 543}, + dictWord{ + 136, + 0, + 826, + }, + dictWord{136, 0, 975}, + dictWord{10, 0, 961}, + dictWord{138, 0, 962}, + dictWord{138, 10, 220}, + dictWord{6, 0, 1891}, + dictWord{6, 0, 1893}, + dictWord{ + 9, + 0, + 916, + }, + dictWord{9, 0, 965}, + dictWord{9, 0, 972}, + dictWord{12, 0, 801}, + dictWord{12, 0, 859}, + dictWord{12, 0, 883}, + dictWord{15, 0, 226}, + dictWord{149, 0, 51}, + dictWord{132, 10, 109}, + dictWord{135, 11, 267}, + dictWord{7, 11, 92}, + dictWord{7, 11, 182}, + dictWord{8, 11, 453}, + dictWord{9, 11, 204}, + dictWord{11, 11, 950}, + dictWord{12, 11, 94}, + dictWord{12, 11, 644}, + dictWord{16, 11, 20}, + dictWord{16, 11, 70}, + dictWord{16, 11, 90}, + dictWord{147, 11, 55}, + dictWord{ + 134, + 10, + 1746, + }, + dictWord{6, 11, 71}, + dictWord{7, 11, 845}, + dictWord{7, 11, 1308}, + dictWord{8, 11, 160}, + dictWord{137, 11, 318}, + dictWord{5, 0, 101}, + dictWord{6, 0, 88}, + dictWord{7, 0, 263}, + dictWord{7, 
0, 628}, + dictWord{7, 0, 1677}, + dictWord{8, 0, 349}, + dictWord{9, 0, 100}, + dictWord{10, 0, 677}, + dictWord{14, 0, 169}, + dictWord{ + 14, + 0, + 302, + }, + dictWord{14, 0, 313}, + dictWord{15, 0, 48}, + dictWord{15, 0, 84}, + dictWord{7, 11, 237}, + dictWord{8, 11, 664}, + dictWord{9, 11, 42}, + dictWord{9, 11, 266}, + dictWord{9, 11, 380}, + dictWord{9, 11, 645}, + dictWord{10, 11, 177}, + dictWord{138, 11, 276}, + dictWord{138, 11, 69}, + dictWord{4, 0, 310}, + dictWord{7, 0, 708}, + dictWord{7, 0, 996}, + dictWord{9, 0, 795}, + dictWord{10, 0, 390}, + dictWord{10, 0, 733}, + dictWord{11, 0, 451}, + dictWord{12, 0, 249}, + dictWord{14, 0, 115}, + dictWord{ + 14, + 0, + 286, + }, + dictWord{143, 0, 100}, + dictWord{5, 0, 587}, + dictWord{4, 10, 40}, + dictWord{10, 10, 67}, + dictWord{11, 10, 117}, + dictWord{11, 10, 768}, + dictWord{ + 139, + 10, + 935, + }, + dictWord{6, 0, 1942}, + dictWord{7, 0, 512}, + dictWord{136, 0, 983}, + dictWord{7, 10, 992}, + dictWord{8, 10, 301}, + dictWord{9, 10, 722}, + dictWord{12, 10, 63}, + dictWord{13, 10, 29}, + dictWord{14, 10, 161}, + dictWord{143, 10, 18}, + dictWord{136, 11, 76}, + dictWord{139, 10, 923}, + dictWord{134, 0, 645}, + dictWord{ + 134, + 0, + 851, + }, + dictWord{4, 0, 498}, + dictWord{132, 11, 293}, + dictWord{7, 0, 217}, + dictWord{8, 0, 140}, + dictWord{10, 0, 610}, + dictWord{14, 11, 352}, + dictWord{ + 17, + 11, + 53, + }, + dictWord{18, 11, 146}, + dictWord{18, 11, 152}, + dictWord{19, 11, 11}, + dictWord{150, 11, 54}, + dictWord{134, 0, 1448}, + dictWord{138, 11, 841}, + dictWord{133, 0, 905}, + dictWord{4, 11, 605}, + dictWord{7, 11, 518}, + dictWord{7, 11, 1282}, + dictWord{7, 11, 1918}, + dictWord{10, 11, 180}, + dictWord{139, 11, 218}, + dictWord{139, 11, 917}, + dictWord{135, 10, 825}, + dictWord{140, 10, 328}, + dictWord{4, 0, 456}, + dictWord{7, 0, 105}, + dictWord{7, 0, 358}, + dictWord{7, 0, 1637}, + dictWord{8, 0, 643}, + dictWord{139, 0, 483}, + dictWord{134, 0, 792}, + dictWord{6, 11, 
96}, + dictWord{135, 11, 1426}, + dictWord{137, 11, 691}, + dictWord{ + 4, + 11, + 651, + }, + dictWord{133, 11, 289}, + dictWord{7, 11, 688}, + dictWord{8, 11, 35}, + dictWord{9, 11, 511}, + dictWord{10, 11, 767}, + dictWord{147, 11, 118}, + dictWord{ + 150, + 0, + 56, + }, + dictWord{5, 0, 243}, + dictWord{5, 0, 535}, + dictWord{6, 10, 204}, + dictWord{10, 10, 320}, + dictWord{10, 10, 583}, + dictWord{13, 10, 502}, + dictWord{ + 14, + 10, + 72, + }, + dictWord{14, 10, 274}, + dictWord{14, 10, 312}, + dictWord{14, 10, 344}, + dictWord{15, 10, 159}, + dictWord{16, 10, 62}, + dictWord{16, 10, 69}, + dictWord{ + 17, + 10, + 30, + }, + dictWord{18, 10, 42}, + dictWord{18, 10, 53}, + dictWord{18, 10, 84}, + dictWord{18, 10, 140}, + dictWord{19, 10, 68}, + dictWord{19, 10, 85}, + dictWord{20, 10, 5}, + dictWord{20, 10, 45}, + dictWord{20, 10, 101}, + dictWord{22, 10, 7}, + dictWord{150, 10, 20}, + dictWord{4, 10, 558}, + dictWord{6, 10, 390}, + dictWord{7, 10, 162}, + dictWord{7, 10, 689}, + dictWord{9, 10, 360}, + dictWord{138, 10, 653}, + dictWord{146, 11, 23}, + dictWord{135, 0, 1748}, + dictWord{5, 10, 856}, + dictWord{ + 6, + 10, + 1672, + }, + dictWord{6, 10, 1757}, + dictWord{134, 10, 1781}, + dictWord{5, 0, 539}, + dictWord{5, 0, 754}, + dictWord{6, 0, 876}, + dictWord{132, 11, 704}, + dictWord{ + 135, + 11, + 1078, + }, + dictWord{5, 10, 92}, + dictWord{10, 10, 736}, + dictWord{140, 10, 102}, + dictWord{17, 0, 91}, + dictWord{5, 10, 590}, + dictWord{137, 10, 213}, + dictWord{134, 0, 1565}, + dictWord{6, 0, 91}, + dictWord{135, 0, 435}, + dictWord{4, 0, 939}, + dictWord{140, 0, 792}, + dictWord{134, 0, 1399}, + dictWord{4, 0, 16}, + dictWord{ + 5, + 0, + 316, + }, + dictWord{5, 0, 842}, + dictWord{6, 0, 370}, + dictWord{6, 0, 1778}, + dictWord{8, 0, 166}, + dictWord{11, 0, 812}, + dictWord{12, 0, 206}, + dictWord{12, 0, 351}, + dictWord{14, 0, 418}, + dictWord{16, 0, 15}, + dictWord{16, 0, 34}, + dictWord{18, 0, 3}, + dictWord{19, 0, 3}, + dictWord{19, 0, 7}, + 
dictWord{20, 0, 4}, + dictWord{21, 0, 21}, + dictWord{ + 4, + 11, + 720, + }, + dictWord{133, 11, 306}, + dictWord{144, 0, 95}, + dictWord{133, 11, 431}, + dictWord{132, 11, 234}, + dictWord{135, 0, 551}, + dictWord{4, 0, 999}, + dictWord{6, 0, 1966}, + dictWord{134, 0, 2042}, + dictWord{7, 0, 619}, + dictWord{10, 0, 547}, + dictWord{11, 0, 122}, + dictWord{12, 0, 601}, + dictWord{15, 0, 7}, + dictWord{148, 0, 20}, + dictWord{5, 11, 464}, + dictWord{6, 11, 236}, + dictWord{7, 11, 276}, + dictWord{7, 11, 696}, + dictWord{7, 11, 914}, + dictWord{7, 11, 1108}, + dictWord{ + 7, + 11, + 1448, + }, + dictWord{9, 11, 15}, + dictWord{9, 11, 564}, + dictWord{10, 11, 14}, + dictWord{12, 11, 565}, + dictWord{13, 11, 449}, + dictWord{14, 11, 53}, + dictWord{ + 15, + 11, + 13, + }, + dictWord{16, 11, 64}, + dictWord{145, 11, 41}, + dictWord{6, 0, 884}, + dictWord{6, 0, 1019}, + dictWord{134, 0, 1150}, + dictWord{6, 11, 1767}, + dictWord{ + 12, + 11, + 194, + }, + dictWord{145, 11, 107}, + dictWord{136, 10, 503}, + dictWord{133, 11, 840}, + dictWord{7, 0, 671}, + dictWord{134, 10, 466}, + dictWord{132, 0, 888}, + dictWord{4, 0, 149}, + dictWord{138, 0, 368}, + dictWord{4, 0, 154}, + dictWord{7, 0, 1134}, + dictWord{136, 0, 105}, + dictWord{135, 0, 983}, + dictWord{9, 11, 642}, + dictWord{11, 11, 236}, + dictWord{142, 11, 193}, + dictWord{4, 0, 31}, + dictWord{6, 0, 429}, + dictWord{7, 0, 962}, + dictWord{9, 0, 458}, + dictWord{139, 0, 691}, + dictWord{ + 6, + 0, + 643, + }, + dictWord{134, 0, 1102}, + dictWord{132, 0, 312}, + dictWord{4, 11, 68}, + dictWord{5, 11, 634}, + dictWord{6, 11, 386}, + dictWord{7, 11, 794}, + dictWord{ + 8, + 11, + 273, + }, + dictWord{9, 11, 563}, + dictWord{10, 11, 105}, + dictWord{10, 11, 171}, + dictWord{11, 11, 94}, + dictWord{139, 11, 354}, + dictWord{133, 0, 740}, + dictWord{ + 135, + 0, + 1642, + }, + dictWord{4, 11, 95}, + dictWord{7, 11, 416}, + dictWord{8, 11, 211}, + dictWord{139, 11, 830}, + dictWord{132, 0, 236}, + dictWord{138, 10, 241}, 
+ dictWord{7, 11, 731}, + dictWord{13, 11, 20}, + dictWord{143, 11, 11}, + dictWord{5, 0, 836}, + dictWord{5, 0, 857}, + dictWord{6, 0, 1680}, + dictWord{135, 0, 59}, + dictWord{ + 10, + 0, + 68, + }, + dictWord{11, 0, 494}, + dictWord{152, 11, 6}, + dictWord{4, 0, 81}, + dictWord{139, 0, 867}, + dictWord{135, 0, 795}, + dictWord{133, 11, 689}, + dictWord{ + 4, + 0, + 1001, + }, + dictWord{5, 0, 282}, + dictWord{6, 0, 1932}, + dictWord{6, 0, 1977}, + dictWord{6, 0, 1987}, + dictWord{6, 0, 1992}, + dictWord{8, 0, 650}, + dictWord{8, 0, 919}, + dictWord{8, 0, 920}, + dictWord{8, 0, 923}, + dictWord{8, 0, 926}, + dictWord{8, 0, 927}, + dictWord{8, 0, 931}, + dictWord{8, 0, 939}, + dictWord{8, 0, 947}, + dictWord{8, 0, 956}, + dictWord{8, 0, 997}, + dictWord{9, 0, 907}, + dictWord{10, 0, 950}, + dictWord{10, 0, 953}, + dictWord{10, 0, 954}, + dictWord{10, 0, 956}, + dictWord{10, 0, 958}, + dictWord{ + 10, + 0, + 959, + }, + dictWord{10, 0, 964}, + dictWord{10, 0, 970}, + dictWord{10, 0, 972}, + dictWord{10, 0, 973}, + dictWord{10, 0, 975}, + dictWord{10, 0, 976}, + dictWord{ + 10, + 0, + 980, + }, + dictWord{10, 0, 981}, + dictWord{10, 0, 984}, + dictWord{10, 0, 988}, + dictWord{10, 0, 990}, + dictWord{10, 0, 995}, + dictWord{10, 0, 999}, + dictWord{ + 10, + 0, + 1002, + }, + dictWord{10, 0, 1003}, + dictWord{10, 0, 1005}, + dictWord{10, 0, 1006}, + dictWord{10, 0, 1008}, + dictWord{10, 0, 1009}, + dictWord{10, 0, 1012}, + dictWord{10, 0, 1014}, + dictWord{10, 0, 1015}, + dictWord{10, 0, 1019}, + dictWord{10, 0, 1020}, + dictWord{10, 0, 1022}, + dictWord{12, 0, 959}, + dictWord{12, 0, 961}, + dictWord{12, 0, 962}, + dictWord{12, 0, 963}, + dictWord{12, 0, 964}, + dictWord{12, 0, 965}, + dictWord{12, 0, 967}, + dictWord{12, 0, 968}, + dictWord{12, 0, 969}, + dictWord{12, 0, 970}, + dictWord{12, 0, 971}, + dictWord{12, 0, 972}, + dictWord{12, 0, 973}, + dictWord{12, 0, 974}, + dictWord{12, 0, 975}, + dictWord{12, 0, 976}, + dictWord{ + 12, + 0, + 977, + }, + dictWord{12, 
0, 979}, + dictWord{12, 0, 981}, + dictWord{12, 0, 982}, + dictWord{12, 0, 983}, + dictWord{12, 0, 984}, + dictWord{12, 0, 985}, + dictWord{ + 12, + 0, + 986, + }, + dictWord{12, 0, 987}, + dictWord{12, 0, 989}, + dictWord{12, 0, 990}, + dictWord{12, 0, 992}, + dictWord{12, 0, 993}, + dictWord{12, 0, 995}, + dictWord{12, 0, 998}, + dictWord{12, 0, 999}, + dictWord{12, 0, 1000}, + dictWord{12, 0, 1001}, + dictWord{12, 0, 1002}, + dictWord{12, 0, 1004}, + dictWord{12, 0, 1005}, + dictWord{ + 12, + 0, + 1006, + }, + dictWord{12, 0, 1007}, + dictWord{12, 0, 1008}, + dictWord{12, 0, 1009}, + dictWord{12, 0, 1010}, + dictWord{12, 0, 1011}, + dictWord{12, 0, 1012}, + dictWord{12, 0, 1014}, + dictWord{12, 0, 1015}, + dictWord{12, 0, 1016}, + dictWord{12, 0, 1017}, + dictWord{12, 0, 1018}, + dictWord{12, 0, 1019}, + dictWord{ + 12, + 0, + 1022, + }, + dictWord{12, 0, 1023}, + dictWord{14, 0, 475}, + dictWord{14, 0, 477}, + dictWord{14, 0, 478}, + dictWord{14, 0, 479}, + dictWord{14, 0, 480}, + dictWord{ + 14, + 0, + 482, + }, + dictWord{14, 0, 483}, + dictWord{14, 0, 484}, + dictWord{14, 0, 485}, + dictWord{14, 0, 486}, + dictWord{14, 0, 487}, + dictWord{14, 0, 488}, + dictWord{14, 0, 489}, + dictWord{14, 0, 490}, + dictWord{14, 0, 491}, + dictWord{14, 0, 492}, + dictWord{14, 0, 493}, + dictWord{14, 0, 494}, + dictWord{14, 0, 495}, + dictWord{14, 0, 496}, + dictWord{14, 0, 497}, + dictWord{14, 0, 498}, + dictWord{14, 0, 499}, + dictWord{14, 0, 500}, + dictWord{14, 0, 501}, + dictWord{14, 0, 502}, + dictWord{14, 0, 503}, + dictWord{ + 14, + 0, + 504, + }, + dictWord{14, 0, 506}, + dictWord{14, 0, 507}, + dictWord{14, 0, 508}, + dictWord{14, 0, 509}, + dictWord{14, 0, 510}, + dictWord{14, 0, 511}, + dictWord{ + 16, + 0, + 113, + }, + dictWord{16, 0, 114}, + dictWord{16, 0, 115}, + dictWord{16, 0, 117}, + dictWord{16, 0, 118}, + dictWord{16, 0, 119}, + dictWord{16, 0, 121}, + dictWord{16, 0, 122}, + dictWord{16, 0, 123}, + dictWord{16, 0, 124}, + dictWord{16, 0, 125}, + 
dictWord{16, 0, 126}, + dictWord{16, 0, 127}, + dictWord{18, 0, 242}, + dictWord{18, 0, 243}, + dictWord{18, 0, 244}, + dictWord{18, 0, 245}, + dictWord{18, 0, 248}, + dictWord{18, 0, 249}, + dictWord{18, 0, 250}, + dictWord{18, 0, 251}, + dictWord{18, 0, 252}, + dictWord{ + 18, + 0, + 253, + }, + dictWord{18, 0, 254}, + dictWord{18, 0, 255}, + dictWord{20, 0, 125}, + dictWord{20, 0, 126}, + dictWord{148, 0, 127}, + dictWord{7, 11, 1717}, + dictWord{ + 7, + 11, + 1769, + }, + dictWord{138, 11, 546}, + dictWord{7, 11, 1127}, + dictWord{7, 11, 1572}, + dictWord{10, 11, 297}, + dictWord{10, 11, 422}, + dictWord{11, 11, 764}, + dictWord{11, 11, 810}, + dictWord{12, 11, 264}, + dictWord{13, 11, 102}, + dictWord{13, 11, 300}, + dictWord{13, 11, 484}, + dictWord{14, 11, 147}, + dictWord{ + 14, + 11, + 229, + }, + dictWord{17, 11, 71}, + dictWord{18, 11, 118}, + dictWord{147, 11, 120}, + dictWord{6, 0, 1148}, + dictWord{134, 0, 1586}, + dictWord{132, 0, 775}, + dictWord{135, 10, 954}, + dictWord{133, 11, 864}, + dictWord{133, 11, 928}, + dictWord{138, 11, 189}, + dictWord{135, 10, 1958}, + dictWord{6, 10, 549}, + dictWord{ + 8, + 10, + 34, + }, + dictWord{8, 10, 283}, + dictWord{9, 10, 165}, + dictWord{138, 10, 475}, + dictWord{5, 10, 652}, + dictWord{5, 10, 701}, + dictWord{135, 10, 449}, + dictWord{135, 11, 695}, + dictWord{4, 10, 655}, + dictWord{7, 10, 850}, + dictWord{17, 10, 75}, + dictWord{146, 10, 137}, + dictWord{140, 11, 682}, + dictWord{ + 133, + 11, + 523, + }, + dictWord{8, 0, 970}, + dictWord{136, 10, 670}, + dictWord{136, 11, 555}, + dictWord{7, 11, 76}, + dictWord{8, 11, 44}, + dictWord{9, 11, 884}, + dictWord{ + 10, + 11, + 580, + }, + dictWord{11, 11, 399}, + dictWord{11, 11, 894}, + dictWord{15, 11, 122}, + dictWord{18, 11, 144}, + dictWord{147, 11, 61}, + dictWord{6, 10, 159}, + dictWord{ + 6, + 10, + 364, + }, + dictWord{7, 10, 516}, + dictWord{7, 10, 1439}, + dictWord{137, 10, 518}, + dictWord{4, 0, 71}, + dictWord{5, 0, 376}, + dictWord{7, 0, 119}, + 
dictWord{ + 138, + 0, + 665, + }, + dictWord{141, 10, 151}, + dictWord{11, 0, 827}, + dictWord{14, 0, 34}, + dictWord{143, 0, 148}, + dictWord{133, 11, 518}, + dictWord{4, 0, 479}, + dictWord{ + 135, + 11, + 1787, + }, + dictWord{135, 11, 1852}, + dictWord{135, 10, 993}, + dictWord{7, 0, 607}, + dictWord{136, 0, 99}, + dictWord{134, 0, 1960}, + dictWord{132, 0, 793}, + dictWord{4, 0, 41}, + dictWord{5, 0, 74}, + dictWord{7, 0, 1627}, + dictWord{11, 0, 871}, + dictWord{140, 0, 619}, + dictWord{7, 0, 94}, + dictWord{11, 0, 329}, + dictWord{ + 11, + 0, + 965, + }, + dictWord{12, 0, 241}, + dictWord{14, 0, 354}, + dictWord{15, 0, 22}, + dictWord{148, 0, 63}, + dictWord{7, 10, 501}, + dictWord{9, 10, 111}, + dictWord{10, 10, 141}, + dictWord{11, 10, 332}, + dictWord{13, 10, 43}, + dictWord{13, 10, 429}, + dictWord{14, 10, 130}, + dictWord{14, 10, 415}, + dictWord{145, 10, 102}, + dictWord{ + 9, + 0, + 209, + }, + dictWord{137, 0, 300}, + dictWord{134, 0, 1497}, + dictWord{138, 11, 255}, + dictWord{4, 11, 934}, + dictWord{5, 11, 138}, + dictWord{136, 11, 610}, + dictWord{133, 0, 98}, + dictWord{6, 0, 1316}, + dictWord{10, 11, 804}, + dictWord{138, 11, 832}, + dictWord{8, 11, 96}, + dictWord{9, 11, 36}, + dictWord{10, 11, 607}, + dictWord{11, 11, 423}, + dictWord{11, 11, 442}, + dictWord{12, 11, 309}, + dictWord{14, 11, 199}, + dictWord{15, 11, 90}, + dictWord{145, 11, 110}, + dictWord{ + 132, + 0, + 463, + }, + dictWord{5, 10, 149}, + dictWord{136, 10, 233}, + dictWord{133, 10, 935}, + dictWord{4, 11, 652}, + dictWord{8, 11, 320}, + dictWord{9, 11, 13}, + dictWord{ + 9, + 11, + 398, + }, + dictWord{9, 11, 727}, + dictWord{10, 11, 75}, + dictWord{10, 11, 184}, + dictWord{10, 11, 230}, + dictWord{10, 11, 564}, + dictWord{10, 11, 569}, + dictWord{ + 11, + 11, + 973, + }, + dictWord{12, 11, 70}, + dictWord{12, 11, 189}, + dictWord{13, 11, 57}, + dictWord{13, 11, 257}, + dictWord{22, 11, 6}, + dictWord{150, 11, 16}, + dictWord{ + 142, + 0, + 291, + }, + dictWord{12, 10, 582}, 
+ dictWord{146, 10, 131}, + dictWord{136, 10, 801}, + dictWord{133, 0, 984}, + dictWord{145, 11, 116}, + dictWord{4, 11, 692}, + dictWord{133, 11, 321}, + dictWord{4, 0, 182}, + dictWord{6, 0, 205}, + dictWord{135, 0, 220}, + dictWord{4, 0, 42}, + dictWord{9, 0, 205}, + dictWord{9, 0, 786}, + dictWord{ + 138, + 0, + 659, + }, + dictWord{6, 0, 801}, + dictWord{11, 11, 130}, + dictWord{140, 11, 609}, + dictWord{132, 0, 635}, + dictWord{5, 11, 345}, + dictWord{135, 11, 1016}, + dictWord{139, 0, 533}, + dictWord{132, 0, 371}, + dictWord{4, 0, 272}, + dictWord{135, 0, 836}, + dictWord{6, 0, 1282}, + dictWord{135, 11, 1100}, + dictWord{5, 0, 825}, + dictWord{134, 0, 1640}, + dictWord{135, 11, 1325}, + dictWord{133, 11, 673}, + dictWord{4, 11, 287}, + dictWord{133, 11, 1018}, + dictWord{135, 0, 357}, + dictWord{ + 6, + 0, + 467, + }, + dictWord{137, 0, 879}, + dictWord{7, 0, 317}, + dictWord{135, 0, 569}, + dictWord{6, 0, 924}, + dictWord{134, 0, 1588}, + dictWord{5, 11, 34}, + dictWord{ + 5, + 10, + 406, + }, + dictWord{10, 11, 724}, + dictWord{12, 11, 444}, + dictWord{13, 11, 354}, + dictWord{18, 11, 32}, + dictWord{23, 11, 24}, + dictWord{23, 11, 31}, + dictWord{ + 152, + 11, + 5, + }, + dictWord{6, 0, 1795}, + dictWord{6, 0, 1835}, + dictWord{6, 0, 1836}, + dictWord{6, 0, 1856}, + dictWord{8, 0, 844}, + dictWord{8, 0, 849}, + dictWord{8, 0, 854}, + dictWord{8, 0, 870}, + dictWord{8, 0, 887}, + dictWord{10, 0, 852}, + dictWord{138, 0, 942}, + dictWord{6, 10, 69}, + dictWord{135, 10, 117}, + dictWord{137, 0, 307}, + dictWord{ + 4, + 0, + 944, + }, + dictWord{6, 0, 1799}, + dictWord{6, 0, 1825}, + dictWord{10, 0, 848}, + dictWord{10, 0, 875}, + dictWord{10, 0, 895}, + dictWord{10, 0, 899}, + dictWord{ + 10, + 0, + 902, + }, + dictWord{140, 0, 773}, + dictWord{11, 0, 43}, + dictWord{13, 0, 72}, + dictWord{141, 0, 142}, + dictWord{135, 10, 1830}, + dictWord{134, 11, 382}, + dictWord{ + 4, + 10, + 432, + }, + dictWord{135, 10, 824}, + dictWord{132, 11, 329}, + dictWord{7, 
0, 1820}, + dictWord{139, 11, 124}, + dictWord{133, 10, 826}, + dictWord{ + 133, + 0, + 525, + }, + dictWord{132, 11, 906}, + dictWord{7, 11, 1940}, + dictWord{136, 11, 366}, + dictWord{138, 11, 10}, + dictWord{4, 11, 123}, + dictWord{4, 11, 649}, + dictWord{ + 5, + 11, + 605, + }, + dictWord{7, 11, 1509}, + dictWord{136, 11, 36}, + dictWord{6, 0, 110}, + dictWord{135, 0, 1681}, + dictWord{133, 0, 493}, + dictWord{133, 11, 767}, + dictWord{4, 0, 174}, + dictWord{135, 0, 911}, + dictWord{138, 11, 786}, + dictWord{8, 0, 417}, + dictWord{137, 0, 782}, + dictWord{133, 10, 1000}, + dictWord{7, 0, 733}, + dictWord{137, 0, 583}, + dictWord{4, 10, 297}, + dictWord{6, 10, 529}, + dictWord{7, 10, 152}, + dictWord{7, 10, 713}, + dictWord{7, 10, 1845}, + dictWord{8, 10, 710}, + dictWord{8, 10, 717}, + dictWord{12, 10, 639}, + dictWord{140, 10, 685}, + dictWord{4, 0, 32}, + dictWord{5, 0, 215}, + dictWord{6, 0, 269}, + dictWord{7, 0, 1782}, + dictWord{ + 7, + 0, + 1892, + }, + dictWord{10, 0, 16}, + dictWord{11, 0, 822}, + dictWord{11, 0, 954}, + dictWord{141, 0, 481}, + dictWord{4, 11, 273}, + dictWord{5, 11, 658}, + dictWord{ + 133, + 11, + 995, + }, + dictWord{136, 0, 477}, + dictWord{134, 11, 72}, + dictWord{135, 11, 1345}, + dictWord{5, 0, 308}, + dictWord{7, 0, 1088}, + dictWord{4, 10, 520}, + dictWord{ + 135, + 10, + 575, + }, + dictWord{133, 11, 589}, + dictWord{5, 0, 126}, + dictWord{8, 0, 297}, + dictWord{9, 0, 366}, + dictWord{140, 0, 374}, + dictWord{7, 0, 1551}, + dictWord{ + 139, + 0, + 361, + }, + dictWord{5, 11, 117}, + dictWord{6, 11, 514}, + dictWord{6, 11, 541}, + dictWord{7, 11, 1164}, + dictWord{7, 11, 1436}, + dictWord{8, 11, 220}, + dictWord{ + 8, + 11, + 648, + }, + dictWord{10, 11, 688}, + dictWord{139, 11, 560}, + dictWord{133, 11, 686}, + dictWord{4, 0, 946}, + dictWord{6, 0, 1807}, + dictWord{8, 0, 871}, + dictWord{ + 10, + 0, + 854, + }, + dictWord{10, 0, 870}, + dictWord{10, 0, 888}, + dictWord{10, 0, 897}, + dictWord{10, 0, 920}, + dictWord{12, 0, 
722}, + dictWord{12, 0, 761}, + dictWord{ + 12, + 0, + 763, + }, + dictWord{12, 0, 764}, + dictWord{14, 0, 454}, + dictWord{14, 0, 465}, + dictWord{16, 0, 107}, + dictWord{18, 0, 167}, + dictWord{18, 0, 168}, + dictWord{ + 146, + 0, + 172, + }, + dictWord{132, 0, 175}, + dictWord{135, 0, 1307}, + dictWord{132, 0, 685}, + dictWord{135, 11, 1834}, + dictWord{133, 0, 797}, + dictWord{6, 0, 745}, + dictWord{ + 6, + 0, + 858, + }, + dictWord{134, 0, 963}, + dictWord{133, 0, 565}, + dictWord{5, 10, 397}, + dictWord{6, 10, 154}, + dictWord{7, 11, 196}, + dictWord{7, 10, 676}, + dictWord{ + 8, + 10, + 443, + }, + dictWord{8, 10, 609}, + dictWord{9, 10, 24}, + dictWord{9, 10, 325}, + dictWord{10, 10, 35}, + dictWord{10, 11, 765}, + dictWord{11, 11, 347}, + dictWord{ + 11, + 10, + 535, + }, + dictWord{11, 11, 552}, + dictWord{11, 11, 576}, + dictWord{11, 10, 672}, + dictWord{11, 11, 790}, + dictWord{11, 10, 1018}, + dictWord{12, 11, 263}, + dictWord{12, 10, 637}, + dictWord{13, 11, 246}, + dictWord{13, 11, 270}, + dictWord{13, 11, 395}, + dictWord{14, 11, 74}, + dictWord{14, 11, 176}, + dictWord{ + 14, + 11, + 190, + }, + dictWord{14, 11, 398}, + dictWord{14, 11, 412}, + dictWord{15, 11, 32}, + dictWord{15, 11, 63}, + dictWord{16, 10, 30}, + dictWord{16, 11, 88}, + dictWord{ + 147, + 11, + 105, + }, + dictWord{13, 11, 84}, + dictWord{141, 11, 122}, + dictWord{4, 0, 252}, + dictWord{7, 0, 1068}, + dictWord{10, 0, 434}, + dictWord{11, 0, 228}, + dictWord{ + 11, + 0, + 426, + }, + dictWord{13, 0, 231}, + dictWord{18, 0, 106}, + dictWord{148, 0, 87}, + dictWord{137, 0, 826}, + dictWord{4, 11, 589}, + dictWord{139, 11, 282}, + dictWord{ + 5, + 11, + 381, + }, + dictWord{135, 11, 1792}, + dictWord{132, 0, 791}, + dictWord{5, 0, 231}, + dictWord{10, 0, 509}, + dictWord{133, 10, 981}, + dictWord{7, 0, 601}, + dictWord{ + 9, + 0, + 277, + }, + dictWord{9, 0, 674}, + dictWord{10, 0, 178}, + dictWord{10, 0, 418}, + dictWord{10, 0, 571}, + dictWord{11, 0, 531}, + dictWord{12, 0, 113}, + 
dictWord{12, 0, 475}, + dictWord{13, 0, 99}, + dictWord{142, 0, 428}, + dictWord{4, 10, 56}, + dictWord{7, 11, 616}, + dictWord{7, 10, 1791}, + dictWord{8, 10, 607}, + dictWord{8, 10, 651}, + dictWord{10, 11, 413}, + dictWord{11, 10, 465}, + dictWord{11, 10, 835}, + dictWord{12, 10, 337}, + dictWord{141, 10, 480}, + dictWord{7, 0, 1591}, + dictWord{144, 0, 43}, + dictWord{9, 10, 158}, + dictWord{138, 10, 411}, + dictWord{135, 0, 1683}, + dictWord{8, 0, 289}, + dictWord{11, 0, 45}, + dictWord{12, 0, 278}, + dictWord{140, 0, 537}, + dictWord{6, 11, 120}, + dictWord{7, 11, 1188}, + dictWord{7, 11, 1710}, + dictWord{8, 11, 286}, + dictWord{9, 11, 667}, + dictWord{11, 11, 592}, + dictWord{ + 139, + 11, + 730, + }, + dictWord{136, 10, 617}, + dictWord{135, 0, 1120}, + dictWord{135, 11, 1146}, + dictWord{139, 10, 563}, + dictWord{4, 11, 352}, + dictWord{4, 10, 369}, + dictWord{135, 11, 687}, + dictWord{143, 11, 38}, + dictWord{4, 0, 399}, + dictWord{5, 0, 119}, + dictWord{5, 0, 494}, + dictWord{7, 0, 751}, + dictWord{9, 0, 556}, + dictWord{ + 14, + 11, + 179, + }, + dictWord{15, 11, 151}, + dictWord{150, 11, 11}, + dictWord{4, 11, 192}, + dictWord{5, 11, 49}, + dictWord{6, 11, 200}, + dictWord{6, 11, 293}, + dictWord{ + 6, + 11, + 1696, + }, + dictWord{135, 11, 488}, + dictWord{4, 0, 398}, + dictWord{133, 0, 660}, + dictWord{7, 0, 1030}, + dictWord{134, 10, 622}, + dictWord{135, 11, 595}, + dictWord{141, 0, 168}, + dictWord{132, 11, 147}, + dictWord{7, 0, 973}, + dictWord{10, 10, 624}, + dictWord{142, 10, 279}, + dictWord{132, 10, 363}, + dictWord{ + 132, + 0, + 642, + }, + dictWord{133, 11, 934}, + dictWord{134, 0, 1615}, + dictWord{7, 11, 505}, + dictWord{135, 11, 523}, + dictWord{7, 0, 594}, + dictWord{7, 0, 851}, + dictWord{ + 7, + 0, + 1858, + }, + dictWord{9, 0, 411}, + dictWord{9, 0, 574}, + dictWord{9, 0, 666}, + dictWord{9, 0, 737}, + dictWord{10, 0, 346}, + dictWord{10, 0, 712}, + dictWord{11, 0, 246}, + dictWord{11, 0, 432}, + dictWord{11, 0, 517}, + 
dictWord{11, 0, 647}, + dictWord{11, 0, 679}, + dictWord{11, 0, 727}, + dictWord{12, 0, 304}, + dictWord{12, 0, 305}, + dictWord{ + 12, + 0, + 323, + }, + dictWord{12, 0, 483}, + dictWord{12, 0, 572}, + dictWord{12, 0, 593}, + dictWord{12, 0, 602}, + dictWord{13, 0, 95}, + dictWord{13, 0, 101}, + dictWord{ + 13, + 0, + 171, + }, + dictWord{13, 0, 315}, + dictWord{13, 0, 378}, + dictWord{13, 0, 425}, + dictWord{13, 0, 475}, + dictWord{14, 0, 63}, + dictWord{14, 0, 380}, + dictWord{14, 0, 384}, + dictWord{15, 0, 133}, + dictWord{18, 0, 112}, + dictWord{148, 0, 72}, + dictWord{135, 0, 1093}, + dictWord{132, 0, 679}, + dictWord{8, 0, 913}, + dictWord{10, 0, 903}, + dictWord{10, 0, 915}, + dictWord{12, 0, 648}, + dictWord{12, 0, 649}, + dictWord{14, 0, 455}, + dictWord{16, 0, 112}, + dictWord{138, 11, 438}, + dictWord{137, 0, 203}, + dictWord{134, 10, 292}, + dictWord{134, 0, 1492}, + dictWord{7, 0, 1374}, + dictWord{8, 0, 540}, + dictWord{5, 10, 177}, + dictWord{6, 10, 616}, + dictWord{7, 10, 827}, + dictWord{9, 10, 525}, + dictWord{138, 10, 656}, + dictWord{135, 0, 1486}, + dictWord{9, 0, 714}, + dictWord{138, 10, 31}, + dictWord{136, 0, 825}, + dictWord{ + 134, + 0, + 1511, + }, + dictWord{132, 11, 637}, + dictWord{134, 0, 952}, + dictWord{4, 10, 161}, + dictWord{133, 10, 631}, + dictWord{5, 0, 143}, + dictWord{5, 0, 769}, + dictWord{ + 6, + 0, + 1760, + }, + dictWord{7, 0, 682}, + dictWord{7, 0, 1992}, + dictWord{136, 0, 736}, + dictWord{132, 0, 700}, + dictWord{134, 0, 1540}, + dictWord{132, 11, 777}, + dictWord{ + 9, + 11, + 867, + }, + dictWord{138, 11, 837}, + dictWord{7, 0, 1557}, + dictWord{135, 10, 1684}, + dictWord{133, 0, 860}, + dictWord{6, 0, 422}, + dictWord{7, 0, 0}, + dictWord{ + 7, + 0, + 1544, + }, + dictWord{9, 0, 605}, + dictWord{11, 0, 990}, + dictWord{12, 0, 235}, + dictWord{12, 0, 453}, + dictWord{13, 0, 47}, + dictWord{13, 0, 266}, + dictWord{9, 10, 469}, + dictWord{9, 10, 709}, + dictWord{12, 10, 512}, + dictWord{14, 10, 65}, + dictWord{145, 
10, 12}, + dictWord{11, 0, 807}, + dictWord{10, 10, 229}, + dictWord{11, 10, 73}, + dictWord{139, 10, 376}, + dictWord{6, 11, 170}, + dictWord{7, 11, 1080}, + dictWord{8, 11, 395}, + dictWord{8, 11, 487}, + dictWord{11, 11, 125}, + dictWord{ + 141, + 11, + 147, + }, + dictWord{5, 0, 515}, + dictWord{137, 0, 131}, + dictWord{7, 0, 1605}, + dictWord{11, 0, 962}, + dictWord{146, 0, 139}, + dictWord{132, 0, 646}, + dictWord{ + 4, + 0, + 396, + }, + dictWord{7, 0, 728}, + dictWord{9, 0, 117}, + dictWord{13, 0, 202}, + dictWord{148, 0, 51}, + dictWord{6, 0, 121}, + dictWord{6, 0, 124}, + dictWord{6, 0, 357}, + dictWord{ + 7, + 0, + 1138, + }, + dictWord{7, 0, 1295}, + dictWord{8, 0, 162}, + dictWord{8, 0, 508}, + dictWord{11, 0, 655}, + dictWord{4, 11, 535}, + dictWord{6, 10, 558}, + dictWord{ + 7, + 10, + 651, + }, + dictWord{8, 11, 618}, + dictWord{9, 10, 0}, + dictWord{10, 10, 34}, + dictWord{139, 10, 1008}, + dictWord{135, 11, 1245}, + dictWord{138, 0, 357}, + dictWord{ + 150, + 11, + 23, + }, + dictWord{133, 0, 237}, + dictWord{135, 0, 1784}, + dictWord{7, 10, 1832}, + dictWord{138, 10, 374}, + dictWord{132, 0, 713}, + dictWord{132, 11, 46}, + dictWord{6, 0, 1536}, + dictWord{10, 0, 348}, + dictWord{5, 11, 811}, + dictWord{6, 11, 1679}, + dictWord{6, 11, 1714}, + dictWord{135, 11, 2032}, + dictWord{ + 11, + 11, + 182, + }, + dictWord{142, 11, 195}, + dictWord{6, 0, 523}, + dictWord{7, 0, 738}, + dictWord{7, 10, 771}, + dictWord{7, 10, 1731}, + dictWord{9, 10, 405}, + dictWord{ + 138, + 10, + 421, + }, + dictWord{7, 11, 1458}, + dictWord{9, 11, 407}, + dictWord{139, 11, 15}, + dictWord{6, 11, 34}, + dictWord{7, 11, 69}, + dictWord{7, 11, 640}, + dictWord{ + 7, + 11, + 1089, + }, + dictWord{8, 11, 708}, + dictWord{8, 11, 721}, + dictWord{9, 11, 363}, + dictWord{9, 11, 643}, + dictWord{10, 11, 628}, + dictWord{148, 11, 98}, + dictWord{ + 133, + 0, + 434, + }, + dictWord{135, 0, 1877}, + dictWord{7, 0, 571}, + dictWord{138, 0, 366}, + dictWord{5, 10, 881}, + 
dictWord{133, 10, 885}, + dictWord{9, 0, 513}, + dictWord{ + 10, + 0, + 25, + }, + dictWord{10, 0, 39}, + dictWord{12, 0, 122}, + dictWord{140, 0, 187}, + dictWord{132, 0, 580}, + dictWord{5, 10, 142}, + dictWord{134, 10, 546}, + dictWord{ + 132, + 11, + 462, + }, + dictWord{137, 0, 873}, + dictWord{5, 10, 466}, + dictWord{11, 10, 571}, + dictWord{12, 10, 198}, + dictWord{13, 10, 283}, + dictWord{14, 10, 186}, + dictWord{15, 10, 21}, + dictWord{143, 10, 103}, + dictWord{7, 0, 171}, + dictWord{4, 10, 185}, + dictWord{5, 10, 257}, + dictWord{5, 10, 839}, + dictWord{5, 10, 936}, + dictWord{ + 9, + 10, + 399, + }, + dictWord{10, 10, 258}, + dictWord{10, 10, 395}, + dictWord{10, 10, 734}, + dictWord{11, 10, 1014}, + dictWord{12, 10, 23}, + dictWord{13, 10, 350}, + dictWord{14, 10, 150}, + dictWord{147, 10, 6}, + dictWord{134, 0, 625}, + dictWord{7, 0, 107}, + dictWord{7, 0, 838}, + dictWord{8, 0, 550}, + dictWord{138, 0, 401}, + dictWord{ + 5, + 11, + 73, + }, + dictWord{6, 11, 23}, + dictWord{134, 11, 338}, + dictWord{4, 0, 943}, + dictWord{6, 0, 1850}, + dictWord{12, 0, 713}, + dictWord{142, 0, 434}, + dictWord{ + 11, + 0, + 588, + }, + dictWord{11, 0, 864}, + dictWord{11, 0, 936}, + dictWord{11, 0, 968}, + dictWord{12, 0, 73}, + dictWord{12, 0, 343}, + dictWord{12, 0, 394}, + dictWord{13, 0, 275}, + dictWord{14, 0, 257}, + dictWord{15, 0, 160}, + dictWord{7, 10, 404}, + dictWord{7, 10, 1377}, + dictWord{7, 10, 1430}, + dictWord{7, 10, 2017}, + dictWord{8, 10, 149}, + dictWord{8, 10, 239}, + dictWord{8, 10, 512}, + dictWord{8, 10, 793}, + dictWord{8, 10, 818}, + dictWord{9, 10, 474}, + dictWord{9, 10, 595}, + dictWord{10, 10, 122}, + dictWord{10, 10, 565}, + dictWord{10, 10, 649}, + dictWord{10, 10, 783}, + dictWord{11, 10, 239}, + dictWord{11, 10, 295}, + dictWord{11, 10, 447}, + dictWord{ + 11, + 10, + 528, + }, + dictWord{11, 10, 639}, + dictWord{11, 10, 800}, + dictWord{12, 10, 25}, + dictWord{12, 10, 157}, + dictWord{12, 10, 316}, + dictWord{12, 10, 390}, + 
dictWord{ + 12, + 10, + 391, + }, + dictWord{12, 10, 395}, + dictWord{12, 10, 478}, + dictWord{12, 10, 503}, + dictWord{12, 10, 592}, + dictWord{12, 10, 680}, + dictWord{13, 10, 50}, + dictWord{13, 10, 53}, + dictWord{13, 10, 132}, + dictWord{13, 10, 198}, + dictWord{13, 10, 322}, + dictWord{13, 10, 415}, + dictWord{13, 10, 511}, + dictWord{14, 10, 71}, + dictWord{14, 10, 395}, + dictWord{15, 10, 71}, + dictWord{15, 10, 136}, + dictWord{17, 10, 123}, + dictWord{18, 10, 93}, + dictWord{147, 10, 58}, + dictWord{ + 133, + 0, + 768, + }, + dictWord{11, 0, 103}, + dictWord{142, 0, 0}, + dictWord{136, 10, 712}, + dictWord{132, 0, 799}, + dictWord{132, 0, 894}, + dictWord{7, 11, 725}, + dictWord{ + 8, + 11, + 498, + }, + dictWord{139, 11, 268}, + dictWord{135, 11, 1798}, + dictWord{135, 11, 773}, + dictWord{141, 11, 360}, + dictWord{4, 10, 377}, + dictWord{152, 10, 13}, + dictWord{135, 0, 1673}, + dictWord{132, 11, 583}, + dictWord{134, 0, 1052}, + dictWord{133, 11, 220}, + dictWord{140, 11, 69}, + dictWord{132, 11, 544}, + dictWord{ + 4, + 10, + 180, + }, + dictWord{135, 10, 1906}, + dictWord{134, 0, 272}, + dictWord{4, 0, 441}, + dictWord{134, 0, 1421}, + dictWord{4, 0, 9}, + dictWord{5, 0, 128}, + dictWord{ + 7, + 0, + 368, + }, + dictWord{11, 0, 480}, + dictWord{148, 0, 3}, + dictWord{5, 11, 176}, + dictWord{6, 11, 437}, + dictWord{6, 11, 564}, + dictWord{11, 11, 181}, + dictWord{ + 141, + 11, + 183, + }, + dictWord{132, 10, 491}, + dictWord{7, 0, 1182}, + dictWord{141, 11, 67}, + dictWord{6, 0, 1346}, + dictWord{4, 10, 171}, + dictWord{138, 10, 234}, + dictWord{ + 4, + 10, + 586, + }, + dictWord{7, 10, 1186}, + dictWord{138, 10, 631}, + dictWord{136, 0, 682}, + dictWord{134, 0, 1004}, + dictWord{15, 0, 24}, + dictWord{143, 11, 24}, + dictWord{134, 0, 968}, + dictWord{4, 0, 2}, + dictWord{6, 0, 742}, + dictWord{6, 0, 793}, + dictWord{7, 0, 545}, + dictWord{7, 0, 894}, + dictWord{9, 10, 931}, + dictWord{ + 10, + 10, + 334, + }, + dictWord{148, 10, 71}, + dictWord{136, 
11, 600}, + dictWord{133, 10, 765}, + dictWord{9, 0, 769}, + dictWord{140, 0, 185}, + dictWord{4, 11, 790}, + dictWord{ + 5, + 11, + 273, + }, + dictWord{134, 11, 394}, + dictWord{7, 0, 474}, + dictWord{137, 0, 578}, + dictWord{4, 11, 135}, + dictWord{6, 11, 127}, + dictWord{7, 11, 1185}, + dictWord{ + 7, + 11, + 1511, + }, + dictWord{8, 11, 613}, + dictWord{11, 11, 5}, + dictWord{12, 11, 133}, + dictWord{12, 11, 495}, + dictWord{12, 11, 586}, + dictWord{14, 11, 385}, + dictWord{15, 11, 118}, + dictWord{17, 11, 20}, + dictWord{146, 11, 98}, + dictWord{133, 10, 424}, + dictWord{5, 0, 530}, + dictWord{142, 0, 113}, + dictWord{6, 11, 230}, + dictWord{7, 11, 961}, + dictWord{7, 11, 1085}, + dictWord{136, 11, 462}, + dictWord{7, 11, 1954}, + dictWord{137, 11, 636}, + dictWord{136, 10, 714}, + dictWord{ + 149, + 11, + 6, + }, + dictWord{135, 10, 685}, + dictWord{9, 10, 420}, + dictWord{10, 10, 269}, + dictWord{10, 10, 285}, + dictWord{10, 10, 576}, + dictWord{11, 10, 397}, + dictWord{13, 10, 175}, + dictWord{145, 10, 90}, + dictWord{132, 10, 429}, + dictWord{5, 0, 556}, + dictWord{5, 11, 162}, + dictWord{136, 11, 68}, + dictWord{132, 11, 654}, + dictWord{4, 11, 156}, + dictWord{7, 11, 998}, + dictWord{7, 11, 1045}, + dictWord{7, 11, 1860}, + dictWord{9, 11, 48}, + dictWord{9, 11, 692}, + dictWord{11, 11, 419}, + dictWord{139, 11, 602}, + dictWord{6, 0, 1317}, + dictWord{8, 0, 16}, + dictWord{9, 0, 825}, + dictWord{12, 0, 568}, + dictWord{7, 11, 1276}, + dictWord{8, 11, 474}, + dictWord{137, 11, 652}, + dictWord{18, 0, 97}, + dictWord{7, 10, 18}, + dictWord{7, 10, 699}, + dictWord{7, 10, 1966}, + dictWord{8, 10, 752}, + dictWord{9, 10, 273}, + dictWord{ + 9, + 10, + 412, + }, + dictWord{9, 10, 703}, + dictWord{10, 10, 71}, + dictWord{10, 10, 427}, + dictWord{138, 10, 508}, + dictWord{10, 0, 703}, + dictWord{7, 11, 1454}, + dictWord{138, 11, 703}, + dictWord{4, 10, 53}, + dictWord{5, 10, 186}, + dictWord{135, 10, 752}, + dictWord{134, 0, 892}, + dictWord{134, 0, 1571}, + 
dictWord{8, 10, 575}, + dictWord{10, 10, 289}, + dictWord{139, 10, 319}, + dictWord{6, 0, 186}, + dictWord{137, 0, 426}, + dictWord{134, 0, 1101}, + dictWord{132, 10, 675}, + dictWord{ + 132, + 0, + 585, + }, + dictWord{6, 0, 1870}, + dictWord{137, 0, 937}, + dictWord{152, 11, 10}, + dictWord{9, 11, 197}, + dictWord{10, 11, 300}, + dictWord{12, 11, 473}, + dictWord{ + 13, + 11, + 90, + }, + dictWord{141, 11, 405}, + dictWord{4, 0, 93}, + dictWord{5, 0, 252}, + dictWord{6, 0, 229}, + dictWord{7, 0, 291}, + dictWord{9, 0, 550}, + dictWord{139, 0, 644}, + dictWord{137, 0, 749}, + dictWord{9, 0, 162}, + dictWord{6, 10, 209}, + dictWord{8, 10, 468}, + dictWord{9, 10, 210}, + dictWord{11, 10, 36}, + dictWord{12, 10, 28}, + dictWord{12, 10, 630}, + dictWord{13, 10, 21}, + dictWord{13, 10, 349}, + dictWord{14, 10, 7}, + dictWord{145, 10, 13}, + dictWord{132, 0, 381}, + dictWord{132, 11, 606}, + dictWord{4, 10, 342}, + dictWord{135, 10, 1179}, + dictWord{7, 11, 1587}, + dictWord{7, 11, 1707}, + dictWord{10, 11, 528}, + dictWord{139, 11, 504}, + dictWord{ + 12, + 11, + 39, + }, + dictWord{13, 11, 265}, + dictWord{141, 11, 439}, + dictWord{4, 10, 928}, + dictWord{133, 10, 910}, + dictWord{7, 10, 1838}, + dictWord{7, 11, 1978}, + dictWord{136, 11, 676}, + dictWord{6, 0, 762}, + dictWord{6, 0, 796}, + dictWord{134, 0, 956}, + dictWord{4, 10, 318}, + dictWord{4, 10, 496}, + dictWord{7, 10, 856}, + dictWord{139, 10, 654}, + dictWord{137, 11, 242}, + dictWord{4, 11, 361}, + dictWord{133, 11, 315}, + dictWord{132, 11, 461}, + dictWord{132, 11, 472}, + dictWord{ + 132, + 0, + 857, + }, + dictWord{5, 0, 21}, + dictWord{6, 0, 77}, + dictWord{6, 0, 157}, + dictWord{7, 0, 974}, + dictWord{7, 0, 1301}, + dictWord{7, 0, 1339}, + dictWord{7, 0, 1490}, + dictWord{ + 7, + 0, + 1873, + }, + dictWord{9, 0, 628}, + dictWord{7, 10, 915}, + dictWord{8, 10, 247}, + dictWord{147, 10, 0}, + dictWord{4, 10, 202}, + dictWord{5, 10, 382}, + dictWord{ + 6, + 10, + 454, + }, + dictWord{7, 10, 936}, + 
dictWord{7, 10, 1803}, + dictWord{8, 10, 758}, + dictWord{9, 10, 375}, + dictWord{9, 10, 895}, + dictWord{10, 10, 743}, + dictWord{ + 10, + 10, + 792, + }, + dictWord{11, 10, 978}, + dictWord{11, 10, 1012}, + dictWord{142, 10, 109}, + dictWord{7, 11, 617}, + dictWord{10, 11, 498}, + dictWord{11, 11, 501}, + dictWord{12, 11, 16}, + dictWord{140, 11, 150}, + dictWord{7, 10, 1150}, + dictWord{7, 10, 1425}, + dictWord{7, 10, 1453}, + dictWord{10, 11, 747}, + dictWord{ + 140, + 10, + 513, + }, + dictWord{133, 11, 155}, + dictWord{11, 0, 919}, + dictWord{141, 0, 409}, + dictWord{138, 10, 791}, + dictWord{10, 0, 633}, + dictWord{139, 11, 729}, + dictWord{ + 7, + 11, + 163, + }, + dictWord{8, 11, 319}, + dictWord{9, 11, 402}, + dictWord{10, 11, 24}, + dictWord{10, 11, 681}, + dictWord{11, 11, 200}, + dictWord{11, 11, 567}, + dictWord{12, 11, 253}, + dictWord{12, 11, 410}, + dictWord{142, 11, 219}, + dictWord{5, 11, 475}, + dictWord{7, 11, 1780}, + dictWord{9, 11, 230}, + dictWord{11, 11, 297}, + dictWord{11, 11, 558}, + dictWord{14, 11, 322}, + dictWord{147, 11, 76}, + dictWord{7, 0, 332}, + dictWord{6, 10, 445}, + dictWord{137, 10, 909}, + dictWord{ + 135, + 11, + 1956, + }, + dictWord{136, 11, 274}, + dictWord{134, 10, 578}, + dictWord{135, 0, 1489}, + dictWord{135, 11, 1848}, + dictWord{5, 11, 944}, + dictWord{ + 134, + 11, + 1769, + }, + dictWord{132, 11, 144}, + dictWord{136, 10, 766}, + dictWord{4, 0, 832}, + dictWord{135, 10, 541}, + dictWord{8, 0, 398}, + dictWord{9, 0, 681}, + dictWord{ + 139, + 0, + 632, + }, + dictWord{136, 0, 645}, + dictWord{9, 0, 791}, + dictWord{10, 0, 93}, + dictWord{16, 0, 13}, + dictWord{17, 0, 23}, + dictWord{18, 0, 135}, + dictWord{19, 0, 12}, + dictWord{20, 0, 1}, + dictWord{20, 0, 12}, + dictWord{148, 0, 14}, + dictWord{6, 11, 247}, + dictWord{137, 11, 555}, + dictWord{134, 0, 20}, + dictWord{132, 0, 800}, + dictWord{135, 0, 1841}, + dictWord{139, 10, 983}, + dictWord{137, 10, 768}, + dictWord{132, 10, 584}, + dictWord{141, 11, 51}, + 
dictWord{6, 0, 1993}, + dictWord{ + 4, + 11, + 620, + }, + dictWord{138, 11, 280}, + dictWord{136, 0, 769}, + dictWord{11, 0, 290}, + dictWord{11, 0, 665}, + dictWord{7, 11, 1810}, + dictWord{11, 11, 866}, + dictWord{ + 12, + 11, + 103, + }, + dictWord{13, 11, 495}, + dictWord{17, 11, 67}, + dictWord{147, 11, 74}, + dictWord{134, 0, 1426}, + dictWord{139, 0, 60}, + dictWord{4, 10, 326}, + dictWord{135, 10, 1770}, + dictWord{7, 0, 1874}, + dictWord{9, 0, 641}, + dictWord{132, 10, 226}, + dictWord{6, 0, 644}, + dictWord{5, 10, 426}, + dictWord{8, 10, 30}, + dictWord{ + 9, + 10, + 2, + }, + dictWord{11, 10, 549}, + dictWord{147, 10, 122}, + dictWord{5, 11, 428}, + dictWord{138, 11, 442}, + dictWord{135, 11, 1871}, + dictWord{ + 135, + 0, + 1757, + }, + dictWord{147, 10, 117}, + dictWord{135, 0, 937}, + dictWord{135, 0, 1652}, + dictWord{6, 0, 654}, + dictWord{134, 0, 1476}, + dictWord{133, 11, 99}, + dictWord{135, 0, 527}, + dictWord{132, 10, 345}, + dictWord{4, 10, 385}, + dictWord{4, 11, 397}, + dictWord{7, 10, 265}, + dictWord{135, 10, 587}, + dictWord{4, 0, 579}, + dictWord{5, 0, 226}, + dictWord{5, 0, 323}, + dictWord{135, 0, 960}, + dictWord{134, 0, 1486}, + dictWord{8, 11, 502}, + dictWord{144, 11, 9}, + dictWord{4, 10, 347}, + dictWord{ + 5, + 10, + 423, + }, + dictWord{5, 10, 996}, + dictWord{135, 10, 1329}, + dictWord{7, 11, 727}, + dictWord{146, 11, 73}, + dictWord{4, 11, 485}, + dictWord{7, 11, 353}, + dictWord{7, 10, 1259}, + dictWord{7, 11, 1523}, + dictWord{9, 10, 125}, + dictWord{139, 10, 65}, + dictWord{6, 0, 325}, + dictWord{5, 10, 136}, + dictWord{6, 11, 366}, + dictWord{ + 7, + 11, + 1384, + }, + dictWord{7, 11, 1601}, + dictWord{136, 10, 644}, + dictWord{138, 11, 160}, + dictWord{6, 0, 1345}, + dictWord{137, 11, 282}, + dictWord{18, 0, 91}, + dictWord{147, 0, 70}, + dictWord{136, 0, 404}, + dictWord{4, 11, 157}, + dictWord{133, 11, 471}, + dictWord{133, 0, 973}, + dictWord{6, 0, 135}, + dictWord{ + 135, + 0, + 1176, + }, + dictWord{8, 11, 116}, + 
dictWord{11, 11, 551}, + dictWord{142, 11, 159}, + dictWord{4, 0, 549}, + dictWord{4, 10, 433}, + dictWord{133, 10, 719}, + dictWord{ + 136, + 0, + 976, + }, + dictWord{5, 11, 160}, + dictWord{7, 11, 363}, + dictWord{7, 11, 589}, + dictWord{10, 11, 170}, + dictWord{141, 11, 55}, + dictWord{144, 0, 21}, + dictWord{ + 144, + 0, + 51, + }, + dictWord{135, 0, 314}, + dictWord{135, 10, 1363}, + dictWord{4, 11, 108}, + dictWord{7, 11, 405}, + dictWord{10, 11, 491}, + dictWord{139, 11, 498}, + dictWord{146, 0, 4}, + dictWord{4, 10, 555}, + dictWord{8, 10, 536}, + dictWord{10, 10, 288}, + dictWord{139, 10, 1005}, + dictWord{135, 11, 1005}, + dictWord{6, 0, 281}, + dictWord{7, 0, 6}, + dictWord{8, 0, 282}, + dictWord{8, 0, 480}, + dictWord{8, 0, 499}, + dictWord{9, 0, 198}, + dictWord{10, 0, 143}, + dictWord{10, 0, 169}, + dictWord{ + 10, + 0, + 211, + }, + dictWord{10, 0, 417}, + dictWord{10, 0, 574}, + dictWord{11, 0, 147}, + dictWord{11, 0, 395}, + dictWord{12, 0, 75}, + dictWord{12, 0, 407}, + dictWord{12, 0, 608}, + dictWord{13, 0, 500}, + dictWord{142, 0, 251}, + dictWord{6, 0, 1093}, + dictWord{6, 0, 1405}, + dictWord{9, 10, 370}, + dictWord{138, 10, 90}, + dictWord{4, 11, 926}, + dictWord{133, 11, 983}, + dictWord{135, 0, 1776}, + dictWord{134, 0, 1528}, + dictWord{132, 0, 419}, + dictWord{132, 11, 538}, + dictWord{6, 11, 294}, + dictWord{ + 7, + 11, + 1267, + }, + dictWord{136, 11, 624}, + dictWord{135, 11, 1772}, + dictWord{138, 11, 301}, + dictWord{4, 10, 257}, + dictWord{135, 10, 2031}, + dictWord{4, 0, 138}, + dictWord{7, 0, 1012}, + dictWord{7, 0, 1280}, + dictWord{9, 0, 76}, + dictWord{135, 10, 1768}, + dictWord{132, 11, 757}, + dictWord{5, 0, 29}, + dictWord{140, 0, 638}, + dictWord{7, 11, 655}, + dictWord{135, 11, 1844}, + dictWord{7, 0, 1418}, + dictWord{6, 11, 257}, + dictWord{135, 11, 1522}, + dictWord{8, 11, 469}, + dictWord{ + 138, + 11, + 47, + }, + dictWord{142, 11, 278}, + dictWord{6, 10, 83}, + dictWord{6, 10, 1733}, + dictWord{135, 10, 1389}, + 
dictWord{11, 11, 204}, + dictWord{11, 11, 243}, + dictWord{140, 11, 293}, + dictWord{135, 11, 1875}, + dictWord{6, 0, 1710}, + dictWord{135, 0, 2038}, + dictWord{137, 11, 299}, + dictWord{4, 0, 17}, + dictWord{5, 0, 23}, + dictWord{7, 0, 995}, + dictWord{11, 0, 383}, + dictWord{11, 0, 437}, + dictWord{12, 0, 460}, + dictWord{140, 0, 532}, + dictWord{133, 0, 862}, + dictWord{137, 10, 696}, + dictWord{6, 0, 592}, + dictWord{138, 0, 946}, + dictWord{138, 11, 599}, + dictWord{7, 10, 1718}, + dictWord{9, 10, 95}, + dictWord{9, 10, 274}, + dictWord{10, 10, 279}, + dictWord{10, 10, 317}, + dictWord{10, 10, 420}, + dictWord{11, 10, 303}, + dictWord{11, 10, 808}, + dictWord{12, 10, 134}, + dictWord{12, 10, 367}, + dictWord{ + 13, + 10, + 149, + }, + dictWord{13, 10, 347}, + dictWord{14, 10, 349}, + dictWord{14, 10, 406}, + dictWord{18, 10, 22}, + dictWord{18, 10, 89}, + dictWord{18, 10, 122}, + dictWord{ + 147, + 10, + 47, + }, + dictWord{8, 0, 70}, + dictWord{12, 0, 171}, + dictWord{141, 0, 272}, + dictWord{133, 10, 26}, + dictWord{132, 10, 550}, + dictWord{137, 0, 812}, + dictWord{ + 10, + 0, + 233, + }, + dictWord{139, 0, 76}, + dictWord{134, 0, 988}, + dictWord{134, 0, 442}, + dictWord{136, 10, 822}, + dictWord{7, 0, 896}, + dictWord{4, 10, 902}, + dictWord{ + 5, + 10, + 809, + }, + dictWord{134, 10, 122}, + dictWord{5, 11, 150}, + dictWord{7, 11, 106}, + dictWord{8, 11, 603}, + dictWord{9, 11, 593}, + dictWord{9, 11, 634}, + dictWord{ + 10, + 11, + 44, + }, + dictWord{10, 11, 173}, + dictWord{11, 11, 462}, + dictWord{11, 11, 515}, + dictWord{13, 11, 216}, + dictWord{13, 11, 288}, + dictWord{142, 11, 400}, + dictWord{136, 0, 483}, + dictWord{135, 10, 262}, + dictWord{6, 0, 1709}, + dictWord{133, 10, 620}, + dictWord{4, 10, 34}, + dictWord{5, 10, 574}, + dictWord{7, 10, 279}, + dictWord{7, 10, 1624}, + dictWord{136, 10, 601}, + dictWord{137, 10, 170}, + dictWord{147, 0, 119}, + dictWord{12, 11, 108}, + dictWord{141, 11, 291}, + dictWord{ + 11, + 0, + 69, + }, + 
dictWord{12, 0, 105}, + dictWord{12, 0, 117}, + dictWord{13, 0, 213}, + dictWord{14, 0, 13}, + dictWord{14, 0, 62}, + dictWord{14, 0, 177}, + dictWord{14, 0, 421}, + dictWord{15, 0, 19}, + dictWord{146, 0, 141}, + dictWord{137, 0, 309}, + dictWord{11, 11, 278}, + dictWord{142, 11, 73}, + dictWord{7, 0, 608}, + dictWord{7, 0, 976}, + dictWord{9, 0, 146}, + dictWord{10, 0, 206}, + dictWord{10, 0, 596}, + dictWord{13, 0, 218}, + dictWord{142, 0, 153}, + dictWord{133, 10, 332}, + dictWord{6, 10, 261}, + dictWord{ + 8, + 10, + 182, + }, + dictWord{139, 10, 943}, + dictWord{4, 11, 493}, + dictWord{144, 11, 55}, + dictWord{134, 10, 1721}, + dictWord{132, 0, 768}, + dictWord{4, 10, 933}, + dictWord{133, 10, 880}, + dictWord{7, 11, 555}, + dictWord{7, 11, 1316}, + dictWord{7, 11, 1412}, + dictWord{7, 11, 1839}, + dictWord{9, 11, 192}, + dictWord{ + 9, + 11, + 589, + }, + dictWord{11, 11, 241}, + dictWord{11, 11, 676}, + dictWord{11, 11, 811}, + dictWord{11, 11, 891}, + dictWord{12, 11, 140}, + dictWord{12, 11, 346}, + dictWord{ + 12, + 11, + 479, + }, + dictWord{13, 11, 30}, + dictWord{13, 11, 49}, + dictWord{13, 11, 381}, + dictWord{14, 11, 188}, + dictWord{15, 11, 150}, + dictWord{16, 11, 76}, + dictWord{18, 11, 30}, + dictWord{148, 11, 52}, + dictWord{4, 0, 518}, + dictWord{135, 0, 1136}, + dictWord{6, 11, 568}, + dictWord{7, 11, 112}, + dictWord{7, 11, 1804}, + dictWord{8, 11, 362}, + dictWord{8, 11, 410}, + dictWord{8, 11, 830}, + dictWord{9, 11, 514}, + dictWord{11, 11, 649}, + dictWord{142, 11, 157}, + dictWord{135, 11, 673}, + dictWord{8, 0, 689}, + dictWord{137, 0, 863}, + dictWord{4, 0, 18}, + dictWord{7, 0, 145}, + dictWord{7, 0, 444}, + dictWord{7, 0, 1278}, + dictWord{8, 0, 49}, + dictWord{8, 0, 400}, + dictWord{9, 0, 71}, + dictWord{9, 0, 250}, + dictWord{10, 0, 459}, + dictWord{12, 0, 160}, + dictWord{16, 0, 24}, + dictWord{132, 11, 625}, + dictWord{140, 0, 1020}, + dictWord{4, 0, 997}, + dictWord{6, 0, 1946}, + dictWord{6, 0, 1984}, + dictWord{134, 0, 1998}, 
+ dictWord{6, 11, 16}, + dictWord{6, 11, 158}, + dictWord{7, 11, 43}, + dictWord{ + 7, + 11, + 129, + }, + dictWord{7, 11, 181}, + dictWord{8, 11, 276}, + dictWord{8, 11, 377}, + dictWord{10, 11, 523}, + dictWord{11, 11, 816}, + dictWord{12, 11, 455}, + dictWord{ + 13, + 11, + 303, + }, + dictWord{142, 11, 135}, + dictWord{133, 10, 812}, + dictWord{134, 0, 658}, + dictWord{4, 11, 1}, + dictWord{7, 11, 1143}, + dictWord{7, 11, 1463}, + dictWord{8, 11, 61}, + dictWord{9, 11, 207}, + dictWord{9, 11, 390}, + dictWord{9, 11, 467}, + dictWord{139, 11, 836}, + dictWord{150, 11, 26}, + dictWord{140, 0, 106}, + dictWord{6, 0, 1827}, + dictWord{10, 0, 931}, + dictWord{18, 0, 166}, + dictWord{20, 0, 114}, + dictWord{4, 10, 137}, + dictWord{7, 10, 1178}, + dictWord{7, 11, 1319}, + dictWord{135, 10, 1520}, + dictWord{133, 0, 1010}, + dictWord{4, 11, 723}, + dictWord{5, 11, 895}, + dictWord{7, 11, 1031}, + dictWord{8, 11, 199}, + dictWord{8, 11, 340}, + dictWord{9, 11, 153}, + dictWord{9, 11, 215}, + dictWord{10, 11, 21}, + dictWord{10, 11, 59}, + dictWord{10, 11, 80}, + dictWord{10, 11, 224}, + dictWord{11, 11, 229}, + dictWord{11, 11, 652}, + dictWord{12, 11, 192}, + dictWord{13, 11, 146}, + dictWord{142, 11, 91}, + dictWord{132, 11, 295}, + dictWord{6, 11, 619}, + dictWord{ + 7, + 11, + 898, + }, + dictWord{7, 11, 1092}, + dictWord{8, 11, 485}, + dictWord{18, 11, 28}, + dictWord{147, 11, 116}, + dictWord{137, 11, 51}, + dictWord{6, 10, 1661}, + dictWord{ + 7, + 10, + 1975, + }, + dictWord{7, 10, 2009}, + dictWord{135, 10, 2011}, + dictWord{5, 11, 309}, + dictWord{140, 11, 211}, + dictWord{5, 0, 87}, + dictWord{7, 0, 313}, + dictWord{ + 7, + 0, + 1103, + }, + dictWord{10, 0, 208}, + dictWord{10, 0, 582}, + dictWord{11, 0, 389}, + dictWord{11, 0, 813}, + dictWord{12, 0, 385}, + dictWord{13, 0, 286}, + dictWord{ + 14, + 0, + 124, + }, + dictWord{146, 0, 108}, + dictWord{5, 11, 125}, + dictWord{8, 11, 77}, + dictWord{138, 11, 15}, + dictWord{132, 0, 267}, + dictWord{133, 0, 703}, 
+ dictWord{ + 137, + 11, + 155, + }, + dictWord{133, 11, 439}, + dictWord{11, 11, 164}, + dictWord{140, 11, 76}, + dictWord{9, 0, 496}, + dictWord{5, 10, 89}, + dictWord{7, 10, 1915}, + dictWord{ + 9, + 10, + 185, + }, + dictWord{9, 10, 235}, + dictWord{10, 10, 64}, + dictWord{10, 10, 270}, + dictWord{10, 10, 403}, + dictWord{10, 10, 469}, + dictWord{10, 10, 529}, + dictWord{10, 10, 590}, + dictWord{11, 10, 140}, + dictWord{11, 10, 860}, + dictWord{13, 10, 1}, + dictWord{13, 10, 422}, + dictWord{14, 10, 341}, + dictWord{14, 10, 364}, + dictWord{17, 10, 93}, + dictWord{18, 10, 113}, + dictWord{19, 10, 97}, + dictWord{147, 10, 113}, + dictWord{133, 10, 695}, + dictWord{135, 0, 1121}, + dictWord{ + 5, + 10, + 6, + }, + dictWord{6, 10, 183}, + dictWord{7, 10, 680}, + dictWord{7, 10, 978}, + dictWord{7, 10, 1013}, + dictWord{7, 10, 1055}, + dictWord{12, 10, 230}, + dictWord{ + 13, + 10, + 172, + }, + dictWord{146, 10, 29}, + dictWord{4, 11, 8}, + dictWord{7, 11, 1152}, + dictWord{7, 11, 1153}, + dictWord{7, 11, 1715}, + dictWord{9, 11, 374}, + dictWord{ + 10, + 11, + 478, + }, + dictWord{139, 11, 648}, + dictWord{135, 11, 1099}, + dictWord{6, 10, 29}, + dictWord{139, 10, 63}, + dictWord{4, 0, 561}, + dictWord{10, 0, 249}, + dictWord{ + 139, + 0, + 209, + }, + dictWord{132, 0, 760}, + dictWord{7, 11, 799}, + dictWord{138, 11, 511}, + dictWord{136, 11, 87}, + dictWord{9, 0, 154}, + dictWord{140, 0, 485}, + dictWord{136, 0, 255}, + dictWord{132, 0, 323}, + dictWord{140, 0, 419}, + dictWord{132, 10, 311}, + dictWord{134, 10, 1740}, + dictWord{4, 0, 368}, + dictWord{ + 135, + 0, + 641, + }, + dictWord{7, 10, 170}, + dictWord{8, 10, 90}, + dictWord{8, 10, 177}, + dictWord{8, 10, 415}, + dictWord{11, 10, 714}, + dictWord{142, 10, 281}, + dictWord{ + 4, + 11, + 69, + }, + dictWord{5, 11, 122}, + dictWord{9, 11, 656}, + dictWord{138, 11, 464}, + dictWord{5, 11, 849}, + dictWord{134, 11, 1633}, + dictWord{8, 0, 522}, + dictWord{ + 142, + 0, + 328, + }, + dictWord{11, 10, 91}, + 
dictWord{13, 10, 129}, + dictWord{15, 10, 101}, + dictWord{145, 10, 125}, + dictWord{7, 0, 562}, + dictWord{8, 0, 551}, + dictWord{ + 4, + 10, + 494, + }, + dictWord{6, 10, 74}, + dictWord{7, 10, 44}, + dictWord{11, 11, 499}, + dictWord{12, 10, 17}, + dictWord{15, 10, 5}, + dictWord{148, 10, 11}, + dictWord{4, 10, 276}, + dictWord{133, 10, 296}, + dictWord{9, 0, 92}, + dictWord{147, 0, 91}, + dictWord{4, 10, 7}, + dictWord{5, 10, 90}, + dictWord{5, 10, 158}, + dictWord{6, 10, 542}, + dictWord{ + 7, + 10, + 221, + }, + dictWord{7, 10, 1574}, + dictWord{9, 10, 490}, + dictWord{10, 10, 540}, + dictWord{11, 10, 443}, + dictWord{139, 10, 757}, + dictWord{6, 0, 525}, + dictWord{ + 6, + 0, + 1976, + }, + dictWord{8, 0, 806}, + dictWord{9, 0, 876}, + dictWord{140, 0, 284}, + dictWord{5, 11, 859}, + dictWord{7, 10, 588}, + dictWord{7, 11, 1160}, + dictWord{ + 8, + 11, + 107, + }, + dictWord{9, 10, 175}, + dictWord{9, 11, 291}, + dictWord{9, 11, 439}, + dictWord{10, 10, 530}, + dictWord{10, 11, 663}, + dictWord{11, 11, 609}, + dictWord{ + 140, + 11, + 197, + }, + dictWord{7, 11, 168}, + dictWord{13, 11, 196}, + dictWord{141, 11, 237}, + dictWord{139, 0, 958}, + dictWord{133, 0, 594}, + dictWord{135, 10, 580}, + dictWord{7, 10, 88}, + dictWord{136, 10, 627}, + dictWord{6, 0, 479}, + dictWord{6, 0, 562}, + dictWord{7, 0, 1060}, + dictWord{13, 0, 6}, + dictWord{5, 10, 872}, + dictWord{ + 6, + 10, + 57, + }, + dictWord{7, 10, 471}, + dictWord{9, 10, 447}, + dictWord{137, 10, 454}, + dictWord{136, 11, 413}, + dictWord{145, 11, 19}, + dictWord{4, 11, 117}, + dictWord{ + 6, + 11, + 372, + }, + dictWord{7, 11, 1905}, + dictWord{142, 11, 323}, + dictWord{4, 11, 722}, + dictWord{139, 11, 471}, + dictWord{17, 0, 61}, + dictWord{5, 10, 31}, + dictWord{134, 10, 614}, + dictWord{8, 10, 330}, + dictWord{140, 10, 477}, + dictWord{7, 10, 1200}, + dictWord{138, 10, 460}, + dictWord{6, 10, 424}, + dictWord{ + 135, + 10, + 1866, + }, + dictWord{6, 0, 1641}, + dictWord{136, 0, 820}, + 
dictWord{6, 0, 1556}, + dictWord{134, 0, 1618}, + dictWord{9, 11, 5}, + dictWord{12, 11, 216}, + dictWord{ + 12, + 11, + 294, + }, + dictWord{12, 11, 298}, + dictWord{12, 11, 400}, + dictWord{12, 11, 518}, + dictWord{13, 11, 229}, + dictWord{143, 11, 139}, + dictWord{15, 11, 155}, + dictWord{144, 11, 79}, + dictWord{4, 0, 302}, + dictWord{135, 0, 1766}, + dictWord{5, 10, 13}, + dictWord{134, 10, 142}, + dictWord{6, 0, 148}, + dictWord{7, 0, 1313}, + dictWord{ + 7, + 10, + 116, + }, + dictWord{8, 10, 322}, + dictWord{8, 10, 755}, + dictWord{9, 10, 548}, + dictWord{10, 10, 714}, + dictWord{11, 10, 884}, + dictWord{141, 10, 324}, + dictWord{137, 0, 676}, + dictWord{9, 11, 88}, + dictWord{139, 11, 270}, + dictWord{5, 11, 12}, + dictWord{7, 11, 375}, + dictWord{137, 11, 438}, + dictWord{134, 0, 1674}, + dictWord{7, 10, 1472}, + dictWord{135, 10, 1554}, + dictWord{11, 0, 178}, + dictWord{7, 10, 1071}, + dictWord{7, 10, 1541}, + dictWord{7, 10, 1767}, + dictWord{ + 7, + 10, + 1806, + }, + dictWord{11, 10, 162}, + dictWord{11, 10, 242}, + dictWord{12, 10, 605}, + dictWord{15, 10, 26}, + dictWord{144, 10, 44}, + dictWord{6, 0, 389}, + dictWord{ + 7, + 0, + 149, + }, + dictWord{9, 0, 142}, + dictWord{138, 0, 94}, + dictWord{140, 11, 71}, + dictWord{145, 10, 115}, + dictWord{6, 0, 8}, + dictWord{7, 0, 1881}, + dictWord{8, 0, 91}, + dictWord{11, 11, 966}, + dictWord{12, 11, 287}, + dictWord{13, 11, 342}, + dictWord{13, 11, 402}, + dictWord{15, 11, 110}, + dictWord{143, 11, 163}, + dictWord{ + 4, + 11, + 258, + }, + dictWord{136, 11, 639}, + dictWord{6, 11, 22}, + dictWord{7, 11, 903}, + dictWord{138, 11, 577}, + dictWord{133, 11, 681}, + dictWord{135, 10, 1111}, + dictWord{135, 11, 1286}, + dictWord{9, 0, 112}, + dictWord{8, 10, 1}, + dictWord{138, 10, 326}, + dictWord{5, 10, 488}, + dictWord{6, 10, 527}, + dictWord{7, 10, 489}, + dictWord{ + 7, + 10, + 1636, + }, + dictWord{8, 10, 121}, + dictWord{8, 10, 144}, + dictWord{8, 10, 359}, + dictWord{9, 10, 193}, + dictWord{9, 10, 
241}, + dictWord{9, 10, 336}, + dictWord{ + 9, + 10, + 882, + }, + dictWord{11, 10, 266}, + dictWord{11, 10, 372}, + dictWord{11, 10, 944}, + dictWord{12, 10, 401}, + dictWord{140, 10, 641}, + dictWord{4, 11, 664}, + dictWord{133, 11, 804}, + dictWord{6, 0, 747}, + dictWord{134, 0, 1015}, + dictWord{135, 0, 1746}, + dictWord{9, 10, 31}, + dictWord{10, 10, 244}, + dictWord{ + 10, + 10, + 699, + }, + dictWord{12, 10, 149}, + dictWord{141, 10, 497}, + dictWord{133, 10, 377}, + dictWord{135, 0, 24}, + dictWord{6, 0, 1352}, + dictWord{5, 11, 32}, + dictWord{ + 145, + 10, + 101, + }, + dictWord{7, 0, 1530}, + dictWord{10, 0, 158}, + dictWord{13, 0, 13}, + dictWord{13, 0, 137}, + dictWord{13, 0, 258}, + dictWord{14, 0, 111}, + dictWord{ + 14, + 0, + 225, + }, + dictWord{14, 0, 253}, + dictWord{14, 0, 304}, + dictWord{14, 0, 339}, + dictWord{14, 0, 417}, + dictWord{146, 0, 33}, + dictWord{4, 0, 503}, + dictWord{ + 135, + 0, + 1661, + }, + dictWord{5, 0, 130}, + dictWord{6, 0, 845}, + dictWord{7, 0, 1314}, + dictWord{9, 0, 610}, + dictWord{10, 0, 718}, + dictWord{11, 0, 601}, + dictWord{11, 0, 819}, + dictWord{11, 0, 946}, + dictWord{140, 0, 536}, + dictWord{10, 0, 149}, + dictWord{11, 0, 280}, + dictWord{142, 0, 336}, + dictWord{134, 0, 1401}, + dictWord{ + 135, + 0, + 1946, + }, + dictWord{8, 0, 663}, + dictWord{144, 0, 8}, + dictWord{134, 0, 1607}, + dictWord{135, 10, 2023}, + dictWord{4, 11, 289}, + dictWord{7, 11, 629}, + dictWord{ + 7, + 11, + 1698, + }, + dictWord{7, 11, 1711}, + dictWord{140, 11, 215}, + dictWord{6, 11, 450}, + dictWord{136, 11, 109}, + dictWord{10, 0, 882}, + dictWord{10, 0, 883}, + dictWord{10, 0, 914}, + dictWord{138, 0, 928}, + dictWord{133, 10, 843}, + dictWord{136, 11, 705}, + dictWord{132, 10, 554}, + dictWord{133, 10, 536}, + dictWord{ + 5, + 0, + 417, + }, + dictWord{9, 10, 79}, + dictWord{11, 10, 625}, + dictWord{145, 10, 7}, + dictWord{7, 11, 1238}, + dictWord{142, 11, 37}, + dictWord{4, 0, 392}, + dictWord{ + 135, + 0, + 1597, + }, + 
dictWord{5, 0, 433}, + dictWord{9, 0, 633}, + dictWord{11, 0, 629}, + dictWord{132, 10, 424}, + dictWord{7, 10, 336}, + dictWord{136, 10, 785}, + dictWord{ + 134, + 11, + 355, + }, + dictWord{6, 0, 234}, + dictWord{7, 0, 769}, + dictWord{9, 0, 18}, + dictWord{138, 0, 358}, + dictWord{4, 10, 896}, + dictWord{134, 10, 1777}, + dictWord{ + 138, + 11, + 323, + }, + dictWord{7, 0, 140}, + dictWord{7, 0, 1950}, + dictWord{8, 0, 680}, + dictWord{11, 0, 817}, + dictWord{147, 0, 88}, + dictWord{7, 0, 1222}, + dictWord{ + 138, + 0, + 386, + }, + dictWord{139, 11, 908}, + dictWord{11, 0, 249}, + dictWord{12, 0, 313}, + dictWord{16, 0, 66}, + dictWord{145, 0, 26}, + dictWord{134, 0, 5}, + dictWord{7, 10, 750}, + dictWord{9, 10, 223}, + dictWord{11, 10, 27}, + dictWord{11, 10, 466}, + dictWord{12, 10, 624}, + dictWord{14, 10, 265}, + dictWord{146, 10, 61}, + dictWord{ + 134, + 11, + 26, + }, + dictWord{134, 0, 1216}, + dictWord{5, 0, 963}, + dictWord{134, 0, 1773}, + dictWord{4, 11, 414}, + dictWord{5, 11, 467}, + dictWord{9, 11, 654}, + dictWord{ + 10, + 11, + 451, + }, + dictWord{12, 11, 59}, + dictWord{141, 11, 375}, + dictWord{135, 11, 17}, + dictWord{4, 10, 603}, + dictWord{133, 10, 661}, + dictWord{4, 10, 11}, + dictWord{ + 6, + 10, + 128, + }, + dictWord{7, 10, 231}, + dictWord{7, 10, 1533}, + dictWord{138, 10, 725}, + dictWord{135, 11, 955}, + dictWord{7, 0, 180}, + dictWord{8, 0, 509}, + dictWord{ + 136, + 0, + 792, + }, + dictWord{132, 10, 476}, + dictWord{132, 0, 1002}, + dictWord{133, 11, 538}, + dictWord{135, 10, 1807}, + dictWord{132, 0, 931}, + dictWord{7, 0, 943}, + dictWord{11, 0, 614}, + dictWord{140, 0, 747}, + dictWord{135, 0, 1837}, + dictWord{9, 10, 20}, + dictWord{10, 10, 324}, + dictWord{10, 10, 807}, + dictWord{ + 139, + 10, + 488, + }, + dictWord{134, 0, 641}, + dictWord{6, 11, 280}, + dictWord{10, 11, 502}, + dictWord{11, 11, 344}, + dictWord{140, 11, 38}, + dictWord{5, 11, 45}, + dictWord{ + 7, + 11, + 1161, + }, + dictWord{11, 11, 448}, + 
dictWord{11, 11, 880}, + dictWord{13, 11, 139}, + dictWord{13, 11, 407}, + dictWord{15, 11, 16}, + dictWord{17, 11, 95}, + dictWord{ + 18, + 11, + 66, + }, + dictWord{18, 11, 88}, + dictWord{18, 11, 123}, + dictWord{149, 11, 7}, + dictWord{9, 0, 280}, + dictWord{138, 0, 134}, + dictWord{22, 0, 22}, + dictWord{23, 0, 5}, + dictWord{151, 0, 29}, + dictWord{136, 11, 777}, + dictWord{4, 0, 90}, + dictWord{5, 0, 545}, + dictWord{7, 0, 754}, + dictWord{9, 0, 186}, + dictWord{10, 0, 72}, + dictWord{ + 10, + 0, + 782, + }, + dictWord{11, 0, 577}, + dictWord{11, 0, 610}, + dictWord{11, 0, 960}, + dictWord{12, 0, 354}, + dictWord{12, 0, 362}, + dictWord{12, 0, 595}, + dictWord{ + 4, + 11, + 410, + }, + dictWord{135, 11, 521}, + dictWord{135, 11, 1778}, + dictWord{5, 10, 112}, + dictWord{6, 10, 103}, + dictWord{134, 10, 150}, + dictWord{138, 10, 356}, + dictWord{132, 0, 742}, + dictWord{7, 0, 151}, + dictWord{9, 0, 329}, + dictWord{139, 0, 254}, + dictWord{8, 0, 853}, + dictWord{8, 0, 881}, + dictWord{8, 0, 911}, + dictWord{ + 8, + 0, + 912, + }, + dictWord{10, 0, 872}, + dictWord{12, 0, 741}, + dictWord{12, 0, 742}, + dictWord{152, 0, 18}, + dictWord{4, 11, 573}, + dictWord{136, 11, 655}, + dictWord{ + 6, + 0, + 921, + }, + dictWord{134, 0, 934}, + dictWord{9, 0, 187}, + dictWord{10, 0, 36}, + dictWord{11, 0, 1016}, + dictWord{17, 0, 44}, + dictWord{146, 0, 64}, + dictWord{7, 0, 833}, + dictWord{136, 0, 517}, + dictWord{4, 0, 506}, + dictWord{5, 0, 295}, + dictWord{135, 0, 1680}, + dictWord{4, 10, 708}, + dictWord{8, 10, 15}, + dictWord{9, 10, 50}, + dictWord{ + 9, + 10, + 386, + }, + dictWord{11, 10, 18}, + dictWord{11, 10, 529}, + dictWord{140, 10, 228}, + dictWord{7, 0, 251}, + dictWord{7, 0, 1701}, + dictWord{8, 0, 436}, + dictWord{ + 4, + 10, + 563, + }, + dictWord{7, 10, 592}, + dictWord{7, 10, 637}, + dictWord{7, 10, 770}, + dictWord{8, 10, 463}, + dictWord{9, 10, 60}, + dictWord{9, 10, 335}, + dictWord{9, 10, 904}, + dictWord{10, 10, 73}, + dictWord{11, 10, 434}, + 
dictWord{12, 10, 585}, + dictWord{13, 10, 331}, + dictWord{18, 10, 110}, + dictWord{148, 10, 60}, + dictWord{ + 132, + 10, + 502, + }, + dictWord{136, 0, 584}, + dictWord{6, 10, 347}, + dictWord{138, 10, 161}, + dictWord{7, 0, 987}, + dictWord{9, 0, 688}, + dictWord{10, 0, 522}, + dictWord{ + 11, + 0, + 788, + }, + dictWord{12, 0, 137}, + dictWord{12, 0, 566}, + dictWord{14, 0, 9}, + dictWord{14, 0, 24}, + dictWord{14, 0, 64}, + dictWord{7, 11, 899}, + dictWord{142, 11, 325}, + dictWord{4, 0, 214}, + dictWord{5, 0, 500}, + dictWord{5, 10, 102}, + dictWord{6, 10, 284}, + dictWord{7, 10, 1079}, + dictWord{7, 10, 1423}, + dictWord{7, 10, 1702}, + dictWord{ + 8, + 10, + 470, + }, + dictWord{9, 10, 554}, + dictWord{9, 10, 723}, + dictWord{139, 10, 333}, + dictWord{7, 10, 246}, + dictWord{135, 10, 840}, + dictWord{6, 10, 10}, + dictWord{ + 8, + 10, + 571, + }, + dictWord{9, 10, 739}, + dictWord{143, 10, 91}, + dictWord{133, 10, 626}, + dictWord{146, 0, 195}, + dictWord{134, 0, 1775}, + dictWord{7, 0, 389}, + dictWord{7, 0, 700}, + dictWord{7, 0, 940}, + dictWord{8, 0, 514}, + dictWord{9, 0, 116}, + dictWord{9, 0, 535}, + dictWord{10, 0, 118}, + dictWord{11, 0, 107}, + dictWord{ + 11, + 0, + 148, + }, + dictWord{11, 0, 922}, + dictWord{12, 0, 254}, + dictWord{12, 0, 421}, + dictWord{142, 0, 238}, + dictWord{5, 10, 18}, + dictWord{6, 10, 526}, + dictWord{13, 10, 24}, + dictWord{13, 10, 110}, + dictWord{19, 10, 5}, + dictWord{147, 10, 44}, + dictWord{132, 0, 743}, + dictWord{11, 0, 292}, + dictWord{4, 10, 309}, + dictWord{5, 10, 462}, + dictWord{7, 10, 970}, + dictWord{135, 10, 1097}, + dictWord{22, 10, 30}, + dictWord{150, 10, 33}, + dictWord{139, 11, 338}, + dictWord{135, 11, 1598}, + dictWord{ + 7, + 0, + 1283, + }, + dictWord{9, 0, 227}, + dictWord{11, 0, 325}, + dictWord{11, 0, 408}, + dictWord{14, 0, 180}, + dictWord{146, 0, 47}, + dictWord{4, 0, 953}, + dictWord{6, 0, 1805}, + dictWord{6, 0, 1814}, + dictWord{6, 0, 1862}, + dictWord{140, 0, 774}, + dictWord{6, 11, 
611}, + dictWord{135, 11, 1733}, + dictWord{135, 11, 1464}, + dictWord{ + 5, + 0, + 81, + }, + dictWord{7, 0, 146}, + dictWord{7, 0, 1342}, + dictWord{8, 0, 53}, + dictWord{8, 0, 561}, + dictWord{8, 0, 694}, + dictWord{8, 0, 754}, + dictWord{9, 0, 115}, + dictWord{ + 9, + 0, + 179, + }, + dictWord{9, 0, 894}, + dictWord{10, 0, 462}, + dictWord{10, 0, 813}, + dictWord{11, 0, 230}, + dictWord{11, 0, 657}, + dictWord{11, 0, 699}, + dictWord{11, 0, 748}, + dictWord{12, 0, 119}, + dictWord{12, 0, 200}, + dictWord{12, 0, 283}, + dictWord{142, 0, 273}, + dictWord{5, 0, 408}, + dictWord{6, 0, 789}, + dictWord{6, 0, 877}, + dictWord{ + 6, + 0, + 1253, + }, + dictWord{6, 0, 1413}, + dictWord{137, 0, 747}, + dictWord{134, 10, 1704}, + dictWord{135, 11, 663}, + dictWord{6, 0, 1910}, + dictWord{6, 0, 1915}, + dictWord{6, 0, 1923}, + dictWord{9, 0, 913}, + dictWord{9, 0, 928}, + dictWord{9, 0, 950}, + dictWord{9, 0, 954}, + dictWord{9, 0, 978}, + dictWord{9, 0, 993}, + dictWord{12, 0, 812}, + dictWord{12, 0, 819}, + dictWord{12, 0, 831}, + dictWord{12, 0, 833}, + dictWord{12, 0, 838}, + dictWord{12, 0, 909}, + dictWord{12, 0, 928}, + dictWord{12, 0, 931}, + dictWord{12, 0, 950}, + dictWord{15, 0, 186}, + dictWord{15, 0, 187}, + dictWord{15, 0, 195}, + dictWord{15, 0, 196}, + dictWord{15, 0, 209}, + dictWord{15, 0, 215}, + dictWord{ + 15, + 0, + 236, + }, + dictWord{15, 0, 241}, + dictWord{15, 0, 249}, + dictWord{15, 0, 253}, + dictWord{18, 0, 180}, + dictWord{18, 0, 221}, + dictWord{18, 0, 224}, + dictWord{ + 18, + 0, + 227, + }, + dictWord{18, 0, 229}, + dictWord{149, 0, 60}, + dictWord{7, 0, 1826}, + dictWord{135, 0, 1938}, + dictWord{11, 0, 490}, + dictWord{18, 0, 143}, + dictWord{ + 5, + 10, + 86, + }, + dictWord{7, 10, 743}, + dictWord{9, 10, 85}, + dictWord{10, 10, 281}, + dictWord{10, 10, 432}, + dictWord{12, 10, 251}, + dictWord{13, 10, 118}, + dictWord{ + 142, + 10, + 378, + }, + dictWord{5, 10, 524}, + dictWord{133, 10, 744}, + dictWord{141, 11, 442}, + dictWord{10, 
10, 107}, + dictWord{140, 10, 436}, + dictWord{135, 11, 503}, + dictWord{134, 0, 1162}, + dictWord{132, 10, 927}, + dictWord{7, 0, 30}, + dictWord{8, 0, 86}, + dictWord{8, 0, 315}, + dictWord{8, 0, 700}, + dictWord{9, 0, 576}, + dictWord{ + 9, + 0, + 858, + }, + dictWord{10, 0, 414}, + dictWord{11, 0, 310}, + dictWord{11, 0, 888}, + dictWord{11, 0, 904}, + dictWord{12, 0, 361}, + dictWord{13, 0, 248}, + dictWord{13, 0, 371}, + dictWord{14, 0, 142}, + dictWord{12, 10, 670}, + dictWord{146, 10, 94}, + dictWord{134, 0, 721}, + dictWord{4, 11, 113}, + dictWord{5, 11, 163}, + dictWord{5, 11, 735}, + dictWord{7, 11, 1009}, + dictWord{7, 10, 1149}, + dictWord{9, 11, 9}, + dictWord{9, 10, 156}, + dictWord{9, 11, 771}, + dictWord{12, 11, 90}, + dictWord{13, 11, 138}, + dictWord{13, 11, 410}, + dictWord{143, 11, 128}, + dictWord{138, 0, 839}, + dictWord{133, 10, 778}, + dictWord{137, 0, 617}, + dictWord{133, 10, 502}, + dictWord{ + 8, + 10, + 196, + }, + dictWord{10, 10, 283}, + dictWord{139, 10, 406}, + dictWord{6, 0, 428}, + dictWord{7, 0, 524}, + dictWord{8, 0, 169}, + dictWord{8, 0, 234}, + dictWord{9, 0, 480}, + dictWord{138, 0, 646}, + dictWord{133, 10, 855}, + dictWord{134, 0, 1648}, + dictWord{7, 0, 1205}, + dictWord{138, 0, 637}, + dictWord{7, 0, 1596}, + dictWord{ + 4, + 11, + 935, + }, + dictWord{133, 11, 823}, + dictWord{5, 11, 269}, + dictWord{7, 11, 434}, + dictWord{7, 11, 891}, + dictWord{8, 11, 339}, + dictWord{9, 11, 702}, + dictWord{ + 11, + 11, + 594, + }, + dictWord{11, 11, 718}, + dictWord{145, 11, 100}, + dictWord{7, 11, 878}, + dictWord{9, 11, 485}, + dictWord{141, 11, 264}, + dictWord{4, 0, 266}, + dictWord{ + 8, + 0, + 4, + }, + dictWord{9, 0, 39}, + dictWord{10, 0, 166}, + dictWord{11, 0, 918}, + dictWord{12, 0, 635}, + dictWord{20, 0, 10}, + dictWord{22, 0, 27}, + dictWord{22, 0, 43}, + dictWord{ + 22, + 0, + 52, + }, + dictWord{134, 11, 1713}, + dictWord{7, 10, 1400}, + dictWord{9, 10, 446}, + dictWord{138, 10, 45}, + dictWord{135, 11, 900}, + 
dictWord{132, 0, 862}, + dictWord{134, 0, 1554}, + dictWord{135, 11, 1033}, + dictWord{19, 0, 16}, + dictWord{147, 11, 16}, + dictWord{135, 11, 1208}, + dictWord{7, 0, 157}, + dictWord{ + 136, + 0, + 279, + }, + dictWord{6, 0, 604}, + dictWord{136, 0, 391}, + dictWord{13, 10, 455}, + dictWord{15, 10, 99}, + dictWord{15, 10, 129}, + dictWord{144, 10, 68}, + dictWord{ + 135, + 10, + 172, + }, + dictWord{7, 0, 945}, + dictWord{11, 0, 713}, + dictWord{139, 0, 744}, + dictWord{4, 0, 973}, + dictWord{10, 0, 877}, + dictWord{10, 0, 937}, + dictWord{ + 10, + 0, + 938, + }, + dictWord{140, 0, 711}, + dictWord{139, 0, 1022}, + dictWord{132, 10, 568}, + dictWord{142, 11, 143}, + dictWord{4, 0, 567}, + dictWord{9, 0, 859}, + dictWord{ + 132, + 10, + 732, + }, + dictWord{7, 0, 1846}, + dictWord{136, 0, 628}, + dictWord{136, 10, 733}, + dictWord{133, 0, 762}, + dictWord{4, 10, 428}, + dictWord{135, 10, 1789}, + dictWord{10, 0, 784}, + dictWord{13, 0, 191}, + dictWord{7, 10, 2015}, + dictWord{140, 10, 665}, + dictWord{133, 0, 298}, + dictWord{7, 0, 633}, + dictWord{7, 0, 905}, + dictWord{7, 0, 909}, + dictWord{7, 0, 1538}, + dictWord{9, 0, 767}, + dictWord{140, 0, 636}, + dictWord{138, 10, 806}, + dictWord{132, 0, 795}, + dictWord{139, 0, 301}, + dictWord{135, 0, 1970}, + dictWord{5, 11, 625}, + dictWord{135, 11, 1617}, + dictWord{135, 11, 275}, + dictWord{7, 11, 37}, + dictWord{8, 11, 425}, + dictWord{ + 8, + 11, + 693, + }, + dictWord{9, 11, 720}, + dictWord{10, 11, 380}, + dictWord{10, 11, 638}, + dictWord{11, 11, 273}, + dictWord{11, 11, 307}, + dictWord{11, 11, 473}, + dictWord{ + 12, + 11, + 61, + }, + dictWord{143, 11, 43}, + dictWord{135, 11, 198}, + dictWord{134, 0, 1236}, + dictWord{7, 0, 369}, + dictWord{12, 0, 644}, + dictWord{12, 0, 645}, + dictWord{144, 0, 90}, + dictWord{19, 0, 15}, + dictWord{149, 0, 27}, + dictWord{6, 0, 71}, + dictWord{7, 0, 845}, + dictWord{8, 0, 160}, + dictWord{9, 0, 318}, + dictWord{6, 10, 1623}, + dictWord{134, 10, 1681}, + dictWord{134, 0, 
1447}, + dictWord{134, 0, 1255}, + dictWord{138, 0, 735}, + dictWord{8, 0, 76}, + dictWord{132, 11, 168}, + dictWord{ + 6, + 10, + 1748, + }, + dictWord{8, 10, 715}, + dictWord{9, 10, 802}, + dictWord{10, 10, 46}, + dictWord{10, 10, 819}, + dictWord{13, 10, 308}, + dictWord{14, 10, 351}, + dictWord{14, 10, 363}, + dictWord{146, 10, 67}, + dictWord{135, 11, 91}, + dictWord{6, 0, 474}, + dictWord{4, 10, 63}, + dictWord{133, 10, 347}, + dictWord{133, 10, 749}, + dictWord{138, 0, 841}, + dictWord{133, 10, 366}, + dictWord{6, 0, 836}, + dictWord{132, 11, 225}, + dictWord{135, 0, 1622}, + dictWord{135, 10, 89}, + dictWord{ + 140, + 0, + 735, + }, + dictWord{134, 0, 1601}, + dictWord{138, 11, 145}, + dictWord{6, 0, 1390}, + dictWord{137, 0, 804}, + dictWord{142, 0, 394}, + dictWord{6, 11, 15}, + dictWord{ + 7, + 11, + 70, + }, + dictWord{10, 11, 240}, + dictWord{147, 11, 93}, + dictWord{6, 0, 96}, + dictWord{135, 0, 1426}, + dictWord{4, 0, 651}, + dictWord{133, 0, 289}, + dictWord{ + 7, + 11, + 956, + }, + dictWord{7, 10, 977}, + dictWord{7, 11, 1157}, + dictWord{7, 11, 1506}, + dictWord{7, 11, 1606}, + dictWord{7, 11, 1615}, + dictWord{7, 11, 1619}, + dictWord{ + 7, + 11, + 1736, + }, + dictWord{7, 11, 1775}, + dictWord{8, 11, 590}, + dictWord{9, 11, 324}, + dictWord{9, 11, 736}, + dictWord{9, 11, 774}, + dictWord{9, 11, 776}, + dictWord{ + 9, + 11, + 784, + }, + dictWord{10, 11, 567}, + dictWord{10, 11, 708}, + dictWord{11, 11, 518}, + dictWord{11, 11, 613}, + dictWord{11, 11, 695}, + dictWord{11, 11, 716}, + dictWord{11, 11, 739}, + dictWord{11, 11, 770}, + dictWord{11, 11, 771}, + dictWord{11, 11, 848}, + dictWord{11, 11, 857}, + dictWord{11, 11, 931}, + dictWord{ + 11, + 11, + 947, + }, + dictWord{12, 11, 326}, + dictWord{12, 11, 387}, + dictWord{12, 11, 484}, + dictWord{12, 11, 528}, + dictWord{12, 11, 552}, + dictWord{12, 11, 613}, + dictWord{ + 13, + 11, + 189, + }, + dictWord{13, 11, 256}, + dictWord{13, 11, 340}, + dictWord{13, 11, 432}, + dictWord{13, 11, 436}, 
+ dictWord{13, 11, 440}, + dictWord{13, 11, 454}, + dictWord{14, 11, 174}, + dictWord{14, 11, 220}, + dictWord{14, 11, 284}, + dictWord{14, 11, 390}, + dictWord{145, 11, 121}, + dictWord{7, 0, 688}, + dictWord{8, 0, 35}, + dictWord{9, 0, 511}, + dictWord{10, 0, 767}, + dictWord{147, 0, 118}, + dictWord{134, 0, 667}, + dictWord{4, 0, 513}, + dictWord{5, 10, 824}, + dictWord{133, 10, 941}, + dictWord{7, 10, 440}, + dictWord{8, 10, 230}, + dictWord{139, 10, 106}, + dictWord{134, 0, 2034}, + dictWord{135, 11, 1399}, + dictWord{143, 11, 66}, + dictWord{ + 135, + 11, + 1529, + }, + dictWord{4, 11, 145}, + dictWord{6, 11, 176}, + dictWord{7, 11, 395}, + dictWord{9, 11, 562}, + dictWord{144, 11, 28}, + dictWord{132, 11, 501}, + dictWord{132, 0, 704}, + dictWord{134, 0, 1524}, + dictWord{7, 0, 1078}, + dictWord{134, 11, 464}, + dictWord{6, 11, 509}, + dictWord{10, 11, 82}, + dictWord{20, 11, 91}, + dictWord{151, 11, 13}, + dictWord{4, 0, 720}, + dictWord{133, 0, 306}, + dictWord{133, 0, 431}, + dictWord{7, 0, 1196}, + dictWord{4, 10, 914}, + dictWord{5, 10, 800}, + dictWord{133, 10, 852}, + dictWord{135, 11, 1189}, + dictWord{10, 0, 54}, + dictWord{141, 10, 115}, + dictWord{7, 10, 564}, + dictWord{142, 10, 168}, + dictWord{ + 5, + 0, + 464, + }, + dictWord{6, 0, 236}, + dictWord{7, 0, 696}, + dictWord{7, 0, 914}, + dictWord{7, 0, 1108}, + dictWord{7, 0, 1448}, + dictWord{9, 0, 15}, + dictWord{9, 0, 564}, + dictWord{ + 10, + 0, + 14, + }, + dictWord{12, 0, 565}, + dictWord{13, 0, 449}, + dictWord{14, 0, 53}, + dictWord{15, 0, 13}, + dictWord{16, 0, 64}, + dictWord{17, 0, 41}, + dictWord{4, 10, 918}, + dictWord{133, 10, 876}, + dictWord{6, 0, 1418}, + dictWord{134, 10, 1764}, + dictWord{4, 10, 92}, + dictWord{133, 10, 274}, + dictWord{134, 0, 907}, + dictWord{ + 4, + 11, + 114, + }, + dictWord{8, 10, 501}, + dictWord{9, 11, 492}, + dictWord{13, 11, 462}, + dictWord{142, 11, 215}, + dictWord{4, 11, 77}, + dictWord{5, 11, 361}, + dictWord{ + 6, + 11, + 139, + }, + dictWord{6, 
11, 401}, + dictWord{6, 11, 404}, + dictWord{7, 11, 413}, + dictWord{7, 11, 715}, + dictWord{7, 11, 1716}, + dictWord{11, 11, 279}, + dictWord{ + 12, + 11, + 179, + }, + dictWord{12, 11, 258}, + dictWord{13, 11, 244}, + dictWord{142, 11, 358}, + dictWord{6, 0, 1767}, + dictWord{12, 0, 194}, + dictWord{145, 0, 107}, + dictWord{ + 134, + 11, + 1717, + }, + dictWord{5, 10, 743}, + dictWord{142, 11, 329}, + dictWord{4, 10, 49}, + dictWord{7, 10, 280}, + dictWord{135, 10, 1633}, + dictWord{5, 0, 840}, + dictWord{7, 11, 1061}, + dictWord{8, 11, 82}, + dictWord{11, 11, 250}, + dictWord{12, 11, 420}, + dictWord{141, 11, 184}, + dictWord{135, 11, 724}, + dictWord{ + 134, + 0, + 900, + }, + dictWord{136, 10, 47}, + dictWord{134, 0, 1436}, + dictWord{144, 11, 0}, + dictWord{6, 0, 675}, + dictWord{7, 0, 1008}, + dictWord{7, 0, 1560}, + dictWord{ + 9, + 0, + 642, + }, + dictWord{11, 0, 236}, + dictWord{14, 0, 193}, + dictWord{5, 10, 272}, + dictWord{5, 10, 908}, + dictWord{5, 10, 942}, + dictWord{8, 10, 197}, + dictWord{9, 10, 47}, + dictWord{11, 10, 538}, + dictWord{139, 10, 742}, + dictWord{4, 0, 68}, + dictWord{5, 0, 628}, + dictWord{5, 0, 634}, + dictWord{6, 0, 386}, + dictWord{7, 0, 794}, + dictWord{ + 8, + 0, + 273, + }, + dictWord{9, 0, 563}, + dictWord{10, 0, 105}, + dictWord{10, 0, 171}, + dictWord{11, 0, 94}, + dictWord{139, 0, 354}, + dictWord{135, 10, 1911}, + dictWord{ + 137, + 10, + 891, + }, + dictWord{4, 0, 95}, + dictWord{6, 0, 1297}, + dictWord{6, 0, 1604}, + dictWord{7, 0, 416}, + dictWord{139, 0, 830}, + dictWord{6, 11, 513}, + dictWord{ + 135, + 11, + 1052, + }, + dictWord{7, 0, 731}, + dictWord{13, 0, 20}, + dictWord{143, 0, 11}, + dictWord{137, 11, 899}, + dictWord{10, 0, 850}, + dictWord{140, 0, 697}, + dictWord{ + 4, + 0, + 662, + }, + dictWord{7, 11, 1417}, + dictWord{12, 11, 382}, + dictWord{17, 11, 48}, + dictWord{152, 11, 12}, + dictWord{133, 0, 736}, + dictWord{132, 0, 861}, + dictWord{ + 4, + 10, + 407, + }, + dictWord{132, 10, 560}, + 
dictWord{141, 10, 490}, + dictWord{6, 11, 545}, + dictWord{7, 11, 565}, + dictWord{7, 11, 1669}, + dictWord{10, 11, 114}, + dictWord{11, 11, 642}, + dictWord{140, 11, 618}, + dictWord{6, 0, 871}, + dictWord{134, 0, 1000}, + dictWord{5, 0, 864}, + dictWord{10, 0, 648}, + dictWord{11, 0, 671}, + dictWord{15, 0, 46}, + dictWord{133, 11, 5}, + dictWord{133, 0, 928}, + dictWord{11, 0, 90}, + dictWord{13, 0, 7}, + dictWord{4, 10, 475}, + dictWord{11, 10, 35}, + dictWord{ + 13, + 10, + 71, + }, + dictWord{13, 10, 177}, + dictWord{142, 10, 422}, + dictWord{136, 0, 332}, + dictWord{135, 11, 192}, + dictWord{134, 0, 1055}, + dictWord{136, 11, 763}, + dictWord{11, 0, 986}, + dictWord{140, 0, 682}, + dictWord{7, 0, 76}, + dictWord{8, 0, 44}, + dictWord{9, 0, 884}, + dictWord{10, 0, 580}, + dictWord{11, 0, 399}, + dictWord{ + 11, + 0, + 894, + }, + dictWord{143, 0, 122}, + dictWord{135, 11, 1237}, + dictWord{135, 10, 636}, + dictWord{11, 0, 300}, + dictWord{6, 10, 222}, + dictWord{7, 10, 1620}, + dictWord{ + 8, + 10, + 409, + }, + dictWord{137, 10, 693}, + dictWord{4, 11, 87}, + dictWord{5, 11, 250}, + dictWord{10, 11, 601}, + dictWord{13, 11, 298}, + dictWord{13, 11, 353}, + dictWord{141, 11, 376}, + dictWord{5, 0, 518}, + dictWord{10, 0, 340}, + dictWord{11, 0, 175}, + dictWord{149, 0, 16}, + dictWord{140, 0, 771}, + dictWord{6, 0, 1108}, + dictWord{137, 0, 831}, + dictWord{132, 0, 836}, + dictWord{135, 0, 1852}, + dictWord{4, 0, 957}, + dictWord{6, 0, 1804}, + dictWord{8, 0, 842}, + dictWord{8, 0, 843}, + dictWord{ + 8, + 0, + 851, + }, + dictWord{8, 0, 855}, + dictWord{140, 0, 767}, + dictWord{135, 11, 814}, + dictWord{4, 11, 57}, + dictWord{7, 11, 1195}, + dictWord{7, 11, 1438}, + dictWord{ + 7, + 11, + 1548, + }, + dictWord{7, 11, 1835}, + dictWord{7, 11, 1904}, + dictWord{9, 11, 757}, + dictWord{10, 11, 604}, + dictWord{139, 11, 519}, + dictWord{133, 10, 882}, + dictWord{138, 0, 246}, + dictWord{4, 0, 934}, + dictWord{5, 0, 202}, + dictWord{8, 0, 610}, + dictWord{7, 11, 
1897}, + dictWord{12, 11, 290}, + dictWord{13, 11, 80}, + dictWord{13, 11, 437}, + dictWord{145, 11, 74}, + dictWord{8, 0, 96}, + dictWord{9, 0, 36}, + dictWord{10, 0, 607}, + dictWord{10, 0, 804}, + dictWord{10, 0, 832}, + dictWord{ + 11, + 0, + 423, + }, + dictWord{11, 0, 442}, + dictWord{12, 0, 309}, + dictWord{14, 0, 199}, + dictWord{15, 0, 90}, + dictWord{145, 0, 110}, + dictWord{132, 10, 426}, + dictWord{ + 7, + 0, + 654, + }, + dictWord{8, 0, 240}, + dictWord{6, 10, 58}, + dictWord{7, 10, 745}, + dictWord{7, 10, 1969}, + dictWord{8, 10, 675}, + dictWord{9, 10, 479}, + dictWord{9, 10, 731}, + dictWord{10, 10, 330}, + dictWord{10, 10, 593}, + dictWord{10, 10, 817}, + dictWord{11, 10, 32}, + dictWord{11, 10, 133}, + dictWord{11, 10, 221}, + dictWord{ + 145, + 10, + 68, + }, + dictWord{9, 0, 13}, + dictWord{9, 0, 398}, + dictWord{9, 0, 727}, + dictWord{10, 0, 75}, + dictWord{10, 0, 184}, + dictWord{10, 0, 230}, + dictWord{10, 0, 564}, + dictWord{ + 10, + 0, + 569, + }, + dictWord{11, 0, 973}, + dictWord{12, 0, 70}, + dictWord{12, 0, 189}, + dictWord{13, 0, 57}, + dictWord{141, 0, 257}, + dictWord{4, 11, 209}, + dictWord{ + 135, + 11, + 902, + }, + dictWord{7, 0, 391}, + dictWord{137, 10, 538}, + dictWord{134, 0, 403}, + dictWord{6, 11, 303}, + dictWord{7, 11, 335}, + dictWord{7, 11, 1437}, + dictWord{ + 7, + 11, + 1668, + }, + dictWord{8, 11, 553}, + dictWord{8, 11, 652}, + dictWord{8, 11, 656}, + dictWord{9, 11, 558}, + dictWord{11, 11, 743}, + dictWord{149, 11, 18}, + dictWord{ + 132, + 11, + 559, + }, + dictWord{11, 0, 75}, + dictWord{142, 0, 267}, + dictWord{6, 0, 815}, + dictWord{141, 11, 2}, + dictWord{141, 0, 366}, + dictWord{137, 0, 631}, + dictWord{ + 133, + 11, + 1017, + }, + dictWord{5, 0, 345}, + dictWord{135, 0, 1016}, + dictWord{133, 11, 709}, + dictWord{134, 11, 1745}, + dictWord{133, 10, 566}, + dictWord{7, 0, 952}, + dictWord{6, 10, 48}, + dictWord{9, 10, 139}, + dictWord{10, 10, 399}, + dictWord{11, 10, 469}, + dictWord{12, 10, 634}, + 
dictWord{141, 10, 223}, + dictWord{ + 133, + 0, + 673, + }, + dictWord{9, 0, 850}, + dictWord{7, 11, 8}, + dictWord{136, 11, 206}, + dictWord{6, 0, 662}, + dictWord{149, 0, 35}, + dictWord{4, 0, 287}, + dictWord{133, 0, 1018}, + dictWord{6, 10, 114}, + dictWord{7, 10, 1224}, + dictWord{7, 10, 1556}, + dictWord{136, 10, 3}, + dictWord{8, 10, 576}, + dictWord{137, 10, 267}, + dictWord{4, 0, 884}, + dictWord{5, 0, 34}, + dictWord{10, 0, 724}, + dictWord{12, 0, 444}, + dictWord{13, 0, 354}, + dictWord{18, 0, 32}, + dictWord{23, 0, 24}, + dictWord{23, 0, 31}, + dictWord{ + 152, + 0, + 5, + }, + dictWord{133, 10, 933}, + dictWord{132, 11, 776}, + dictWord{138, 0, 151}, + dictWord{136, 0, 427}, + dictWord{134, 0, 382}, + dictWord{132, 0, 329}, + dictWord{ + 9, + 0, + 846, + }, + dictWord{10, 0, 827}, + dictWord{138, 11, 33}, + dictWord{9, 0, 279}, + dictWord{10, 0, 407}, + dictWord{14, 0, 84}, + dictWord{22, 0, 18}, + dictWord{ + 135, + 11, + 1297, + }, + dictWord{136, 11, 406}, + dictWord{132, 0, 906}, + dictWord{136, 0, 366}, + dictWord{134, 0, 843}, + dictWord{134, 0, 1443}, + dictWord{135, 0, 1372}, + dictWord{138, 0, 992}, + dictWord{4, 0, 123}, + dictWord{5, 0, 605}, + dictWord{7, 0, 1509}, + dictWord{136, 0, 36}, + dictWord{132, 0, 649}, + dictWord{8, 11, 175}, + dictWord{10, 11, 168}, + dictWord{138, 11, 573}, + dictWord{133, 0, 767}, + dictWord{134, 0, 1018}, + dictWord{135, 11, 1305}, + dictWord{12, 10, 30}, + dictWord{ + 13, + 10, + 148, + }, + dictWord{14, 10, 87}, + dictWord{14, 10, 182}, + dictWord{16, 10, 42}, + dictWord{148, 10, 70}, + dictWord{134, 11, 607}, + dictWord{4, 0, 273}, + dictWord{ + 5, + 0, + 658, + }, + dictWord{133, 0, 995}, + dictWord{6, 0, 72}, + dictWord{139, 11, 174}, + dictWord{10, 0, 483}, + dictWord{12, 0, 368}, + dictWord{7, 10, 56}, + dictWord{ + 7, + 10, + 1989, + }, + dictWord{8, 10, 337}, + dictWord{8, 10, 738}, + dictWord{9, 10, 600}, + dictWord{13, 10, 447}, + dictWord{142, 10, 92}, + dictWord{5, 11, 784}, + dictWord{ + 138, + 
10, + 666, + }, + dictWord{135, 0, 1345}, + dictWord{139, 11, 882}, + dictWord{134, 0, 1293}, + dictWord{133, 0, 589}, + dictWord{134, 0, 1988}, + dictWord{5, 0, 117}, + dictWord{6, 0, 514}, + dictWord{6, 0, 541}, + dictWord{7, 0, 1164}, + dictWord{7, 0, 1436}, + dictWord{8, 0, 220}, + dictWord{8, 0, 648}, + dictWord{10, 0, 688}, + dictWord{ + 139, + 0, + 560, + }, + dictWord{136, 0, 379}, + dictWord{5, 0, 686}, + dictWord{7, 10, 866}, + dictWord{135, 10, 1163}, + dictWord{132, 10, 328}, + dictWord{9, 11, 14}, + dictWord{ + 9, + 11, + 441, + }, + dictWord{10, 11, 306}, + dictWord{139, 11, 9}, + dictWord{4, 10, 101}, + dictWord{135, 10, 1171}, + dictWord{5, 10, 833}, + dictWord{136, 10, 744}, + dictWord{5, 11, 161}, + dictWord{7, 11, 839}, + dictWord{135, 11, 887}, + dictWord{7, 0, 196}, + dictWord{10, 0, 765}, + dictWord{11, 0, 347}, + dictWord{11, 0, 552}, + dictWord{11, 0, 790}, + dictWord{12, 0, 263}, + dictWord{13, 0, 246}, + dictWord{13, 0, 270}, + dictWord{13, 0, 395}, + dictWord{14, 0, 176}, + dictWord{14, 0, 190}, + dictWord{ + 14, + 0, + 398, + }, + dictWord{14, 0, 412}, + dictWord{15, 0, 32}, + dictWord{15, 0, 63}, + dictWord{16, 0, 88}, + dictWord{147, 0, 105}, + dictWord{6, 10, 9}, + dictWord{6, 10, 397}, + dictWord{7, 10, 53}, + dictWord{7, 10, 1742}, + dictWord{10, 10, 632}, + dictWord{11, 10, 828}, + dictWord{140, 10, 146}, + dictWord{5, 0, 381}, + dictWord{135, 0, 1792}, + dictWord{134, 0, 1452}, + dictWord{135, 11, 429}, + dictWord{8, 0, 367}, + dictWord{10, 0, 760}, + dictWord{14, 0, 79}, + dictWord{20, 0, 17}, + dictWord{152, 0, 0}, + dictWord{7, 0, 616}, + dictWord{138, 0, 413}, + dictWord{11, 10, 417}, + dictWord{12, 10, 223}, + dictWord{140, 10, 265}, + dictWord{7, 11, 1611}, + dictWord{13, 11, 14}, + dictWord{15, 11, 44}, + dictWord{19, 11, 13}, + dictWord{148, 11, 76}, + dictWord{135, 0, 1229}, + dictWord{6, 0, 120}, + dictWord{7, 0, 1188}, + dictWord{7, 0, 1710}, + dictWord{8, 0, 286}, + dictWord{9, 0, 667}, + dictWord{11, 0, 592}, + 
dictWord{139, 0, 730}, + dictWord{135, 11, 1814}, + dictWord{135, 0, 1146}, + dictWord{4, 10, 186}, + dictWord{5, 10, 157}, + dictWord{8, 10, 168}, + dictWord{138, 10, 6}, + dictWord{4, 0, 352}, + dictWord{135, 0, 687}, + dictWord{4, 0, 192}, + dictWord{5, 0, 49}, + dictWord{ + 6, + 0, + 200, + }, + dictWord{6, 0, 293}, + dictWord{6, 0, 1696}, + dictWord{135, 0, 1151}, + dictWord{133, 10, 875}, + dictWord{5, 10, 773}, + dictWord{5, 10, 991}, + dictWord{ + 6, + 10, + 1635, + }, + dictWord{134, 10, 1788}, + dictWord{7, 10, 111}, + dictWord{136, 10, 581}, + dictWord{6, 0, 935}, + dictWord{134, 0, 1151}, + dictWord{134, 0, 1050}, + dictWord{132, 0, 650}, + dictWord{132, 0, 147}, + dictWord{11, 0, 194}, + dictWord{12, 0, 62}, + dictWord{12, 0, 88}, + dictWord{11, 11, 194}, + dictWord{12, 11, 62}, + dictWord{140, 11, 88}, + dictWord{6, 0, 339}, + dictWord{135, 0, 923}, + dictWord{134, 10, 1747}, + dictWord{7, 11, 643}, + dictWord{136, 11, 236}, + dictWord{ + 133, + 0, + 934, + }, + dictWord{7, 10, 1364}, + dictWord{7, 10, 1907}, + dictWord{141, 10, 158}, + dictWord{132, 10, 659}, + dictWord{4, 10, 404}, + dictWord{135, 10, 675}, + dictWord{7, 11, 581}, + dictWord{9, 11, 644}, + dictWord{137, 11, 699}, + dictWord{13, 0, 211}, + dictWord{14, 0, 133}, + dictWord{14, 0, 204}, + dictWord{15, 0, 64}, + dictWord{ + 15, + 0, + 69, + }, + dictWord{15, 0, 114}, + dictWord{16, 0, 10}, + dictWord{19, 0, 23}, + dictWord{19, 0, 35}, + dictWord{19, 0, 39}, + dictWord{19, 0, 51}, + dictWord{19, 0, 71}, + dictWord{19, 0, 75}, + dictWord{152, 0, 15}, + dictWord{133, 10, 391}, + dictWord{5, 11, 54}, + dictWord{135, 11, 1513}, + dictWord{7, 0, 222}, + dictWord{8, 0, 341}, + dictWord{ + 5, + 10, + 540, + }, + dictWord{134, 10, 1697}, + dictWord{134, 10, 78}, + dictWord{132, 11, 744}, + dictWord{136, 0, 293}, + dictWord{137, 11, 701}, + dictWord{ + 7, + 11, + 930, + }, + dictWord{10, 11, 402}, + dictWord{10, 11, 476}, + dictWord{13, 11, 452}, + dictWord{18, 11, 55}, + dictWord{147, 11, 104}, 
+ dictWord{132, 0, 637}, + dictWord{133, 10, 460}, + dictWord{8, 11, 50}, + dictWord{137, 11, 624}, + dictWord{132, 11, 572}, + dictWord{134, 0, 1159}, + dictWord{4, 10, 199}, + dictWord{ + 139, + 10, + 34, + }, + dictWord{134, 0, 847}, + dictWord{134, 10, 388}, + dictWord{6, 11, 43}, + dictWord{7, 11, 38}, + dictWord{8, 11, 248}, + dictWord{9, 11, 504}, + dictWord{ + 138, + 11, + 513, + }, + dictWord{9, 0, 683}, + dictWord{4, 10, 511}, + dictWord{6, 10, 608}, + dictWord{9, 10, 333}, + dictWord{10, 10, 602}, + dictWord{11, 10, 441}, + dictWord{ + 11, + 10, + 723, + }, + dictWord{11, 10, 976}, + dictWord{140, 10, 357}, + dictWord{9, 0, 867}, + dictWord{138, 0, 837}, + dictWord{6, 0, 944}, + dictWord{135, 11, 326}, + dictWord{ + 135, + 0, + 1809, + }, + dictWord{5, 10, 938}, + dictWord{7, 11, 783}, + dictWord{136, 10, 707}, + dictWord{133, 11, 766}, + dictWord{133, 11, 363}, + dictWord{6, 0, 170}, + dictWord{7, 0, 1080}, + dictWord{8, 0, 395}, + dictWord{8, 0, 487}, + dictWord{141, 0, 147}, + dictWord{6, 11, 258}, + dictWord{140, 11, 409}, + dictWord{4, 0, 535}, + dictWord{ + 8, + 0, + 618, + }, + dictWord{5, 11, 249}, + dictWord{148, 11, 82}, + dictWord{6, 0, 1379}, + dictWord{149, 11, 15}, + dictWord{135, 0, 1625}, + dictWord{150, 0, 23}, + dictWord{ + 5, + 11, + 393, + }, + dictWord{6, 11, 378}, + dictWord{7, 11, 1981}, + dictWord{9, 11, 32}, + dictWord{9, 11, 591}, + dictWord{10, 11, 685}, + dictWord{10, 11, 741}, + dictWord{ + 142, + 11, + 382, + }, + dictWord{133, 11, 788}, + dictWord{7, 11, 1968}, + dictWord{10, 11, 19}, + dictWord{139, 11, 911}, + dictWord{7, 11, 1401}, + dictWord{ + 135, + 11, + 1476, + }, + dictWord{4, 11, 61}, + dictWord{5, 11, 58}, + dictWord{5, 11, 171}, + dictWord{5, 11, 635}, + dictWord{5, 11, 683}, + dictWord{5, 11, 700}, + dictWord{6, 11, 291}, + dictWord{6, 11, 566}, + dictWord{7, 11, 1650}, + dictWord{11, 11, 523}, + dictWord{12, 11, 273}, + dictWord{12, 11, 303}, + dictWord{15, 11, 39}, + dictWord{ + 143, + 11, + 111, + }, + 
dictWord{6, 10, 469}, + dictWord{7, 10, 1709}, + dictWord{138, 10, 515}, + dictWord{4, 0, 778}, + dictWord{134, 11, 589}, + dictWord{132, 0, 46}, + dictWord{ + 5, + 0, + 811, + }, + dictWord{6, 0, 1679}, + dictWord{6, 0, 1714}, + dictWord{135, 0, 2032}, + dictWord{7, 0, 1458}, + dictWord{9, 0, 407}, + dictWord{11, 0, 15}, + dictWord{12, 0, 651}, + dictWord{149, 0, 37}, + dictWord{7, 0, 938}, + dictWord{132, 10, 500}, + dictWord{6, 0, 34}, + dictWord{7, 0, 69}, + dictWord{7, 0, 1089}, + dictWord{7, 0, 1281}, + dictWord{ + 8, + 0, + 708, + }, + dictWord{8, 0, 721}, + dictWord{9, 0, 363}, + dictWord{148, 0, 98}, + dictWord{10, 11, 231}, + dictWord{147, 11, 124}, + dictWord{7, 11, 726}, + dictWord{ + 152, + 11, + 9, + }, + dictWord{5, 10, 68}, + dictWord{134, 10, 383}, + dictWord{136, 11, 583}, + dictWord{4, 11, 917}, + dictWord{133, 11, 1005}, + dictWord{11, 10, 216}, + dictWord{139, 10, 340}, + dictWord{135, 11, 1675}, + dictWord{8, 0, 441}, + dictWord{10, 0, 314}, + dictWord{143, 0, 3}, + dictWord{132, 11, 919}, + dictWord{4, 10, 337}, + dictWord{6, 10, 353}, + dictWord{7, 10, 1934}, + dictWord{8, 10, 488}, + dictWord{137, 10, 429}, + dictWord{7, 0, 889}, + dictWord{7, 10, 1795}, + dictWord{8, 10, 259}, + dictWord{9, 10, 135}, + dictWord{9, 10, 177}, + dictWord{9, 10, 860}, + dictWord{10, 10, 825}, + dictWord{11, 10, 115}, + dictWord{11, 10, 370}, + dictWord{11, 10, 405}, + dictWord{11, 10, 604}, + dictWord{12, 10, 10}, + dictWord{12, 10, 667}, + dictWord{12, 10, 669}, + dictWord{13, 10, 76}, + dictWord{14, 10, 310}, + dictWord{ + 15, + 10, + 76, + }, + dictWord{15, 10, 147}, + dictWord{148, 10, 23}, + dictWord{4, 10, 15}, + dictWord{4, 11, 255}, + dictWord{5, 10, 22}, + dictWord{5, 11, 302}, + dictWord{6, 11, 132}, + dictWord{6, 10, 244}, + dictWord{7, 10, 40}, + dictWord{7, 11, 128}, + dictWord{7, 10, 200}, + dictWord{7, 11, 283}, + dictWord{7, 10, 906}, + dictWord{7, 10, 1199}, + dictWord{ + 7, + 11, + 1299, + }, + dictWord{9, 10, 616}, + dictWord{10, 11, 52}, + 
dictWord{10, 11, 514}, + dictWord{10, 10, 716}, + dictWord{11, 10, 635}, + dictWord{11, 10, 801}, + dictWord{11, 11, 925}, + dictWord{12, 10, 458}, + dictWord{13, 11, 92}, + dictWord{142, 11, 309}, + dictWord{132, 0, 462}, + dictWord{137, 11, 173}, + dictWord{ + 135, + 10, + 1735, + }, + dictWord{8, 0, 525}, + dictWord{5, 10, 598}, + dictWord{7, 10, 791}, + dictWord{8, 10, 108}, + dictWord{137, 10, 123}, + dictWord{5, 0, 73}, + dictWord{6, 0, 23}, + dictWord{134, 0, 338}, + dictWord{132, 0, 676}, + dictWord{132, 10, 683}, + dictWord{7, 0, 725}, + dictWord{8, 0, 498}, + dictWord{139, 0, 268}, + dictWord{12, 0, 21}, + dictWord{151, 0, 7}, + dictWord{135, 0, 773}, + dictWord{4, 10, 155}, + dictWord{135, 10, 1689}, + dictWord{4, 0, 164}, + dictWord{5, 0, 730}, + dictWord{5, 10, 151}, + dictWord{ + 5, + 10, + 741, + }, + dictWord{6, 11, 210}, + dictWord{7, 10, 498}, + dictWord{7, 10, 870}, + dictWord{7, 10, 1542}, + dictWord{12, 10, 213}, + dictWord{14, 10, 36}, + dictWord{ + 14, + 10, + 391, + }, + dictWord{17, 10, 111}, + dictWord{18, 10, 6}, + dictWord{18, 10, 46}, + dictWord{18, 10, 151}, + dictWord{19, 10, 36}, + dictWord{20, 10, 32}, + dictWord{ + 20, + 10, + 56, + }, + dictWord{20, 10, 69}, + dictWord{20, 10, 102}, + dictWord{21, 10, 4}, + dictWord{22, 10, 8}, + dictWord{22, 10, 10}, + dictWord{22, 10, 14}, + dictWord{ + 150, + 10, + 31, + }, + dictWord{4, 10, 624}, + dictWord{135, 10, 1752}, + dictWord{4, 0, 583}, + dictWord{9, 0, 936}, + dictWord{15, 0, 214}, + dictWord{18, 0, 199}, + dictWord{24, 0, 26}, + dictWord{134, 11, 588}, + dictWord{7, 0, 1462}, + dictWord{11, 0, 659}, + dictWord{4, 11, 284}, + dictWord{134, 11, 223}, + dictWord{133, 0, 220}, + dictWord{ + 139, + 0, + 803, + }, + dictWord{132, 0, 544}, + dictWord{4, 10, 492}, + dictWord{133, 10, 451}, + dictWord{16, 0, 98}, + dictWord{148, 0, 119}, + dictWord{4, 11, 218}, + dictWord{ + 7, + 11, + 526, + }, + dictWord{143, 11, 137}, + dictWord{135, 10, 835}, + dictWord{4, 11, 270}, + dictWord{5, 11, 
192}, + dictWord{6, 11, 332}, + dictWord{7, 11, 1322}, + dictWord{ + 13, + 11, + 9, + }, + dictWord{13, 10, 70}, + dictWord{14, 11, 104}, + dictWord{142, 11, 311}, + dictWord{132, 10, 539}, + dictWord{140, 11, 661}, + dictWord{5, 0, 176}, + dictWord{ + 6, + 0, + 437, + }, + dictWord{6, 0, 564}, + dictWord{11, 0, 181}, + dictWord{141, 0, 183}, + dictWord{135, 0, 1192}, + dictWord{6, 10, 113}, + dictWord{135, 10, 436}, + dictWord{136, 10, 718}, + dictWord{135, 10, 520}, + dictWord{135, 0, 1878}, + dictWord{140, 11, 196}, + dictWord{7, 11, 379}, + dictWord{8, 11, 481}, + dictWord{ + 137, + 11, + 377, + }, + dictWord{5, 11, 1003}, + dictWord{6, 11, 149}, + dictWord{137, 11, 746}, + dictWord{8, 11, 262}, + dictWord{9, 11, 627}, + dictWord{10, 11, 18}, + dictWord{ + 11, + 11, + 214, + }, + dictWord{11, 11, 404}, + dictWord{11, 11, 457}, + dictWord{11, 11, 780}, + dictWord{11, 11, 849}, + dictWord{11, 11, 913}, + dictWord{13, 11, 330}, + dictWord{13, 11, 401}, + dictWord{142, 11, 200}, + dictWord{149, 0, 26}, + dictWord{136, 11, 304}, + dictWord{132, 11, 142}, + dictWord{135, 0, 944}, + dictWord{ + 4, + 0, + 790, + }, + dictWord{5, 0, 273}, + dictWord{134, 0, 394}, + dictWord{134, 0, 855}, + dictWord{4, 0, 135}, + dictWord{6, 0, 127}, + dictWord{7, 0, 1185}, + dictWord{7, 0, 1511}, + dictWord{8, 0, 613}, + dictWord{11, 0, 5}, + dictWord{12, 0, 336}, + dictWord{12, 0, 495}, + dictWord{12, 0, 586}, + dictWord{12, 0, 660}, + dictWord{12, 0, 668}, + dictWord{ + 14, + 0, + 385, + }, + dictWord{15, 0, 118}, + dictWord{17, 0, 20}, + dictWord{146, 0, 98}, + dictWord{6, 0, 230}, + dictWord{9, 0, 752}, + dictWord{18, 0, 109}, + dictWord{12, 10, 610}, + dictWord{13, 10, 431}, + dictWord{144, 10, 59}, + dictWord{7, 0, 1954}, + dictWord{135, 11, 925}, + dictWord{4, 11, 471}, + dictWord{5, 11, 51}, + dictWord{6, 11, 602}, + dictWord{8, 11, 484}, + dictWord{10, 11, 195}, + dictWord{140, 11, 159}, + dictWord{132, 10, 307}, + dictWord{136, 11, 688}, + dictWord{132, 11, 697}, + dictWord{ + 
7, + 11, + 812, + }, + dictWord{7, 11, 1261}, + dictWord{7, 11, 1360}, + dictWord{9, 11, 632}, + dictWord{140, 11, 352}, + dictWord{5, 0, 162}, + dictWord{8, 0, 68}, + dictWord{ + 133, + 10, + 964, + }, + dictWord{4, 0, 654}, + dictWord{136, 11, 212}, + dictWord{4, 0, 156}, + dictWord{7, 0, 998}, + dictWord{7, 0, 1045}, + dictWord{7, 0, 1860}, + dictWord{9, 0, 48}, + dictWord{9, 0, 692}, + dictWord{11, 0, 419}, + dictWord{139, 0, 602}, + dictWord{133, 11, 221}, + dictWord{4, 11, 373}, + dictWord{5, 11, 283}, + dictWord{6, 11, 480}, + dictWord{135, 11, 609}, + dictWord{142, 11, 216}, + dictWord{132, 0, 240}, + dictWord{6, 11, 192}, + dictWord{9, 11, 793}, + dictWord{145, 11, 55}, + dictWord{ + 4, + 10, + 75, + }, + dictWord{5, 10, 180}, + dictWord{6, 10, 500}, + dictWord{7, 10, 58}, + dictWord{7, 10, 710}, + dictWord{138, 10, 645}, + dictWord{4, 11, 132}, + dictWord{5, 11, 69}, + dictWord{5, 10, 649}, + dictWord{135, 11, 1242}, + dictWord{6, 10, 276}, + dictWord{7, 10, 282}, + dictWord{7, 10, 879}, + dictWord{7, 10, 924}, + dictWord{8, 10, 459}, + dictWord{9, 10, 599}, + dictWord{9, 10, 754}, + dictWord{11, 10, 574}, + dictWord{12, 10, 128}, + dictWord{12, 10, 494}, + dictWord{13, 10, 52}, + dictWord{13, 10, 301}, + dictWord{15, 10, 30}, + dictWord{143, 10, 132}, + dictWord{132, 10, 200}, + dictWord{4, 11, 111}, + dictWord{135, 11, 302}, + dictWord{9, 0, 197}, + dictWord{ + 10, + 0, + 300, + }, + dictWord{12, 0, 473}, + dictWord{13, 0, 90}, + dictWord{141, 0, 405}, + dictWord{132, 11, 767}, + dictWord{6, 11, 42}, + dictWord{7, 11, 1416}, + dictWord{ + 7, + 11, + 1590, + }, + dictWord{7, 11, 2005}, + dictWord{8, 11, 131}, + dictWord{8, 11, 466}, + dictWord{9, 11, 672}, + dictWord{13, 11, 252}, + dictWord{148, 11, 103}, + dictWord{ + 8, + 0, + 958, + }, + dictWord{8, 0, 999}, + dictWord{10, 0, 963}, + dictWord{138, 0, 1001}, + dictWord{135, 10, 1621}, + dictWord{135, 0, 858}, + dictWord{4, 0, 606}, + dictWord{ + 137, + 11, + 444, + }, + dictWord{6, 11, 44}, + 
dictWord{136, 11, 368}, + dictWord{139, 11, 172}, + dictWord{4, 11, 570}, + dictWord{133, 11, 120}, + dictWord{139, 11, 624}, + dictWord{7, 0, 1978}, + dictWord{8, 0, 676}, + dictWord{6, 10, 225}, + dictWord{137, 10, 211}, + dictWord{7, 0, 972}, + dictWord{11, 0, 102}, + dictWord{136, 10, 687}, + dictWord{6, 11, 227}, + dictWord{135, 11, 1589}, + dictWord{8, 10, 58}, + dictWord{9, 10, 724}, + dictWord{11, 10, 809}, + dictWord{13, 10, 113}, + dictWord{ + 145, + 10, + 72, + }, + dictWord{4, 0, 361}, + dictWord{133, 0, 315}, + dictWord{132, 0, 461}, + dictWord{6, 10, 345}, + dictWord{135, 10, 1247}, + dictWord{132, 0, 472}, + dictWord{ + 8, + 10, + 767, + }, + dictWord{8, 10, 803}, + dictWord{9, 10, 301}, + dictWord{137, 10, 903}, + dictWord{135, 11, 1333}, + dictWord{135, 11, 477}, + dictWord{7, 10, 1949}, + dictWord{136, 10, 674}, + dictWord{6, 0, 905}, + dictWord{138, 0, 747}, + dictWord{133, 0, 155}, + dictWord{134, 10, 259}, + dictWord{7, 0, 163}, + dictWord{8, 0, 319}, + dictWord{9, 0, 402}, + dictWord{10, 0, 24}, + dictWord{10, 0, 681}, + dictWord{11, 0, 200}, + dictWord{12, 0, 253}, + dictWord{12, 0, 410}, + dictWord{142, 0, 219}, + dictWord{ + 5, + 0, + 475, + }, + dictWord{7, 0, 1780}, + dictWord{9, 0, 230}, + dictWord{11, 0, 297}, + dictWord{11, 0, 558}, + dictWord{14, 0, 322}, + dictWord{19, 0, 76}, + dictWord{6, 11, 1667}, + dictWord{7, 11, 2036}, + dictWord{138, 11, 600}, + dictWord{136, 10, 254}, + dictWord{6, 0, 848}, + dictWord{135, 0, 1956}, + dictWord{6, 11, 511}, + dictWord{ + 140, + 11, + 132, + }, + dictWord{5, 11, 568}, + dictWord{6, 11, 138}, + dictWord{135, 11, 1293}, + dictWord{6, 0, 631}, + dictWord{137, 0, 838}, + dictWord{149, 0, 36}, + dictWord{ + 4, + 11, + 565, + }, + dictWord{8, 11, 23}, + dictWord{136, 11, 827}, + dictWord{5, 0, 944}, + dictWord{134, 0, 1769}, + dictWord{4, 0, 144}, + dictWord{6, 0, 842}, + dictWord{ + 6, + 0, + 1400, + }, + dictWord{4, 11, 922}, + dictWord{133, 11, 1023}, + dictWord{133, 10, 248}, + dictWord{9, 10, 
800}, + dictWord{10, 10, 693}, + dictWord{11, 10, 482}, + dictWord{11, 10, 734}, + dictWord{139, 10, 789}, + dictWord{7, 11, 1002}, + dictWord{139, 11, 145}, + dictWord{4, 10, 116}, + dictWord{5, 10, 95}, + dictWord{5, 10, 445}, + dictWord{7, 10, 1688}, + dictWord{8, 10, 29}, + dictWord{9, 10, 272}, + dictWord{11, 10, 509}, + dictWord{139, 10, 915}, + dictWord{14, 0, 369}, + dictWord{146, 0, 72}, + dictWord{135, 10, 1641}, + dictWord{132, 11, 740}, + dictWord{133, 10, 543}, + dictWord{140, 11, 116}, + dictWord{6, 0, 247}, + dictWord{9, 0, 555}, + dictWord{ + 5, + 10, + 181, + }, + dictWord{136, 10, 41}, + dictWord{133, 10, 657}, + dictWord{136, 0, 996}, + dictWord{138, 10, 709}, + dictWord{7, 0, 189}, + dictWord{8, 10, 202}, + dictWord{ + 138, + 10, + 536, + }, + dictWord{136, 11, 402}, + dictWord{4, 11, 716}, + dictWord{141, 11, 31}, + dictWord{10, 0, 280}, + dictWord{138, 0, 797}, + dictWord{9, 10, 423}, + dictWord{140, 10, 89}, + dictWord{8, 10, 113}, + dictWord{9, 10, 877}, + dictWord{10, 10, 554}, + dictWord{11, 10, 83}, + dictWord{12, 10, 136}, + dictWord{147, 10, 109}, + dictWord{133, 10, 976}, + dictWord{7, 0, 746}, + dictWord{132, 10, 206}, + dictWord{136, 0, 526}, + dictWord{139, 0, 345}, + dictWord{136, 0, 1017}, + dictWord{ + 8, + 11, + 152, + }, + dictWord{9, 11, 53}, + dictWord{9, 11, 268}, + dictWord{9, 11, 901}, + dictWord{10, 11, 518}, + dictWord{10, 11, 829}, + dictWord{11, 11, 188}, + dictWord{ + 13, + 11, + 74, + }, + dictWord{14, 11, 46}, + dictWord{15, 11, 17}, + dictWord{15, 11, 33}, + dictWord{17, 11, 40}, + dictWord{18, 11, 36}, + dictWord{19, 11, 20}, + dictWord{22, 11, 1}, + dictWord{152, 11, 2}, + dictWord{133, 11, 736}, + dictWord{136, 11, 532}, + dictWord{5, 0, 428}, + dictWord{138, 0, 651}, + dictWord{135, 11, 681}, + dictWord{ + 135, + 0, + 1162, + }, + dictWord{7, 0, 327}, + dictWord{13, 0, 230}, + dictWord{17, 0, 113}, + dictWord{8, 10, 226}, + dictWord{10, 10, 537}, + dictWord{11, 10, 570}, + dictWord{ + 11, + 10, + 605, + }, + 
dictWord{11, 10, 799}, + dictWord{11, 10, 804}, + dictWord{12, 10, 85}, + dictWord{12, 10, 516}, + dictWord{12, 10, 623}, + dictWord{12, 11, 677}, + dictWord{ + 13, + 10, + 361, + }, + dictWord{14, 10, 77}, + dictWord{14, 10, 78}, + dictWord{147, 10, 110}, + dictWord{4, 0, 792}, + dictWord{7, 0, 1717}, + dictWord{10, 0, 546}, + dictWord{ + 132, + 10, + 769, + }, + dictWord{4, 11, 684}, + dictWord{136, 11, 384}, + dictWord{132, 10, 551}, + dictWord{134, 0, 1203}, + dictWord{9, 10, 57}, + dictWord{9, 10, 459}, + dictWord{10, 10, 425}, + dictWord{11, 10, 119}, + dictWord{12, 10, 184}, + dictWord{12, 10, 371}, + dictWord{13, 10, 358}, + dictWord{145, 10, 51}, + dictWord{5, 0, 672}, + dictWord{5, 10, 814}, + dictWord{8, 10, 10}, + dictWord{9, 10, 421}, + dictWord{9, 10, 729}, + dictWord{10, 10, 609}, + dictWord{139, 10, 689}, + dictWord{138, 0, 189}, + dictWord{134, 10, 624}, + dictWord{7, 11, 110}, + dictWord{7, 11, 188}, + dictWord{8, 11, 290}, + dictWord{8, 11, 591}, + dictWord{9, 11, 382}, + dictWord{9, 11, 649}, + dictWord{11, 11, 71}, + dictWord{11, 11, 155}, + dictWord{11, 11, 313}, + dictWord{12, 11, 5}, + dictWord{13, 11, 325}, + dictWord{142, 11, 287}, + dictWord{133, 0, 99}, + dictWord{6, 0, 1053}, + dictWord{135, 0, 298}, + dictWord{7, 11, 360}, + dictWord{7, 11, 425}, + dictWord{9, 11, 66}, + dictWord{9, 11, 278}, + dictWord{138, 11, 644}, + dictWord{4, 0, 397}, + dictWord{136, 0, 555}, + dictWord{137, 10, 269}, + dictWord{132, 10, 528}, + dictWord{4, 11, 900}, + dictWord{133, 11, 861}, + dictWord{ + 6, + 0, + 1157, + }, + dictWord{5, 11, 254}, + dictWord{7, 11, 985}, + dictWord{136, 11, 73}, + dictWord{7, 11, 1959}, + dictWord{136, 11, 683}, + dictWord{12, 0, 398}, + dictWord{ + 20, + 0, + 39, + }, + dictWord{21, 0, 11}, + dictWord{150, 0, 41}, + dictWord{4, 0, 485}, + dictWord{7, 0, 353}, + dictWord{135, 0, 1523}, + dictWord{6, 0, 366}, + dictWord{7, 0, 1384}, + dictWord{135, 0, 1601}, + dictWord{138, 0, 787}, + dictWord{137, 0, 282}, + dictWord{5, 10, 
104}, + dictWord{6, 10, 173}, + dictWord{135, 10, 1631}, + dictWord{ + 139, + 11, + 146, + }, + dictWord{4, 0, 157}, + dictWord{133, 0, 471}, + dictWord{134, 0, 941}, + dictWord{132, 11, 725}, + dictWord{7, 0, 1336}, + dictWord{8, 10, 138}, + dictWord{ + 8, + 10, + 342, + }, + dictWord{9, 10, 84}, + dictWord{10, 10, 193}, + dictWord{11, 10, 883}, + dictWord{140, 10, 359}, + dictWord{134, 11, 196}, + dictWord{136, 0, 116}, + dictWord{133, 11, 831}, + dictWord{134, 0, 787}, + dictWord{134, 10, 95}, + dictWord{6, 10, 406}, + dictWord{10, 10, 409}, + dictWord{10, 10, 447}, + dictWord{ + 11, + 10, + 44, + }, + dictWord{140, 10, 100}, + dictWord{5, 0, 160}, + dictWord{7, 0, 363}, + dictWord{7, 0, 589}, + dictWord{10, 0, 170}, + dictWord{141, 0, 55}, + dictWord{134, 0, 1815}, + dictWord{132, 0, 866}, + dictWord{6, 0, 889}, + dictWord{6, 0, 1067}, + dictWord{6, 0, 1183}, + dictWord{4, 11, 321}, + dictWord{134, 11, 569}, + dictWord{5, 11, 848}, + dictWord{134, 11, 66}, + dictWord{4, 11, 36}, + dictWord{6, 10, 1636}, + dictWord{7, 11, 1387}, + dictWord{10, 11, 205}, + dictWord{11, 11, 755}, + dictWord{ + 141, + 11, + 271, + }, + dictWord{132, 0, 689}, + dictWord{9, 0, 820}, + dictWord{4, 10, 282}, + dictWord{7, 10, 1034}, + dictWord{11, 10, 398}, + dictWord{11, 10, 634}, + dictWord{ + 12, + 10, + 1, + }, + dictWord{12, 10, 79}, + dictWord{12, 10, 544}, + dictWord{14, 10, 237}, + dictWord{17, 10, 10}, + dictWord{146, 10, 20}, + dictWord{4, 0, 108}, + dictWord{7, 0, 804}, + dictWord{139, 0, 498}, + dictWord{132, 11, 887}, + dictWord{6, 0, 1119}, + dictWord{135, 11, 620}, + dictWord{6, 11, 165}, + dictWord{138, 11, 388}, + dictWord{ + 5, + 0, + 244, + }, + dictWord{5, 10, 499}, + dictWord{6, 10, 476}, + dictWord{7, 10, 600}, + dictWord{7, 10, 888}, + dictWord{135, 10, 1096}, + dictWord{140, 0, 609}, + dictWord{ + 135, + 0, + 1005, + }, + dictWord{4, 0, 412}, + dictWord{133, 0, 581}, + dictWord{4, 11, 719}, + dictWord{135, 11, 155}, + dictWord{7, 10, 296}, + dictWord{7, 10, 
596}, + dictWord{ + 8, + 10, + 560, + }, + dictWord{8, 10, 586}, + dictWord{9, 10, 612}, + dictWord{11, 10, 304}, + dictWord{12, 10, 46}, + dictWord{13, 10, 89}, + dictWord{14, 10, 112}, + dictWord{ + 145, + 10, + 122, + }, + dictWord{4, 0, 895}, + dictWord{133, 0, 772}, + dictWord{142, 11, 307}, + dictWord{135, 0, 1898}, + dictWord{4, 0, 926}, + dictWord{133, 0, 983}, + dictWord{4, 11, 353}, + dictWord{6, 11, 146}, + dictWord{6, 11, 1789}, + dictWord{7, 11, 288}, + dictWord{7, 11, 990}, + dictWord{7, 11, 1348}, + dictWord{9, 11, 665}, + dictWord{ + 9, + 11, + 898, + }, + dictWord{11, 11, 893}, + dictWord{142, 11, 212}, + dictWord{132, 0, 538}, + dictWord{133, 11, 532}, + dictWord{6, 0, 294}, + dictWord{7, 0, 1267}, + dictWord{8, 0, 624}, + dictWord{141, 0, 496}, + dictWord{7, 0, 1325}, + dictWord{4, 11, 45}, + dictWord{135, 11, 1257}, + dictWord{138, 0, 301}, + dictWord{9, 0, 298}, + dictWord{12, 0, 291}, + dictWord{13, 0, 276}, + dictWord{14, 0, 6}, + dictWord{17, 0, 18}, + dictWord{21, 0, 32}, + dictWord{7, 10, 1599}, + dictWord{7, 10, 1723}, + dictWord{ + 8, + 10, + 79, + }, + dictWord{8, 10, 106}, + dictWord{8, 10, 190}, + dictWord{8, 10, 302}, + dictWord{8, 10, 383}, + dictWord{8, 10, 713}, + dictWord{9, 10, 119}, + dictWord{9, 10, 233}, + dictWord{9, 10, 419}, + dictWord{9, 10, 471}, + dictWord{10, 10, 181}, + dictWord{10, 10, 406}, + dictWord{11, 10, 57}, + dictWord{11, 10, 85}, + dictWord{11, 10, 120}, + dictWord{11, 10, 177}, + dictWord{11, 10, 296}, + dictWord{11, 10, 382}, + dictWord{11, 10, 454}, + dictWord{11, 10, 758}, + dictWord{11, 10, 999}, + dictWord{ + 12, + 10, + 27, + }, + dictWord{12, 10, 131}, + dictWord{12, 10, 245}, + dictWord{12, 10, 312}, + dictWord{12, 10, 446}, + dictWord{12, 10, 454}, + dictWord{13, 10, 98}, + dictWord{ + 13, + 10, + 426, + }, + dictWord{13, 10, 508}, + dictWord{14, 10, 163}, + dictWord{14, 10, 272}, + dictWord{14, 10, 277}, + dictWord{14, 10, 370}, + dictWord{15, 10, 95}, + dictWord{15, 10, 138}, + dictWord{15, 10, 
167}, + dictWord{17, 10, 38}, + dictWord{148, 10, 96}, + dictWord{132, 0, 757}, + dictWord{134, 0, 1263}, + dictWord{4, 0, 820}, + dictWord{134, 10, 1759}, + dictWord{133, 0, 722}, + dictWord{136, 11, 816}, + dictWord{138, 10, 372}, + dictWord{145, 10, 16}, + dictWord{134, 0, 1039}, + dictWord{ + 4, + 0, + 991, + }, + dictWord{134, 0, 2028}, + dictWord{133, 10, 258}, + dictWord{7, 0, 1875}, + dictWord{139, 0, 124}, + dictWord{6, 11, 559}, + dictWord{6, 11, 1691}, + dictWord{135, 11, 586}, + dictWord{5, 0, 324}, + dictWord{7, 0, 881}, + dictWord{8, 10, 134}, + dictWord{9, 10, 788}, + dictWord{140, 10, 438}, + dictWord{7, 11, 1823}, + dictWord{139, 11, 693}, + dictWord{6, 0, 1348}, + dictWord{134, 0, 1545}, + dictWord{134, 0, 911}, + dictWord{132, 0, 954}, + dictWord{8, 0, 329}, + dictWord{8, 0, 414}, + dictWord{7, 10, 1948}, + dictWord{135, 10, 2004}, + dictWord{5, 0, 517}, + dictWord{6, 10, 439}, + dictWord{7, 10, 780}, + dictWord{135, 10, 1040}, + dictWord{ + 132, + 0, + 816, + }, + dictWord{5, 10, 1}, + dictWord{6, 10, 81}, + dictWord{138, 10, 520}, + dictWord{9, 0, 713}, + dictWord{10, 0, 222}, + dictWord{5, 10, 482}, + dictWord{8, 10, 98}, + dictWord{10, 10, 700}, + dictWord{10, 10, 822}, + dictWord{11, 10, 302}, + dictWord{11, 10, 778}, + dictWord{12, 10, 50}, + dictWord{12, 10, 127}, + dictWord{12, 10, 396}, + dictWord{13, 10, 62}, + dictWord{13, 10, 328}, + dictWord{14, 10, 122}, + dictWord{147, 10, 72}, + dictWord{137, 0, 33}, + dictWord{5, 10, 2}, + dictWord{7, 10, 1494}, + dictWord{136, 10, 589}, + dictWord{6, 10, 512}, + dictWord{7, 10, 797}, + dictWord{8, 10, 253}, + dictWord{9, 10, 77}, + dictWord{10, 10, 1}, + dictWord{10, 11, 108}, + dictWord{10, 10, 129}, + dictWord{10, 10, 225}, + dictWord{11, 11, 116}, + dictWord{11, 10, 118}, + dictWord{11, 10, 226}, + dictWord{11, 10, 251}, + dictWord{ + 11, + 10, + 430, + }, + dictWord{11, 10, 701}, + dictWord{11, 10, 974}, + dictWord{11, 10, 982}, + dictWord{12, 10, 64}, + dictWord{12, 10, 260}, + dictWord{12, 
10, 488}, + dictWord{ + 140, + 10, + 690, + }, + dictWord{134, 11, 456}, + dictWord{133, 11, 925}, + dictWord{5, 0, 150}, + dictWord{7, 0, 106}, + dictWord{7, 0, 774}, + dictWord{8, 0, 603}, + dictWord{ + 9, + 0, + 593, + }, + dictWord{9, 0, 634}, + dictWord{10, 0, 44}, + dictWord{10, 0, 173}, + dictWord{11, 0, 462}, + dictWord{11, 0, 515}, + dictWord{13, 0, 216}, + dictWord{13, 0, 288}, + dictWord{142, 0, 400}, + dictWord{137, 10, 347}, + dictWord{5, 0, 748}, + dictWord{134, 0, 553}, + dictWord{12, 0, 108}, + dictWord{141, 0, 291}, + dictWord{7, 0, 420}, + dictWord{4, 10, 12}, + dictWord{7, 10, 522}, + dictWord{7, 10, 809}, + dictWord{8, 10, 797}, + dictWord{141, 10, 88}, + dictWord{6, 11, 193}, + dictWord{7, 11, 240}, + dictWord{ + 7, + 11, + 1682, + }, + dictWord{10, 11, 51}, + dictWord{10, 11, 640}, + dictWord{11, 11, 410}, + dictWord{13, 11, 82}, + dictWord{14, 11, 247}, + dictWord{14, 11, 331}, + dictWord{142, 11, 377}, + dictWord{133, 10, 528}, + dictWord{135, 0, 1777}, + dictWord{4, 0, 493}, + dictWord{144, 0, 55}, + dictWord{136, 11, 633}, + dictWord{ + 139, + 0, + 81, + }, + dictWord{6, 0, 980}, + dictWord{136, 0, 321}, + dictWord{148, 10, 109}, + dictWord{5, 10, 266}, + dictWord{9, 10, 290}, + dictWord{9, 10, 364}, + dictWord{ + 10, + 10, + 293, + }, + dictWord{11, 10, 606}, + dictWord{142, 10, 45}, + dictWord{6, 0, 568}, + dictWord{7, 0, 112}, + dictWord{7, 0, 1804}, + dictWord{8, 0, 362}, + dictWord{8, 0, 410}, + dictWord{8, 0, 830}, + dictWord{9, 0, 514}, + dictWord{11, 0, 649}, + dictWord{142, 0, 157}, + dictWord{4, 0, 74}, + dictWord{6, 0, 510}, + dictWord{6, 10, 594}, + dictWord{ + 9, + 10, + 121, + }, + dictWord{10, 10, 49}, + dictWord{10, 10, 412}, + dictWord{139, 10, 834}, + dictWord{134, 0, 838}, + dictWord{136, 10, 748}, + dictWord{132, 10, 466}, + dictWord{132, 0, 625}, + dictWord{135, 11, 1443}, + dictWord{4, 11, 237}, + dictWord{135, 11, 514}, + dictWord{9, 10, 378}, + dictWord{141, 10, 162}, + dictWord{6, 0, 16}, + dictWord{6, 0, 158}, + 
dictWord{7, 0, 43}, + dictWord{7, 0, 129}, + dictWord{7, 0, 181}, + dictWord{8, 0, 276}, + dictWord{8, 0, 377}, + dictWord{10, 0, 523}, + dictWord{ + 11, + 0, + 816, + }, + dictWord{12, 0, 455}, + dictWord{13, 0, 303}, + dictWord{142, 0, 135}, + dictWord{135, 0, 281}, + dictWord{4, 0, 1}, + dictWord{7, 0, 1143}, + dictWord{7, 0, 1463}, + dictWord{8, 0, 61}, + dictWord{9, 0, 207}, + dictWord{9, 0, 390}, + dictWord{9, 0, 467}, + dictWord{139, 0, 836}, + dictWord{6, 11, 392}, + dictWord{7, 11, 65}, + dictWord{ + 135, + 11, + 2019, + }, + dictWord{132, 10, 667}, + dictWord{4, 0, 723}, + dictWord{5, 0, 895}, + dictWord{7, 0, 1031}, + dictWord{8, 0, 199}, + dictWord{8, 0, 340}, + dictWord{9, 0, 153}, + dictWord{9, 0, 215}, + dictWord{10, 0, 21}, + dictWord{10, 0, 59}, + dictWord{10, 0, 80}, + dictWord{10, 0, 224}, + dictWord{10, 0, 838}, + dictWord{11, 0, 229}, + dictWord{ + 11, + 0, + 652, + }, + dictWord{12, 0, 192}, + dictWord{13, 0, 146}, + dictWord{142, 0, 91}, + dictWord{132, 0, 295}, + dictWord{137, 0, 51}, + dictWord{9, 11, 222}, + dictWord{ + 10, + 11, + 43, + }, + dictWord{139, 11, 900}, + dictWord{5, 0, 309}, + dictWord{140, 0, 211}, + dictWord{5, 0, 125}, + dictWord{8, 0, 77}, + dictWord{138, 0, 15}, + dictWord{136, 11, 604}, + dictWord{138, 0, 789}, + dictWord{5, 0, 173}, + dictWord{4, 10, 39}, + dictWord{7, 10, 1843}, + dictWord{8, 10, 407}, + dictWord{11, 10, 144}, + dictWord{140, 10, 523}, + dictWord{138, 11, 265}, + dictWord{133, 0, 439}, + dictWord{132, 10, 510}, + dictWord{7, 0, 648}, + dictWord{7, 0, 874}, + dictWord{11, 0, 164}, + dictWord{12, 0, 76}, + dictWord{18, 0, 9}, + dictWord{7, 10, 1980}, + dictWord{10, 10, 487}, + dictWord{138, 10, 809}, + dictWord{12, 0, 111}, + dictWord{14, 0, 294}, + dictWord{19, 0, 45}, + dictWord{13, 10, 260}, + dictWord{146, 10, 63}, + dictWord{133, 11, 549}, + dictWord{134, 10, 570}, + dictWord{4, 0, 8}, + dictWord{7, 0, 1152}, + dictWord{7, 0, 1153}, + dictWord{7, 0, 1715}, + dictWord{9, 0, 374}, + dictWord{10, 0, 
478}, + dictWord{139, 0, 648}, + dictWord{135, 0, 1099}, + dictWord{5, 0, 575}, + dictWord{6, 0, 354}, + dictWord{ + 135, + 0, + 701, + }, + dictWord{7, 11, 36}, + dictWord{8, 11, 201}, + dictWord{136, 11, 605}, + dictWord{4, 10, 787}, + dictWord{136, 11, 156}, + dictWord{6, 0, 518}, + dictWord{ + 149, + 11, + 13, + }, + dictWord{140, 11, 224}, + dictWord{134, 0, 702}, + dictWord{132, 10, 516}, + dictWord{5, 11, 724}, + dictWord{10, 11, 305}, + dictWord{11, 11, 151}, + dictWord{12, 11, 33}, + dictWord{12, 11, 121}, + dictWord{12, 11, 381}, + dictWord{17, 11, 3}, + dictWord{17, 11, 27}, + dictWord{17, 11, 78}, + dictWord{18, 11, 18}, + dictWord{19, 11, 54}, + dictWord{149, 11, 5}, + dictWord{8, 0, 87}, + dictWord{4, 11, 523}, + dictWord{5, 11, 638}, + dictWord{11, 10, 887}, + dictWord{14, 10, 365}, + dictWord{ + 142, + 10, + 375, + }, + dictWord{138, 0, 438}, + dictWord{136, 10, 821}, + dictWord{135, 11, 1908}, + dictWord{6, 11, 242}, + dictWord{7, 11, 227}, + dictWord{7, 11, 1581}, + dictWord{8, 11, 104}, + dictWord{9, 11, 113}, + dictWord{9, 11, 220}, + dictWord{9, 11, 427}, + dictWord{10, 11, 74}, + dictWord{10, 11, 239}, + dictWord{11, 11, 579}, + dictWord{11, 11, 1023}, + dictWord{13, 11, 4}, + dictWord{13, 11, 204}, + dictWord{13, 11, 316}, + dictWord{18, 11, 95}, + dictWord{148, 11, 86}, + dictWord{4, 0, 69}, + dictWord{5, 0, 122}, + dictWord{5, 0, 849}, + dictWord{6, 0, 1633}, + dictWord{9, 0, 656}, + dictWord{138, 0, 464}, + dictWord{7, 0, 1802}, + dictWord{4, 10, 10}, + dictWord{ + 139, + 10, + 786, + }, + dictWord{135, 11, 861}, + dictWord{139, 0, 499}, + dictWord{7, 0, 476}, + dictWord{7, 0, 1592}, + dictWord{138, 0, 87}, + dictWord{133, 10, 684}, + dictWord{ + 4, + 0, + 840, + }, + dictWord{134, 10, 27}, + dictWord{142, 0, 283}, + dictWord{6, 0, 1620}, + dictWord{7, 11, 1328}, + dictWord{136, 11, 494}, + dictWord{5, 0, 859}, + dictWord{ + 7, + 0, + 1160, + }, + dictWord{8, 0, 107}, + dictWord{9, 0, 291}, + dictWord{9, 0, 439}, + dictWord{10, 0, 663}, + 
dictWord{11, 0, 609}, + dictWord{140, 0, 197}, + dictWord{ + 7, + 11, + 1306, + }, + dictWord{8, 11, 505}, + dictWord{9, 11, 482}, + dictWord{10, 11, 126}, + dictWord{11, 11, 225}, + dictWord{12, 11, 347}, + dictWord{12, 11, 449}, + dictWord{ + 13, + 11, + 19, + }, + dictWord{142, 11, 218}, + dictWord{5, 11, 268}, + dictWord{10, 11, 764}, + dictWord{12, 11, 120}, + dictWord{13, 11, 39}, + dictWord{145, 11, 127}, + dictWord{145, 10, 56}, + dictWord{7, 11, 1672}, + dictWord{10, 11, 472}, + dictWord{11, 11, 189}, + dictWord{143, 11, 51}, + dictWord{6, 10, 342}, + dictWord{6, 10, 496}, + dictWord{8, 10, 275}, + dictWord{137, 10, 206}, + dictWord{133, 0, 600}, + dictWord{4, 0, 117}, + dictWord{6, 0, 372}, + dictWord{7, 0, 1905}, + dictWord{142, 0, 323}, + dictWord{4, 10, 909}, + dictWord{5, 10, 940}, + dictWord{135, 11, 1471}, + dictWord{132, 10, 891}, + dictWord{4, 0, 722}, + dictWord{139, 0, 471}, + dictWord{4, 11, 384}, + dictWord{135, 11, 1022}, + dictWord{132, 10, 687}, + dictWord{9, 0, 5}, + dictWord{12, 0, 216}, + dictWord{12, 0, 294}, + dictWord{12, 0, 298}, + dictWord{12, 0, 400}, + dictWord{12, 0, 518}, + dictWord{13, 0, 229}, + dictWord{143, 0, 139}, + dictWord{135, 11, 1703}, + dictWord{7, 11, 1602}, + dictWord{10, 11, 698}, + dictWord{ + 12, + 11, + 212, + }, + dictWord{141, 11, 307}, + dictWord{6, 10, 41}, + dictWord{141, 10, 160}, + dictWord{135, 11, 1077}, + dictWord{9, 11, 159}, + dictWord{11, 11, 28}, + dictWord{140, 11, 603}, + dictWord{4, 0, 514}, + dictWord{7, 0, 1304}, + dictWord{138, 0, 477}, + dictWord{134, 0, 1774}, + dictWord{9, 0, 88}, + dictWord{139, 0, 270}, + dictWord{5, 0, 12}, + dictWord{7, 0, 375}, + dictWord{9, 0, 438}, + dictWord{134, 10, 1718}, + dictWord{132, 11, 515}, + dictWord{136, 10, 778}, + dictWord{8, 11, 632}, + dictWord{8, 11, 697}, + dictWord{137, 11, 854}, + dictWord{6, 0, 362}, + dictWord{6, 0, 997}, + dictWord{146, 0, 51}, + dictWord{7, 0, 816}, + dictWord{7, 0, 1241}, + dictWord{ + 9, + 0, + 283, + }, + dictWord{9, 0, 
520}, + dictWord{10, 0, 213}, + dictWord{10, 0, 307}, + dictWord{10, 0, 463}, + dictWord{10, 0, 671}, + dictWord{10, 0, 746}, + dictWord{11, 0, 401}, + dictWord{11, 0, 794}, + dictWord{12, 0, 517}, + dictWord{18, 0, 107}, + dictWord{147, 0, 115}, + dictWord{133, 10, 115}, + dictWord{150, 11, 28}, + dictWord{4, 11, 136}, + dictWord{133, 11, 551}, + dictWord{142, 10, 314}, + dictWord{132, 0, 258}, + dictWord{6, 0, 22}, + dictWord{7, 0, 903}, + dictWord{7, 0, 1963}, + dictWord{8, 0, 639}, + dictWord{138, 0, 577}, + dictWord{5, 0, 681}, + dictWord{8, 0, 782}, + dictWord{13, 0, 130}, + dictWord{17, 0, 84}, + dictWord{5, 10, 193}, + dictWord{140, 10, 178}, + dictWord{ + 9, + 11, + 17, + }, + dictWord{138, 11, 291}, + dictWord{7, 11, 1287}, + dictWord{9, 11, 44}, + dictWord{10, 11, 552}, + dictWord{10, 11, 642}, + dictWord{11, 11, 839}, + dictWord{12, 11, 274}, + dictWord{12, 11, 275}, + dictWord{12, 11, 372}, + dictWord{13, 11, 91}, + dictWord{142, 11, 125}, + dictWord{135, 10, 174}, + dictWord{4, 0, 664}, + dictWord{5, 0, 804}, + dictWord{139, 0, 1013}, + dictWord{134, 0, 942}, + dictWord{6, 0, 1349}, + dictWord{6, 0, 1353}, + dictWord{6, 0, 1450}, + dictWord{7, 11, 1518}, + dictWord{139, 11, 694}, + dictWord{11, 0, 356}, + dictWord{4, 10, 122}, + dictWord{5, 10, 796}, + dictWord{5, 10, 952}, + dictWord{6, 10, 1660}, + dictWord{ + 6, + 10, + 1671, + }, + dictWord{8, 10, 567}, + dictWord{9, 10, 687}, + dictWord{9, 10, 742}, + dictWord{10, 10, 686}, + dictWord{11, 10, 682}, + dictWord{140, 10, 281}, + dictWord{ + 5, + 0, + 32, + }, + dictWord{6, 11, 147}, + dictWord{7, 11, 886}, + dictWord{9, 11, 753}, + dictWord{138, 11, 268}, + dictWord{5, 10, 179}, + dictWord{7, 10, 1095}, + dictWord{ + 135, + 10, + 1213, + }, + dictWord{4, 10, 66}, + dictWord{7, 10, 722}, + dictWord{135, 10, 904}, + dictWord{135, 10, 352}, + dictWord{9, 11, 245}, + dictWord{138, 11, 137}, + dictWord{4, 0, 289}, + dictWord{7, 0, 629}, + dictWord{7, 0, 1698}, + dictWord{7, 0, 1711}, + dictWord{12, 0, 
215}, + dictWord{133, 11, 414}, + dictWord{6, 0, 1975}, + dictWord{135, 11, 1762}, + dictWord{6, 0, 450}, + dictWord{136, 0, 109}, + dictWord{141, 10, 35}, + dictWord{134, 11, 599}, + dictWord{136, 0, 705}, + dictWord{ + 133, + 0, + 664, + }, + dictWord{134, 11, 1749}, + dictWord{11, 11, 402}, + dictWord{12, 11, 109}, + dictWord{12, 11, 431}, + dictWord{13, 11, 179}, + dictWord{13, 11, 206}, + dictWord{14, 11, 175}, + dictWord{14, 11, 217}, + dictWord{16, 11, 3}, + dictWord{148, 11, 53}, + dictWord{135, 0, 1238}, + dictWord{134, 11, 1627}, + dictWord{ + 132, + 11, + 488, + }, + dictWord{13, 0, 318}, + dictWord{10, 10, 592}, + dictWord{10, 10, 753}, + dictWord{12, 10, 317}, + dictWord{12, 10, 355}, + dictWord{12, 10, 465}, + dictWord{ + 12, + 10, + 469, + }, + dictWord{12, 10, 560}, + dictWord{140, 10, 578}, + dictWord{133, 10, 564}, + dictWord{132, 11, 83}, + dictWord{140, 11, 676}, + dictWord{6, 0, 1872}, + dictWord{6, 0, 1906}, + dictWord{6, 0, 1907}, + dictWord{9, 0, 934}, + dictWord{9, 0, 956}, + dictWord{9, 0, 960}, + dictWord{9, 0, 996}, + dictWord{12, 0, 794}, + dictWord{ + 12, + 0, + 876, + }, + dictWord{12, 0, 880}, + dictWord{12, 0, 918}, + dictWord{15, 0, 230}, + dictWord{18, 0, 234}, + dictWord{18, 0, 238}, + dictWord{21, 0, 38}, + dictWord{149, 0, 62}, + dictWord{134, 10, 556}, + dictWord{134, 11, 278}, + dictWord{137, 0, 103}, + dictWord{7, 10, 544}, + dictWord{8, 10, 719}, + dictWord{138, 10, 61}, + dictWord{ + 4, + 10, + 5, + }, + dictWord{5, 10, 498}, + dictWord{8, 10, 637}, + dictWord{137, 10, 521}, + dictWord{7, 0, 777}, + dictWord{12, 0, 229}, + dictWord{12, 0, 239}, + dictWord{15, 0, 12}, + dictWord{12, 11, 229}, + dictWord{12, 11, 239}, + dictWord{143, 11, 12}, + dictWord{6, 0, 26}, + dictWord{7, 11, 388}, + dictWord{7, 11, 644}, + dictWord{139, 11, 781}, + dictWord{7, 11, 229}, + dictWord{8, 11, 59}, + dictWord{9, 11, 190}, + dictWord{9, 11, 257}, + dictWord{10, 11, 378}, + dictWord{140, 11, 191}, + dictWord{133, 10, 927}, + dictWord{135, 10, 
1441}, + dictWord{4, 10, 893}, + dictWord{5, 10, 780}, + dictWord{133, 10, 893}, + dictWord{4, 0, 414}, + dictWord{5, 0, 467}, + dictWord{9, 0, 654}, + dictWord{10, 0, 451}, + dictWord{12, 0, 59}, + dictWord{141, 0, 375}, + dictWord{142, 0, 173}, + dictWord{135, 0, 17}, + dictWord{7, 0, 1350}, + dictWord{133, 10, 238}, + dictWord{135, 0, 955}, + dictWord{4, 0, 960}, + dictWord{10, 0, 887}, + dictWord{12, 0, 753}, + dictWord{18, 0, 161}, + dictWord{18, 0, 162}, + dictWord{152, 0, 19}, + dictWord{136, 11, 344}, + dictWord{6, 10, 1729}, + dictWord{137, 11, 288}, + dictWord{132, 11, 660}, + dictWord{4, 0, 217}, + dictWord{5, 0, 710}, + dictWord{7, 0, 760}, + dictWord{7, 0, 1926}, + dictWord{9, 0, 428}, + dictWord{9, 0, 708}, + dictWord{10, 0, 254}, + dictWord{10, 0, 296}, + dictWord{10, 0, 720}, + dictWord{11, 0, 109}, + dictWord{ + 11, + 0, + 255, + }, + dictWord{12, 0, 165}, + dictWord{12, 0, 315}, + dictWord{13, 0, 107}, + dictWord{13, 0, 203}, + dictWord{14, 0, 54}, + dictWord{14, 0, 99}, + dictWord{14, 0, 114}, + dictWord{14, 0, 388}, + dictWord{16, 0, 85}, + dictWord{17, 0, 9}, + dictWord{17, 0, 33}, + dictWord{20, 0, 25}, + dictWord{20, 0, 28}, + dictWord{20, 0, 29}, + dictWord{21, 0, 9}, + dictWord{21, 0, 10}, + dictWord{21, 0, 34}, + dictWord{22, 0, 17}, + dictWord{4, 10, 60}, + dictWord{7, 10, 1800}, + dictWord{8, 10, 314}, + dictWord{9, 10, 700}, + dictWord{ + 139, + 10, + 487, + }, + dictWord{7, 11, 1035}, + dictWord{138, 11, 737}, + dictWord{7, 11, 690}, + dictWord{9, 11, 217}, + dictWord{9, 11, 587}, + dictWord{140, 11, 521}, + dictWord{6, 0, 919}, + dictWord{7, 11, 706}, + dictWord{7, 11, 1058}, + dictWord{138, 11, 538}, + dictWord{7, 10, 1853}, + dictWord{138, 10, 437}, + dictWord{ + 136, + 10, + 419, + }, + dictWord{6, 0, 280}, + dictWord{10, 0, 502}, + dictWord{11, 0, 344}, + dictWord{140, 0, 38}, + dictWord{5, 0, 45}, + dictWord{7, 0, 1161}, + dictWord{11, 0, 448}, + dictWord{11, 0, 880}, + dictWord{13, 0, 139}, + dictWord{13, 0, 407}, + dictWord{15, 
0, 16}, + dictWord{17, 0, 95}, + dictWord{18, 0, 66}, + dictWord{18, 0, 88}, + dictWord{ + 18, + 0, + 123, + }, + dictWord{149, 0, 7}, + dictWord{11, 11, 92}, + dictWord{11, 11, 196}, + dictWord{11, 11, 409}, + dictWord{11, 11, 450}, + dictWord{11, 11, 666}, + dictWord{ + 11, + 11, + 777, + }, + dictWord{12, 11, 262}, + dictWord{13, 11, 385}, + dictWord{13, 11, 393}, + dictWord{15, 11, 115}, + dictWord{16, 11, 45}, + dictWord{145, 11, 82}, + dictWord{136, 0, 777}, + dictWord{134, 11, 1744}, + dictWord{4, 0, 410}, + dictWord{7, 0, 521}, + dictWord{133, 10, 828}, + dictWord{134, 0, 673}, + dictWord{7, 0, 1110}, + dictWord{7, 0, 1778}, + dictWord{7, 10, 176}, + dictWord{135, 10, 178}, + dictWord{5, 10, 806}, + dictWord{7, 11, 268}, + dictWord{7, 10, 1976}, + dictWord{ + 136, + 11, + 569, + }, + dictWord{4, 11, 733}, + dictWord{9, 11, 194}, + dictWord{10, 11, 92}, + dictWord{11, 11, 198}, + dictWord{12, 11, 84}, + dictWord{12, 11, 87}, + dictWord{ + 13, + 11, + 128, + }, + dictWord{144, 11, 74}, + dictWord{5, 0, 341}, + dictWord{7, 0, 1129}, + dictWord{11, 0, 414}, + dictWord{4, 10, 51}, + dictWord{6, 10, 4}, + dictWord{7, 10, 591}, + dictWord{7, 10, 849}, + dictWord{7, 10, 951}, + dictWord{7, 10, 1613}, + dictWord{7, 10, 1760}, + dictWord{7, 10, 1988}, + dictWord{9, 10, 434}, + dictWord{10, 10, 754}, + dictWord{11, 10, 25}, + dictWord{139, 10, 37}, + dictWord{133, 10, 902}, + dictWord{135, 10, 928}, + dictWord{135, 0, 787}, + dictWord{132, 0, 436}, + dictWord{ + 134, + 10, + 270, + }, + dictWord{7, 0, 1587}, + dictWord{135, 0, 1707}, + dictWord{6, 0, 377}, + dictWord{7, 0, 1025}, + dictWord{9, 0, 613}, + dictWord{145, 0, 104}, + dictWord{ + 7, + 11, + 982, + }, + dictWord{7, 11, 1361}, + dictWord{10, 11, 32}, + dictWord{143, 11, 56}, + dictWord{139, 0, 96}, + dictWord{132, 0, 451}, + dictWord{132, 10, 416}, + dictWord{ + 142, + 10, + 372, + }, + dictWord{5, 10, 152}, + dictWord{5, 10, 197}, + dictWord{7, 11, 306}, + dictWord{7, 10, 340}, + dictWord{7, 10, 867}, + 
dictWord{10, 10, 548}, + dictWord{ + 10, + 10, + 581, + }, + dictWord{11, 10, 6}, + dictWord{12, 10, 3}, + dictWord{12, 10, 19}, + dictWord{14, 10, 110}, + dictWord{142, 10, 289}, + dictWord{134, 0, 680}, + dictWord{ + 134, + 11, + 609, + }, + dictWord{7, 0, 483}, + dictWord{7, 10, 190}, + dictWord{8, 10, 28}, + dictWord{8, 10, 141}, + dictWord{8, 10, 444}, + dictWord{8, 10, 811}, + dictWord{ + 9, + 10, + 468, + }, + dictWord{11, 10, 334}, + dictWord{12, 10, 24}, + dictWord{12, 10, 386}, + dictWord{140, 10, 576}, + dictWord{10, 0, 916}, + dictWord{133, 10, 757}, + dictWord{ + 5, + 10, + 721, + }, + dictWord{135, 10, 1553}, + dictWord{133, 11, 178}, + dictWord{134, 0, 937}, + dictWord{132, 10, 898}, + dictWord{133, 0, 739}, + dictWord{ + 147, + 0, + 82, + }, + dictWord{135, 0, 663}, + dictWord{146, 0, 128}, + dictWord{5, 10, 277}, + dictWord{141, 10, 247}, + dictWord{134, 0, 1087}, + dictWord{132, 10, 435}, + dictWord{ + 6, + 11, + 381, + }, + dictWord{7, 11, 645}, + dictWord{7, 11, 694}, + dictWord{136, 11, 546}, + dictWord{7, 0, 503}, + dictWord{135, 0, 1885}, + dictWord{6, 0, 1965}, + dictWord{ + 8, + 0, + 925, + }, + dictWord{138, 0, 955}, + dictWord{4, 0, 113}, + dictWord{5, 0, 163}, + dictWord{5, 0, 735}, + dictWord{7, 0, 1009}, + dictWord{9, 0, 9}, + dictWord{9, 0, 771}, + dictWord{12, 0, 90}, + dictWord{13, 0, 138}, + dictWord{13, 0, 410}, + dictWord{143, 0, 128}, + dictWord{4, 0, 324}, + dictWord{138, 0, 104}, + dictWord{7, 0, 460}, + dictWord{ + 5, + 10, + 265, + }, + dictWord{134, 10, 212}, + dictWord{133, 11, 105}, + dictWord{7, 11, 261}, + dictWord{7, 11, 1107}, + dictWord{7, 11, 1115}, + dictWord{7, 11, 1354}, + dictWord{7, 11, 1588}, + dictWord{7, 11, 1705}, + dictWord{7, 11, 1902}, + dictWord{9, 11, 465}, + dictWord{10, 11, 248}, + dictWord{10, 11, 349}, + dictWord{10, 11, 647}, + dictWord{11, 11, 527}, + dictWord{11, 11, 660}, + dictWord{11, 11, 669}, + dictWord{12, 11, 529}, + dictWord{141, 11, 305}, + dictWord{5, 11, 438}, + dictWord{ + 9, + 11, + 
694, + }, + dictWord{12, 11, 627}, + dictWord{141, 11, 210}, + dictWord{152, 11, 11}, + dictWord{4, 0, 935}, + dictWord{133, 0, 823}, + dictWord{132, 10, 702}, + dictWord{ + 5, + 0, + 269, + }, + dictWord{7, 0, 434}, + dictWord{7, 0, 891}, + dictWord{8, 0, 339}, + dictWord{9, 0, 702}, + dictWord{11, 0, 594}, + dictWord{11, 0, 718}, + dictWord{17, 0, 100}, + dictWord{5, 10, 808}, + dictWord{135, 10, 2045}, + dictWord{7, 0, 1014}, + dictWord{9, 0, 485}, + dictWord{141, 0, 264}, + dictWord{134, 0, 1713}, + dictWord{7, 0, 1810}, + dictWord{11, 0, 866}, + dictWord{12, 0, 103}, + dictWord{13, 0, 495}, + dictWord{140, 11, 233}, + dictWord{4, 0, 423}, + dictWord{10, 0, 949}, + dictWord{138, 0, 1013}, + dictWord{135, 0, 900}, + dictWord{8, 11, 25}, + dictWord{138, 11, 826}, + dictWord{5, 10, 166}, + dictWord{8, 10, 739}, + dictWord{140, 10, 511}, + dictWord{ + 134, + 0, + 2018, + }, + dictWord{7, 11, 1270}, + dictWord{139, 11, 612}, + dictWord{4, 10, 119}, + dictWord{5, 10, 170}, + dictWord{5, 10, 447}, + dictWord{7, 10, 1708}, + dictWord{ + 7, + 10, + 1889, + }, + dictWord{9, 10, 357}, + dictWord{9, 10, 719}, + dictWord{12, 10, 486}, + dictWord{140, 10, 596}, + dictWord{12, 0, 574}, + dictWord{140, 11, 574}, + dictWord{132, 11, 308}, + dictWord{6, 0, 964}, + dictWord{6, 0, 1206}, + dictWord{134, 0, 1302}, + dictWord{4, 10, 450}, + dictWord{135, 10, 1158}, + dictWord{ + 135, + 11, + 150, + }, + dictWord{136, 11, 649}, + dictWord{14, 0, 213}, + dictWord{148, 0, 38}, + dictWord{9, 11, 45}, + dictWord{9, 11, 311}, + dictWord{141, 11, 42}, + dictWord{ + 134, + 11, + 521, + }, + dictWord{7, 10, 1375}, + dictWord{7, 10, 1466}, + dictWord{138, 10, 331}, + dictWord{132, 10, 754}, + dictWord{5, 11, 339}, + dictWord{7, 11, 1442}, + dictWord{14, 11, 3}, + dictWord{15, 11, 41}, + dictWord{147, 11, 66}, + dictWord{136, 11, 378}, + dictWord{134, 0, 1022}, + dictWord{5, 10, 850}, + dictWord{136, 10, 799}, + dictWord{142, 0, 143}, + dictWord{135, 0, 2029}, + dictWord{134, 11, 1628}, + 
dictWord{8, 0, 523}, + dictWord{150, 0, 34}, + dictWord{5, 0, 625}, + dictWord{ + 135, + 0, + 1617, + }, + dictWord{7, 0, 275}, + dictWord{7, 10, 238}, + dictWord{7, 10, 2033}, + dictWord{8, 10, 120}, + dictWord{8, 10, 188}, + dictWord{8, 10, 659}, + dictWord{ + 9, + 10, + 598, + }, + dictWord{10, 10, 466}, + dictWord{12, 10, 342}, + dictWord{12, 10, 588}, + dictWord{13, 10, 503}, + dictWord{14, 10, 246}, + dictWord{143, 10, 92}, + dictWord{ + 7, + 0, + 37, + }, + dictWord{8, 0, 425}, + dictWord{8, 0, 693}, + dictWord{9, 0, 720}, + dictWord{10, 0, 380}, + dictWord{10, 0, 638}, + dictWord{11, 0, 273}, + dictWord{11, 0, 473}, + dictWord{12, 0, 61}, + dictWord{143, 0, 43}, + dictWord{135, 11, 829}, + dictWord{135, 0, 1943}, + dictWord{132, 0, 765}, + dictWord{5, 11, 486}, + dictWord{ + 135, + 11, + 1349, + }, + dictWord{7, 11, 1635}, + dictWord{8, 11, 17}, + dictWord{10, 11, 217}, + dictWord{138, 11, 295}, + dictWord{4, 10, 201}, + dictWord{7, 10, 1744}, + dictWord{ + 8, + 10, + 602, + }, + dictWord{11, 10, 247}, + dictWord{11, 10, 826}, + dictWord{145, 10, 65}, + dictWord{138, 11, 558}, + dictWord{11, 0, 551}, + dictWord{142, 0, 159}, + dictWord{8, 10, 164}, + dictWord{146, 10, 62}, + dictWord{139, 11, 176}, + dictWord{132, 0, 168}, + dictWord{136, 0, 1010}, + dictWord{134, 0, 1994}, + dictWord{ + 135, + 0, + 91, + }, + dictWord{138, 0, 532}, + dictWord{135, 10, 1243}, + dictWord{135, 0, 1884}, + dictWord{132, 10, 907}, + dictWord{5, 10, 100}, + dictWord{10, 10, 329}, + dictWord{12, 10, 416}, + dictWord{149, 10, 29}, + dictWord{134, 11, 447}, + dictWord{132, 10, 176}, + dictWord{5, 10, 636}, + dictWord{5, 10, 998}, + dictWord{7, 10, 9}, + dictWord{7, 10, 1508}, + dictWord{8, 10, 26}, + dictWord{9, 10, 317}, + dictWord{9, 10, 358}, + dictWord{10, 10, 210}, + dictWord{10, 10, 292}, + dictWord{10, 10, 533}, + dictWord{11, 10, 555}, + dictWord{12, 10, 526}, + dictWord{12, 10, 607}, + dictWord{13, 10, 263}, + dictWord{13, 10, 459}, + dictWord{142, 10, 271}, + dictWord{ + 
4, + 11, + 609, + }, + dictWord{135, 11, 756}, + dictWord{6, 0, 15}, + dictWord{7, 0, 70}, + dictWord{10, 0, 240}, + dictWord{147, 0, 93}, + dictWord{4, 11, 930}, + dictWord{133, 11, 947}, + dictWord{134, 0, 1227}, + dictWord{134, 0, 1534}, + dictWord{133, 11, 939}, + dictWord{133, 11, 962}, + dictWord{5, 11, 651}, + dictWord{8, 11, 170}, + dictWord{ + 9, + 11, + 61, + }, + dictWord{9, 11, 63}, + dictWord{10, 11, 23}, + dictWord{10, 11, 37}, + dictWord{10, 11, 834}, + dictWord{11, 11, 4}, + dictWord{11, 11, 187}, + dictWord{ + 11, + 11, + 281, + }, + dictWord{11, 11, 503}, + dictWord{11, 11, 677}, + dictWord{12, 11, 96}, + dictWord{12, 11, 130}, + dictWord{12, 11, 244}, + dictWord{14, 11, 5}, + dictWord{ + 14, + 11, + 40, + }, + dictWord{14, 11, 162}, + dictWord{14, 11, 202}, + dictWord{146, 11, 133}, + dictWord{4, 11, 406}, + dictWord{5, 11, 579}, + dictWord{12, 11, 492}, + dictWord{ + 150, + 11, + 15, + }, + dictWord{139, 0, 392}, + dictWord{6, 10, 610}, + dictWord{10, 10, 127}, + dictWord{141, 10, 27}, + dictWord{7, 0, 655}, + dictWord{7, 0, 1844}, + dictWord{ + 136, + 10, + 119, + }, + dictWord{4, 0, 145}, + dictWord{6, 0, 176}, + dictWord{7, 0, 395}, + dictWord{137, 0, 562}, + dictWord{132, 0, 501}, + dictWord{140, 11, 145}, + dictWord{ + 136, + 0, + 1019, + }, + dictWord{134, 0, 509}, + dictWord{139, 0, 267}, + dictWord{6, 11, 17}, + dictWord{7, 11, 16}, + dictWord{7, 11, 1001}, + dictWord{7, 11, 1982}, + dictWord{ + 9, + 11, + 886, + }, + dictWord{10, 11, 489}, + dictWord{10, 11, 800}, + dictWord{11, 11, 782}, + dictWord{12, 11, 320}, + dictWord{13, 11, 467}, + dictWord{14, 11, 145}, + dictWord{14, 11, 387}, + dictWord{143, 11, 119}, + dictWord{145, 11, 17}, + dictWord{6, 0, 1099}, + dictWord{133, 11, 458}, + dictWord{7, 11, 1983}, + dictWord{8, 11, 0}, + dictWord{8, 11, 171}, + dictWord{9, 11, 120}, + dictWord{9, 11, 732}, + dictWord{10, 11, 473}, + dictWord{11, 11, 656}, + dictWord{11, 11, 998}, + dictWord{18, 11, 0}, + dictWord{18, 11, 2}, + dictWord{147, 
11, 21}, + dictWord{12, 11, 427}, + dictWord{146, 11, 38}, + dictWord{10, 0, 948}, + dictWord{138, 0, 968}, + dictWord{7, 10, 126}, + dictWord{136, 10, 84}, + dictWord{136, 10, 790}, + dictWord{4, 0, 114}, + dictWord{9, 0, 492}, + dictWord{13, 0, 462}, + dictWord{142, 0, 215}, + dictWord{6, 10, 64}, + dictWord{12, 10, 377}, + dictWord{141, 10, 309}, + dictWord{4, 0, 77}, + dictWord{5, 0, 361}, + dictWord{6, 0, 139}, + dictWord{6, 0, 401}, + dictWord{6, 0, 404}, + dictWord{ + 7, + 0, + 413, + }, + dictWord{7, 0, 715}, + dictWord{7, 0, 1716}, + dictWord{11, 0, 279}, + dictWord{12, 0, 179}, + dictWord{12, 0, 258}, + dictWord{13, 0, 244}, + dictWord{142, 0, 358}, + dictWord{134, 0, 1717}, + dictWord{7, 0, 772}, + dictWord{7, 0, 1061}, + dictWord{7, 0, 1647}, + dictWord{8, 0, 82}, + dictWord{11, 0, 250}, + dictWord{11, 0, 607}, + dictWord{12, 0, 311}, + dictWord{12, 0, 420}, + dictWord{13, 0, 184}, + dictWord{13, 0, 367}, + dictWord{7, 10, 1104}, + dictWord{11, 10, 269}, + dictWord{11, 10, 539}, + dictWord{11, 10, 627}, + dictWord{11, 10, 706}, + dictWord{11, 10, 975}, + dictWord{12, 10, 248}, + dictWord{12, 10, 434}, + dictWord{12, 10, 600}, + dictWord{ + 12, + 10, + 622, + }, + dictWord{13, 10, 297}, + dictWord{13, 10, 485}, + dictWord{14, 10, 69}, + dictWord{14, 10, 409}, + dictWord{143, 10, 108}, + dictWord{135, 0, 724}, + dictWord{ + 4, + 11, + 512, + }, + dictWord{4, 11, 519}, + dictWord{133, 11, 342}, + dictWord{134, 0, 1133}, + dictWord{145, 11, 29}, + dictWord{11, 10, 977}, + dictWord{141, 10, 507}, + dictWord{6, 0, 841}, + dictWord{6, 0, 1042}, + dictWord{6, 0, 1194}, + dictWord{10, 0, 993}, + dictWord{140, 0, 1021}, + dictWord{6, 11, 31}, + dictWord{7, 11, 491}, + dictWord{7, 11, 530}, + dictWord{8, 11, 592}, + dictWord{9, 10, 34}, + dictWord{11, 11, 53}, + dictWord{11, 10, 484}, + dictWord{11, 11, 779}, + dictWord{12, 11, 167}, + dictWord{12, 11, 411}, + dictWord{14, 11, 14}, + dictWord{14, 11, 136}, + dictWord{15, 11, 72}, + dictWord{16, 11, 17}, + 
dictWord{144, 11, 72}, + dictWord{4, 0, 1021}, + dictWord{6, 0, 2037}, + dictWord{133, 11, 907}, + dictWord{7, 0, 373}, + dictWord{8, 0, 335}, + dictWord{8, 0, 596}, + dictWord{9, 0, 488}, + dictWord{6, 10, 1700}, + dictWord{ + 7, + 10, + 293, + }, + dictWord{7, 10, 382}, + dictWord{7, 10, 1026}, + dictWord{7, 10, 1087}, + dictWord{7, 10, 2027}, + dictWord{8, 10, 252}, + dictWord{8, 10, 727}, + dictWord{ + 8, + 10, + 729, + }, + dictWord{9, 10, 30}, + dictWord{9, 10, 199}, + dictWord{9, 10, 231}, + dictWord{9, 10, 251}, + dictWord{9, 10, 334}, + dictWord{9, 10, 361}, + dictWord{9, 10, 712}, + dictWord{10, 10, 55}, + dictWord{10, 10, 60}, + dictWord{10, 10, 232}, + dictWord{10, 10, 332}, + dictWord{10, 10, 384}, + dictWord{10, 10, 396}, + dictWord{ + 10, + 10, + 504, + }, + dictWord{10, 10, 542}, + dictWord{10, 10, 652}, + dictWord{11, 10, 20}, + dictWord{11, 10, 48}, + dictWord{11, 10, 207}, + dictWord{11, 10, 291}, + dictWord{ + 11, + 10, + 298, + }, + dictWord{11, 10, 342}, + dictWord{11, 10, 365}, + dictWord{11, 10, 394}, + dictWord{11, 10, 620}, + dictWord{11, 10, 705}, + dictWord{11, 10, 1017}, + dictWord{12, 10, 123}, + dictWord{12, 10, 340}, + dictWord{12, 10, 406}, + dictWord{12, 10, 643}, + dictWord{13, 10, 61}, + dictWord{13, 10, 269}, + dictWord{ + 13, + 10, + 311, + }, + dictWord{13, 10, 319}, + dictWord{13, 10, 486}, + dictWord{14, 10, 234}, + dictWord{15, 10, 62}, + dictWord{15, 10, 85}, + dictWord{16, 10, 71}, + dictWord{ + 18, + 10, + 119, + }, + dictWord{148, 10, 105}, + dictWord{150, 0, 37}, + dictWord{4, 11, 208}, + dictWord{5, 11, 106}, + dictWord{6, 11, 531}, + dictWord{8, 11, 408}, + dictWord{ + 9, + 11, + 188, + }, + dictWord{138, 11, 572}, + dictWord{132, 0, 564}, + dictWord{6, 0, 513}, + dictWord{135, 0, 1052}, + dictWord{132, 0, 825}, + dictWord{9, 0, 899}, + dictWord{ + 140, + 11, + 441, + }, + dictWord{134, 0, 778}, + dictWord{133, 11, 379}, + dictWord{7, 0, 1417}, + dictWord{12, 0, 382}, + dictWord{17, 0, 48}, + dictWord{152, 0, 12}, + 
dictWord{ + 132, + 11, + 241, + }, + dictWord{7, 0, 1116}, + dictWord{6, 10, 379}, + dictWord{7, 10, 270}, + dictWord{8, 10, 176}, + dictWord{8, 10, 183}, + dictWord{9, 10, 432}, + dictWord{ + 9, + 10, + 661, + }, + dictWord{12, 10, 247}, + dictWord{12, 10, 617}, + dictWord{146, 10, 125}, + dictWord{5, 10, 792}, + dictWord{133, 10, 900}, + dictWord{6, 0, 545}, + dictWord{ + 7, + 0, + 565, + }, + dictWord{7, 0, 1669}, + dictWord{10, 0, 114}, + dictWord{11, 0, 642}, + dictWord{140, 0, 618}, + dictWord{133, 0, 5}, + dictWord{138, 11, 7}, + dictWord{ + 132, + 11, + 259, + }, + dictWord{135, 0, 192}, + dictWord{134, 0, 701}, + dictWord{136, 0, 763}, + dictWord{135, 10, 1979}, + dictWord{4, 10, 901}, + dictWord{133, 10, 776}, + dictWord{10, 0, 755}, + dictWord{147, 0, 29}, + dictWord{133, 0, 759}, + dictWord{4, 11, 173}, + dictWord{5, 11, 312}, + dictWord{5, 11, 512}, + dictWord{135, 11, 1285}, + dictWord{7, 11, 1603}, + dictWord{7, 11, 1691}, + dictWord{9, 11, 464}, + dictWord{11, 11, 195}, + dictWord{12, 11, 279}, + dictWord{12, 11, 448}, + dictWord{ + 14, + 11, + 11, + }, + dictWord{147, 11, 102}, + dictWord{7, 0, 370}, + dictWord{7, 0, 1007}, + dictWord{7, 0, 1177}, + dictWord{135, 0, 1565}, + dictWord{135, 0, 1237}, + dictWord{ + 4, + 0, + 87, + }, + dictWord{5, 0, 250}, + dictWord{141, 0, 298}, + dictWord{4, 11, 452}, + dictWord{5, 11, 583}, + dictWord{5, 11, 817}, + dictWord{6, 11, 433}, + dictWord{7, 11, 593}, + dictWord{7, 11, 720}, + dictWord{7, 11, 1378}, + dictWord{8, 11, 161}, + dictWord{9, 11, 284}, + dictWord{10, 11, 313}, + dictWord{139, 11, 886}, + dictWord{4, 11, 547}, + dictWord{135, 11, 1409}, + dictWord{136, 11, 722}, + dictWord{4, 10, 37}, + dictWord{5, 10, 334}, + dictWord{135, 10, 1253}, + dictWord{132, 10, 508}, + dictWord{ + 12, + 0, + 107, + }, + dictWord{146, 0, 31}, + dictWord{8, 11, 420}, + dictWord{139, 11, 193}, + dictWord{135, 0, 814}, + dictWord{135, 11, 409}, + dictWord{140, 0, 991}, + dictWord{4, 0, 57}, + dictWord{7, 0, 1195}, + 
dictWord{7, 0, 1438}, + dictWord{7, 0, 1548}, + dictWord{7, 0, 1835}, + dictWord{7, 0, 1904}, + dictWord{9, 0, 757}, + dictWord{ + 10, + 0, + 604, + }, + dictWord{139, 0, 519}, + dictWord{132, 0, 540}, + dictWord{138, 11, 308}, + dictWord{132, 10, 533}, + dictWord{136, 0, 608}, + dictWord{144, 11, 65}, + dictWord{4, 0, 1014}, + dictWord{134, 0, 2029}, + dictWord{4, 0, 209}, + dictWord{7, 0, 902}, + dictWord{5, 11, 1002}, + dictWord{136, 11, 745}, + dictWord{134, 0, 2030}, + dictWord{6, 0, 303}, + dictWord{7, 0, 335}, + dictWord{7, 0, 1437}, + dictWord{7, 0, 1668}, + dictWord{8, 0, 553}, + dictWord{8, 0, 652}, + dictWord{8, 0, 656}, + dictWord{ + 9, + 0, + 558, + }, + dictWord{11, 0, 743}, + dictWord{149, 0, 18}, + dictWord{5, 11, 575}, + dictWord{6, 11, 354}, + dictWord{135, 11, 701}, + dictWord{4, 11, 239}, + dictWord{ + 6, + 11, + 477, + }, + dictWord{7, 11, 1607}, + dictWord{11, 11, 68}, + dictWord{139, 11, 617}, + dictWord{132, 0, 559}, + dictWord{8, 0, 527}, + dictWord{18, 0, 60}, + dictWord{ + 147, + 0, + 24, + }, + dictWord{133, 10, 920}, + dictWord{138, 0, 511}, + dictWord{133, 0, 1017}, + dictWord{133, 0, 675}, + dictWord{138, 10, 391}, + dictWord{11, 0, 156}, + dictWord{135, 10, 1952}, + dictWord{138, 11, 369}, + dictWord{132, 11, 367}, + dictWord{133, 0, 709}, + dictWord{6, 0, 698}, + dictWord{134, 0, 887}, + dictWord{ + 142, + 10, + 126, + }, + dictWord{134, 0, 1745}, + dictWord{132, 10, 483}, + dictWord{13, 11, 299}, + dictWord{142, 11, 75}, + dictWord{133, 0, 714}, + dictWord{7, 0, 8}, + dictWord{ + 136, + 0, + 206, + }, + dictWord{138, 10, 480}, + dictWord{4, 11, 694}, + dictWord{9, 10, 495}, + dictWord{146, 10, 104}, + dictWord{7, 11, 1248}, + dictWord{11, 11, 621}, + dictWord{139, 11, 702}, + dictWord{140, 11, 687}, + dictWord{132, 0, 776}, + dictWord{139, 10, 1009}, + dictWord{135, 0, 1272}, + dictWord{134, 0, 1059}, + dictWord{ + 8, + 10, + 653, + }, + dictWord{13, 10, 93}, + dictWord{147, 10, 14}, + dictWord{135, 11, 213}, + dictWord{136, 0, 
406}, + dictWord{133, 10, 172}, + dictWord{132, 0, 947}, + dictWord{8, 0, 175}, + dictWord{10, 0, 168}, + dictWord{138, 0, 573}, + dictWord{132, 0, 870}, + dictWord{6, 0, 1567}, + dictWord{151, 11, 28}, + dictWord{ + 134, + 11, + 472, + }, + dictWord{5, 10, 260}, + dictWord{136, 11, 132}, + dictWord{4, 11, 751}, + dictWord{11, 11, 390}, + dictWord{140, 11, 32}, + dictWord{4, 11, 409}, + dictWord{ + 133, + 11, + 78, + }, + dictWord{12, 0, 554}, + dictWord{6, 11, 473}, + dictWord{145, 11, 105}, + dictWord{133, 0, 784}, + dictWord{8, 0, 908}, + dictWord{136, 11, 306}, + dictWord{139, 0, 882}, + dictWord{6, 0, 358}, + dictWord{7, 0, 1393}, + dictWord{8, 0, 396}, + dictWord{10, 0, 263}, + dictWord{14, 0, 154}, + dictWord{16, 0, 48}, + dictWord{ + 17, + 0, + 8, + }, + dictWord{7, 11, 1759}, + dictWord{8, 11, 396}, + dictWord{10, 11, 263}, + dictWord{14, 11, 154}, + dictWord{16, 11, 48}, + dictWord{145, 11, 8}, + dictWord{ + 13, + 11, + 163, + }, + dictWord{13, 11, 180}, + dictWord{18, 11, 78}, + dictWord{148, 11, 35}, + dictWord{14, 0, 32}, + dictWord{18, 0, 85}, + dictWord{20, 0, 2}, + dictWord{152, 0, 16}, + dictWord{7, 0, 228}, + dictWord{10, 0, 770}, + dictWord{8, 10, 167}, + dictWord{8, 10, 375}, + dictWord{9, 10, 82}, + dictWord{9, 10, 561}, + dictWord{138, 10, 620}, + dictWord{132, 0, 845}, + dictWord{9, 0, 14}, + dictWord{9, 0, 441}, + dictWord{10, 0, 306}, + dictWord{139, 0, 9}, + dictWord{11, 0, 966}, + dictWord{12, 0, 287}, + dictWord{ + 13, + 0, + 342, + }, + dictWord{13, 0, 402}, + dictWord{15, 0, 110}, + dictWord{15, 0, 163}, + dictWord{8, 10, 194}, + dictWord{136, 10, 756}, + dictWord{134, 0, 1578}, + dictWord{ + 4, + 0, + 967, + }, + dictWord{6, 0, 1820}, + dictWord{6, 0, 1847}, + dictWord{140, 0, 716}, + dictWord{136, 0, 594}, + dictWord{7, 0, 1428}, + dictWord{7, 0, 1640}, + dictWord{ + 7, + 0, + 1867, + }, + dictWord{9, 0, 169}, + dictWord{9, 0, 182}, + dictWord{9, 0, 367}, + dictWord{9, 0, 478}, + dictWord{9, 0, 506}, + dictWord{9, 0, 551}, + 
dictWord{9, 0, 557}, + dictWord{ + 9, + 0, + 648, + }, + dictWord{9, 0, 697}, + dictWord{9, 0, 705}, + dictWord{9, 0, 725}, + dictWord{9, 0, 787}, + dictWord{9, 0, 794}, + dictWord{10, 0, 198}, + dictWord{10, 0, 214}, + dictWord{10, 0, 267}, + dictWord{10, 0, 275}, + dictWord{10, 0, 456}, + dictWord{10, 0, 551}, + dictWord{10, 0, 561}, + dictWord{10, 0, 613}, + dictWord{10, 0, 627}, + dictWord{ + 10, + 0, + 668, + }, + dictWord{10, 0, 675}, + dictWord{10, 0, 691}, + dictWord{10, 0, 695}, + dictWord{10, 0, 707}, + dictWord{10, 0, 715}, + dictWord{11, 0, 183}, + dictWord{ + 11, + 0, + 201, + }, + dictWord{11, 0, 244}, + dictWord{11, 0, 262}, + dictWord{11, 0, 352}, + dictWord{11, 0, 439}, + dictWord{11, 0, 493}, + dictWord{11, 0, 572}, + dictWord{11, 0, 591}, + dictWord{11, 0, 608}, + dictWord{11, 0, 611}, + dictWord{11, 0, 646}, + dictWord{11, 0, 674}, + dictWord{11, 0, 711}, + dictWord{11, 0, 751}, + dictWord{11, 0, 761}, + dictWord{11, 0, 776}, + dictWord{11, 0, 785}, + dictWord{11, 0, 850}, + dictWord{11, 0, 853}, + dictWord{11, 0, 862}, + dictWord{11, 0, 865}, + dictWord{11, 0, 868}, + dictWord{ + 11, + 0, + 875, + }, + dictWord{11, 0, 898}, + dictWord{11, 0, 902}, + dictWord{11, 0, 903}, + dictWord{11, 0, 910}, + dictWord{11, 0, 932}, + dictWord{11, 0, 942}, + dictWord{ + 11, + 0, + 957, + }, + dictWord{11, 0, 967}, + dictWord{11, 0, 972}, + dictWord{12, 0, 148}, + dictWord{12, 0, 195}, + dictWord{12, 0, 220}, + dictWord{12, 0, 237}, + dictWord{12, 0, 318}, + dictWord{12, 0, 339}, + dictWord{12, 0, 393}, + dictWord{12, 0, 445}, + dictWord{12, 0, 450}, + dictWord{12, 0, 474}, + dictWord{12, 0, 505}, + dictWord{12, 0, 509}, + dictWord{12, 0, 533}, + dictWord{12, 0, 591}, + dictWord{12, 0, 594}, + dictWord{12, 0, 597}, + dictWord{12, 0, 621}, + dictWord{12, 0, 633}, + dictWord{12, 0, 642}, + dictWord{ + 13, + 0, + 59, + }, + dictWord{13, 0, 60}, + dictWord{13, 0, 145}, + dictWord{13, 0, 239}, + dictWord{13, 0, 250}, + dictWord{13, 0, 329}, + dictWord{13, 0, 344}, 
+ dictWord{13, 0, 365}, + dictWord{13, 0, 372}, + dictWord{13, 0, 387}, + dictWord{13, 0, 403}, + dictWord{13, 0, 414}, + dictWord{13, 0, 456}, + dictWord{13, 0, 470}, + dictWord{13, 0, 478}, + dictWord{13, 0, 483}, + dictWord{13, 0, 489}, + dictWord{14, 0, 55}, + dictWord{14, 0, 57}, + dictWord{14, 0, 81}, + dictWord{14, 0, 90}, + dictWord{14, 0, 148}, + dictWord{ + 14, + 0, + 239, + }, + dictWord{14, 0, 266}, + dictWord{14, 0, 321}, + dictWord{14, 0, 326}, + dictWord{14, 0, 327}, + dictWord{14, 0, 330}, + dictWord{14, 0, 347}, + dictWord{14, 0, 355}, + dictWord{14, 0, 401}, + dictWord{14, 0, 404}, + dictWord{14, 0, 411}, + dictWord{14, 0, 414}, + dictWord{14, 0, 416}, + dictWord{14, 0, 420}, + dictWord{15, 0, 61}, + dictWord{15, 0, 74}, + dictWord{15, 0, 87}, + dictWord{15, 0, 88}, + dictWord{15, 0, 94}, + dictWord{15, 0, 96}, + dictWord{15, 0, 116}, + dictWord{15, 0, 149}, + dictWord{15, 0, 154}, + dictWord{16, 0, 50}, + dictWord{16, 0, 63}, + dictWord{16, 0, 73}, + dictWord{17, 0, 2}, + dictWord{17, 0, 66}, + dictWord{17, 0, 92}, + dictWord{17, 0, 103}, + dictWord{ + 17, + 0, + 112, + }, + dictWord{17, 0, 120}, + dictWord{18, 0, 50}, + dictWord{18, 0, 54}, + dictWord{18, 0, 82}, + dictWord{18, 0, 86}, + dictWord{18, 0, 90}, + dictWord{18, 0, 111}, + dictWord{ + 18, + 0, + 115, + }, + dictWord{18, 0, 156}, + dictWord{19, 0, 40}, + dictWord{19, 0, 79}, + dictWord{20, 0, 78}, + dictWord{21, 0, 22}, + dictWord{135, 11, 883}, + dictWord{5, 0, 161}, + dictWord{135, 0, 839}, + dictWord{4, 0, 782}, + dictWord{13, 11, 293}, + dictWord{142, 11, 56}, + dictWord{133, 11, 617}, + dictWord{139, 11, 50}, + dictWord{ + 135, + 10, + 22, + }, + dictWord{145, 0, 64}, + dictWord{5, 10, 639}, + dictWord{7, 10, 1249}, + dictWord{139, 10, 896}, + dictWord{138, 0, 998}, + dictWord{135, 11, 2042}, + dictWord{ + 4, + 11, + 546, + }, + dictWord{142, 11, 233}, + dictWord{6, 0, 1043}, + dictWord{134, 0, 1574}, + dictWord{134, 0, 1496}, + dictWord{4, 10, 102}, + dictWord{7, 10, 815}, + 
dictWord{7, 10, 1699}, + dictWord{139, 10, 964}, + dictWord{12, 0, 781}, + dictWord{142, 0, 461}, + dictWord{4, 11, 313}, + dictWord{133, 11, 577}, + dictWord{ + 6, + 0, + 639, + }, + dictWord{6, 0, 1114}, + dictWord{137, 0, 817}, + dictWord{8, 11, 184}, + dictWord{141, 11, 433}, + dictWord{7, 0, 1814}, + dictWord{135, 11, 935}, + dictWord{ + 10, + 0, + 997, + }, + dictWord{140, 0, 958}, + dictWord{4, 0, 812}, + dictWord{137, 11, 625}, + dictWord{132, 10, 899}, + dictWord{136, 10, 795}, + dictWord{5, 11, 886}, + dictWord{6, 11, 46}, + dictWord{6, 11, 1790}, + dictWord{7, 11, 14}, + dictWord{7, 11, 732}, + dictWord{7, 11, 1654}, + dictWord{8, 11, 95}, + dictWord{8, 11, 327}, + dictWord{ + 8, + 11, + 616, + }, + dictWord{10, 11, 598}, + dictWord{10, 11, 769}, + dictWord{11, 11, 134}, + dictWord{11, 11, 747}, + dictWord{12, 11, 378}, + dictWord{142, 11, 97}, + dictWord{136, 0, 139}, + dictWord{6, 10, 52}, + dictWord{9, 10, 104}, + dictWord{9, 10, 559}, + dictWord{12, 10, 308}, + dictWord{147, 10, 87}, + dictWord{133, 11, 1021}, + dictWord{132, 10, 604}, + dictWord{132, 10, 301}, + dictWord{136, 10, 779}, + dictWord{7, 0, 643}, + dictWord{136, 0, 236}, + dictWord{132, 11, 153}, + dictWord{ + 134, + 0, + 1172, + }, + dictWord{147, 10, 32}, + dictWord{133, 11, 798}, + dictWord{6, 0, 1338}, + dictWord{132, 11, 587}, + dictWord{6, 11, 598}, + dictWord{7, 11, 42}, + dictWord{ + 8, + 11, + 695, + }, + dictWord{10, 11, 212}, + dictWord{11, 11, 158}, + dictWord{14, 11, 196}, + dictWord{145, 11, 85}, + dictWord{135, 10, 508}, + dictWord{5, 11, 957}, + dictWord{5, 11, 1008}, + dictWord{135, 11, 249}, + dictWord{4, 11, 129}, + dictWord{135, 11, 465}, + dictWord{5, 0, 54}, + dictWord{7, 11, 470}, + dictWord{7, 11, 1057}, + dictWord{7, 11, 1201}, + dictWord{9, 11, 755}, + dictWord{11, 11, 906}, + dictWord{140, 11, 527}, + dictWord{7, 11, 908}, + dictWord{146, 11, 7}, + dictWord{ + 5, + 11, + 148, + }, + dictWord{136, 11, 450}, + dictWord{144, 11, 1}, + dictWord{4, 0, 256}, + 
dictWord{135, 0, 1488}, + dictWord{9, 0, 351}, + dictWord{6, 10, 310}, + dictWord{ + 7, + 10, + 1849, + }, + dictWord{8, 10, 72}, + dictWord{8, 10, 272}, + dictWord{8, 10, 431}, + dictWord{9, 10, 12}, + dictWord{10, 10, 563}, + dictWord{10, 10, 630}, + dictWord{ + 10, + 10, + 796, + }, + dictWord{10, 10, 810}, + dictWord{11, 10, 367}, + dictWord{11, 10, 599}, + dictWord{11, 10, 686}, + dictWord{140, 10, 672}, + dictWord{6, 0, 1885}, + dictWord{ + 6, + 0, + 1898, + }, + dictWord{6, 0, 1899}, + dictWord{140, 0, 955}, + dictWord{4, 0, 714}, + dictWord{133, 0, 469}, + dictWord{6, 0, 1270}, + dictWord{134, 0, 1456}, + dictWord{132, 0, 744}, + dictWord{6, 0, 313}, + dictWord{7, 10, 537}, + dictWord{8, 10, 64}, + dictWord{9, 10, 127}, + dictWord{10, 10, 496}, + dictWord{12, 10, 510}, + dictWord{141, 10, 384}, + dictWord{4, 11, 217}, + dictWord{4, 10, 244}, + dictWord{5, 11, 710}, + dictWord{7, 10, 233}, + dictWord{7, 11, 1926}, + dictWord{9, 11, 428}, + dictWord{9, 11, 708}, + dictWord{10, 11, 254}, + dictWord{10, 11, 296}, + dictWord{10, 11, 720}, + dictWord{11, 11, 109}, + dictWord{11, 11, 255}, + dictWord{12, 11, 165}, + dictWord{12, 11, 315}, + dictWord{13, 11, 107}, + dictWord{13, 11, 203}, + dictWord{14, 11, 54}, + dictWord{14, 11, 99}, + dictWord{14, 11, 114}, + dictWord{ + 14, + 11, + 388, + }, + dictWord{16, 11, 85}, + dictWord{17, 11, 9}, + dictWord{17, 11, 33}, + dictWord{20, 11, 25}, + dictWord{20, 11, 28}, + dictWord{20, 11, 29}, + dictWord{21, 11, 9}, + dictWord{21, 11, 10}, + dictWord{21, 11, 34}, + dictWord{150, 11, 17}, + dictWord{138, 0, 402}, + dictWord{7, 0, 969}, + dictWord{146, 0, 55}, + dictWord{8, 0, 50}, + dictWord{ + 137, + 0, + 624, + }, + dictWord{134, 0, 1355}, + dictWord{132, 0, 572}, + dictWord{134, 10, 1650}, + dictWord{10, 10, 702}, + dictWord{139, 10, 245}, + dictWord{ + 10, + 0, + 847, + }, + dictWord{142, 0, 445}, + dictWord{6, 0, 43}, + dictWord{7, 0, 38}, + dictWord{8, 0, 248}, + dictWord{138, 0, 513}, + dictWord{133, 0, 369}, + 
dictWord{137, 10, 338}, + dictWord{133, 0, 766}, + dictWord{133, 0, 363}, + dictWord{133, 10, 896}, + dictWord{8, 11, 392}, + dictWord{11, 11, 54}, + dictWord{13, 11, 173}, + dictWord{ + 13, + 11, + 294, + }, + dictWord{148, 11, 7}, + dictWord{134, 0, 678}, + dictWord{7, 11, 1230}, + dictWord{136, 11, 531}, + dictWord{6, 0, 258}, + dictWord{140, 0, 409}, + dictWord{ + 5, + 0, + 249, + }, + dictWord{148, 0, 82}, + dictWord{7, 10, 1117}, + dictWord{136, 10, 539}, + dictWord{5, 0, 393}, + dictWord{6, 0, 378}, + dictWord{7, 0, 1981}, + dictWord{9, 0, 32}, + dictWord{9, 0, 591}, + dictWord{10, 0, 685}, + dictWord{10, 0, 741}, + dictWord{142, 0, 382}, + dictWord{133, 0, 788}, + dictWord{134, 0, 1281}, + dictWord{ + 134, + 0, + 1295, + }, + dictWord{7, 0, 1968}, + dictWord{141, 0, 509}, + dictWord{4, 0, 61}, + dictWord{5, 0, 58}, + dictWord{5, 0, 171}, + dictWord{5, 0, 683}, + dictWord{6, 0, 291}, + dictWord{ + 6, + 0, + 566, + }, + dictWord{7, 0, 1650}, + dictWord{11, 0, 523}, + dictWord{12, 0, 273}, + dictWord{12, 0, 303}, + dictWord{15, 0, 39}, + dictWord{143, 0, 111}, + dictWord{ + 6, + 0, + 706, + }, + dictWord{134, 0, 1283}, + dictWord{134, 0, 589}, + dictWord{135, 11, 1433}, + dictWord{133, 11, 435}, + dictWord{7, 0, 1059}, + dictWord{13, 0, 54}, + dictWord{ + 5, + 10, + 4, + }, + dictWord{5, 10, 810}, + dictWord{6, 10, 13}, + dictWord{6, 10, 538}, + dictWord{6, 10, 1690}, + dictWord{6, 10, 1726}, + dictWord{7, 10, 1819}, + dictWord{ + 8, + 10, + 148, + }, + dictWord{8, 10, 696}, + dictWord{8, 10, 791}, + dictWord{12, 10, 125}, + dictWord{143, 10, 9}, + dictWord{135, 10, 1268}, + dictWord{5, 11, 85}, + dictWord{ + 6, + 11, + 419, + }, + dictWord{7, 11, 134}, + dictWord{7, 11, 305}, + dictWord{7, 11, 361}, + dictWord{7, 11, 1337}, + dictWord{8, 11, 71}, + dictWord{140, 11, 519}, + dictWord{ + 137, + 0, + 824, + }, + dictWord{140, 11, 688}, + dictWord{5, 11, 691}, + dictWord{7, 11, 345}, + dictWord{7, 10, 1385}, + dictWord{9, 11, 94}, + dictWord{11, 10, 582}, + 
dictWord{ + 11, + 10, + 650, + }, + dictWord{11, 10, 901}, + dictWord{11, 10, 949}, + dictWord{12, 11, 169}, + dictWord{12, 10, 232}, + dictWord{12, 10, 236}, + dictWord{13, 10, 413}, + dictWord{13, 10, 501}, + dictWord{146, 10, 116}, + dictWord{4, 0, 917}, + dictWord{133, 0, 1005}, + dictWord{7, 0, 1598}, + dictWord{5, 11, 183}, + dictWord{6, 11, 582}, + dictWord{9, 11, 344}, + dictWord{10, 11, 679}, + dictWord{140, 11, 435}, + dictWord{4, 10, 925}, + dictWord{5, 10, 803}, + dictWord{8, 10, 698}, + dictWord{ + 138, + 10, + 828, + }, + dictWord{132, 0, 919}, + dictWord{135, 11, 511}, + dictWord{139, 10, 992}, + dictWord{4, 0, 255}, + dictWord{5, 0, 302}, + dictWord{6, 0, 132}, + dictWord{ + 7, + 0, + 128, + }, + dictWord{7, 0, 283}, + dictWord{7, 0, 1299}, + dictWord{10, 0, 52}, + dictWord{10, 0, 514}, + dictWord{11, 0, 925}, + dictWord{13, 0, 92}, + dictWord{142, 0, 309}, + dictWord{134, 0, 1369}, + dictWord{135, 10, 1847}, + dictWord{134, 0, 328}, + dictWord{7, 11, 1993}, + dictWord{136, 11, 684}, + dictWord{133, 10, 383}, + dictWord{137, 0, 173}, + dictWord{134, 11, 583}, + dictWord{134, 0, 1411}, + dictWord{19, 0, 65}, + dictWord{5, 11, 704}, + dictWord{8, 11, 357}, + dictWord{10, 11, 745}, + dictWord{14, 11, 426}, + dictWord{17, 11, 94}, + dictWord{147, 11, 57}, + dictWord{9, 10, 660}, + dictWord{138, 10, 347}, + dictWord{4, 11, 179}, + dictWord{5, 11, 198}, + dictWord{133, 11, 697}, + dictWord{7, 11, 347}, + dictWord{7, 11, 971}, + dictWord{8, 11, 181}, + dictWord{138, 11, 711}, + dictWord{141, 0, 442}, + dictWord{ + 11, + 0, + 842, + }, + dictWord{11, 0, 924}, + dictWord{13, 0, 317}, + dictWord{13, 0, 370}, + dictWord{13, 0, 469}, + dictWord{13, 0, 471}, + dictWord{14, 0, 397}, + dictWord{18, 0, 69}, + dictWord{18, 0, 145}, + dictWord{7, 10, 572}, + dictWord{9, 10, 592}, + dictWord{11, 10, 680}, + dictWord{12, 10, 356}, + dictWord{140, 10, 550}, + dictWord{14, 11, 19}, + dictWord{14, 11, 28}, + dictWord{144, 11, 29}, + dictWord{136, 0, 534}, + dictWord{4, 
11, 243}, + dictWord{5, 11, 203}, + dictWord{7, 11, 19}, + dictWord{7, 11, 71}, + dictWord{7, 11, 113}, + dictWord{10, 11, 405}, + dictWord{11, 11, 357}, + dictWord{142, 11, 240}, + dictWord{6, 0, 210}, + dictWord{10, 0, 845}, + dictWord{138, 0, 862}, + dictWord{7, 11, 1351}, + dictWord{9, 11, 581}, + dictWord{10, 11, 639}, + dictWord{11, 11, 453}, + dictWord{140, 11, 584}, + dictWord{7, 11, 1450}, + dictWord{ + 139, + 11, + 99, + }, + dictWord{10, 0, 892}, + dictWord{12, 0, 719}, + dictWord{144, 0, 105}, + dictWord{4, 0, 284}, + dictWord{6, 0, 223}, + dictWord{134, 11, 492}, + dictWord{5, 11, 134}, + dictWord{6, 11, 408}, + dictWord{6, 11, 495}, + dictWord{135, 11, 1593}, + dictWord{136, 0, 529}, + dictWord{137, 0, 807}, + dictWord{4, 0, 218}, + dictWord{7, 0, 526}, + dictWord{143, 0, 137}, + dictWord{6, 0, 1444}, + dictWord{142, 11, 4}, + dictWord{132, 11, 665}, + dictWord{4, 0, 270}, + dictWord{5, 0, 192}, + dictWord{6, 0, 332}, + dictWord{7, 0, 1322}, + dictWord{4, 11, 248}, + dictWord{7, 11, 137}, + dictWord{137, 11, 349}, + dictWord{140, 0, 661}, + dictWord{7, 0, 1517}, + dictWord{11, 0, 597}, + dictWord{14, 0, 76}, + dictWord{14, 0, 335}, + dictWord{20, 0, 33}, + dictWord{7, 10, 748}, + dictWord{139, 10, 700}, + dictWord{5, 11, 371}, + dictWord{135, 11, 563}, + dictWord{146, 11, 57}, + dictWord{133, 10, 127}, + dictWord{133, 0, 418}, + dictWord{4, 11, 374}, + dictWord{7, 11, 547}, + dictWord{7, 11, 1700}, + dictWord{7, 11, 1833}, + dictWord{139, 11, 858}, + dictWord{6, 10, 198}, + dictWord{140, 10, 83}, + dictWord{7, 11, 1812}, + dictWord{13, 11, 259}, + dictWord{13, 11, 356}, + dictWord{ + 14, + 11, + 242, + }, + dictWord{147, 11, 114}, + dictWord{7, 0, 379}, + dictWord{8, 0, 481}, + dictWord{9, 0, 377}, + dictWord{5, 10, 276}, + dictWord{6, 10, 55}, + dictWord{ + 135, + 10, + 1369, + }, + dictWord{138, 11, 286}, + dictWord{5, 0, 1003}, + dictWord{6, 0, 149}, + dictWord{6, 10, 1752}, + dictWord{136, 10, 726}, + dictWord{8, 0, 262}, + dictWord{ + 9, + 0, + 
627, + }, + dictWord{10, 0, 18}, + dictWord{11, 0, 214}, + dictWord{11, 0, 404}, + dictWord{11, 0, 457}, + dictWord{11, 0, 780}, + dictWord{11, 0, 913}, + dictWord{13, 0, 401}, + dictWord{14, 0, 200}, + dictWord{6, 11, 1647}, + dictWord{7, 11, 1552}, + dictWord{7, 11, 2010}, + dictWord{9, 11, 494}, + dictWord{137, 11, 509}, + dictWord{ + 135, + 0, + 742, + }, + dictWord{136, 0, 304}, + dictWord{132, 0, 142}, + dictWord{133, 10, 764}, + dictWord{6, 10, 309}, + dictWord{7, 10, 331}, + dictWord{138, 10, 550}, + dictWord{135, 10, 1062}, + dictWord{6, 11, 123}, + dictWord{7, 11, 214}, + dictWord{7, 10, 986}, + dictWord{9, 11, 728}, + dictWord{10, 11, 157}, + dictWord{11, 11, 346}, + dictWord{11, 11, 662}, + dictWord{143, 11, 106}, + dictWord{135, 10, 1573}, + dictWord{7, 0, 925}, + dictWord{137, 0, 799}, + dictWord{4, 0, 471}, + dictWord{5, 0, 51}, + dictWord{6, 0, 602}, + dictWord{8, 0, 484}, + dictWord{138, 0, 195}, + dictWord{136, 0, 688}, + dictWord{132, 0, 697}, + dictWord{6, 0, 1169}, + dictWord{6, 0, 1241}, + dictWord{6, 10, 194}, + dictWord{7, 10, 133}, + dictWord{10, 10, 493}, + dictWord{10, 10, 570}, + dictWord{139, 10, 664}, + dictWord{140, 0, 751}, + dictWord{7, 0, 929}, + dictWord{10, 0, 452}, + dictWord{11, 0, 878}, + dictWord{16, 0, 33}, + dictWord{5, 10, 24}, + dictWord{5, 10, 569}, + dictWord{6, 10, 3}, + dictWord{6, 10, 119}, + dictWord{ + 6, + 10, + 143, + }, + dictWord{6, 10, 440}, + dictWord{7, 10, 599}, + dictWord{7, 10, 1686}, + dictWord{7, 10, 1854}, + dictWord{8, 10, 424}, + dictWord{9, 10, 43}, + dictWord{ + 9, + 10, + 584, + }, + dictWord{9, 10, 760}, + dictWord{10, 10, 328}, + dictWord{11, 10, 159}, + dictWord{11, 10, 253}, + dictWord{12, 10, 487}, + dictWord{140, 10, 531}, + dictWord{ + 4, + 11, + 707, + }, + dictWord{13, 11, 106}, + dictWord{18, 11, 49}, + dictWord{147, 11, 41}, + dictWord{5, 0, 221}, + dictWord{5, 11, 588}, + dictWord{134, 11, 393}, + dictWord{134, 0, 1437}, + dictWord{6, 11, 211}, + dictWord{7, 11, 1690}, + dictWord{11, 
11, 486}, + dictWord{140, 11, 369}, + dictWord{5, 10, 14}, + dictWord{5, 10, 892}, + dictWord{6, 10, 283}, + dictWord{7, 10, 234}, + dictWord{136, 10, 537}, + dictWord{4, 0, 988}, + dictWord{136, 0, 955}, + dictWord{135, 0, 1251}, + dictWord{4, 10, 126}, + dictWord{8, 10, 635}, + dictWord{147, 10, 34}, + dictWord{4, 10, 316}, + dictWord{135, 10, 1561}, + dictWord{137, 10, 861}, + dictWord{4, 10, 64}, + dictWord{ + 5, + 10, + 352, + }, + dictWord{5, 10, 720}, + dictWord{6, 10, 368}, + dictWord{139, 10, 359}, + dictWord{134, 0, 192}, + dictWord{4, 0, 132}, + dictWord{5, 0, 69}, + dictWord{ + 135, + 0, + 1242, + }, + dictWord{7, 10, 1577}, + dictWord{10, 10, 304}, + dictWord{10, 10, 549}, + dictWord{12, 10, 365}, + dictWord{13, 10, 220}, + dictWord{13, 10, 240}, + dictWord{142, 10, 33}, + dictWord{4, 0, 111}, + dictWord{7, 0, 865}, + dictWord{134, 11, 219}, + dictWord{5, 11, 582}, + dictWord{6, 11, 1646}, + dictWord{7, 11, 99}, + dictWord{ + 7, + 11, + 1962, + }, + dictWord{7, 11, 1986}, + dictWord{8, 11, 515}, + dictWord{8, 11, 773}, + dictWord{9, 11, 23}, + dictWord{9, 11, 491}, + dictWord{12, 11, 620}, + dictWord{ + 14, + 11, + 52, + }, + dictWord{145, 11, 50}, + dictWord{132, 0, 767}, + dictWord{7, 11, 568}, + dictWord{148, 11, 21}, + dictWord{6, 0, 42}, + dictWord{7, 0, 1416}, + dictWord{ + 7, + 0, + 2005, + }, + dictWord{8, 0, 131}, + dictWord{8, 0, 466}, + dictWord{9, 0, 672}, + dictWord{13, 0, 252}, + dictWord{20, 0, 103}, + dictWord{133, 11, 851}, + dictWord{ + 135, + 0, + 1050, + }, + dictWord{6, 10, 175}, + dictWord{137, 10, 289}, + dictWord{5, 10, 432}, + dictWord{133, 10, 913}, + dictWord{6, 0, 44}, + dictWord{136, 0, 368}, + dictWord{ + 135, + 11, + 784, + }, + dictWord{132, 0, 570}, + dictWord{133, 0, 120}, + dictWord{139, 10, 595}, + dictWord{140, 0, 29}, + dictWord{6, 0, 227}, + dictWord{135, 0, 1589}, + dictWord{4, 11, 98}, + dictWord{7, 11, 1365}, + dictWord{9, 11, 422}, + dictWord{9, 11, 670}, + dictWord{10, 11, 775}, + dictWord{11, 11, 210}, + 
dictWord{13, 11, 26}, + dictWord{13, 11, 457}, + dictWord{141, 11, 476}, + dictWord{140, 10, 80}, + dictWord{5, 10, 931}, + dictWord{134, 10, 1698}, + dictWord{133, 0, 522}, + dictWord{ + 134, + 0, + 1120, + }, + dictWord{135, 0, 1529}, + dictWord{12, 0, 739}, + dictWord{14, 0, 448}, + dictWord{142, 0, 467}, + dictWord{11, 10, 526}, + dictWord{11, 10, 939}, + dictWord{141, 10, 290}, + dictWord{5, 10, 774}, + dictWord{6, 10, 1637}, + dictWord{6, 10, 1686}, + dictWord{134, 10, 1751}, + dictWord{6, 0, 1667}, + dictWord{ + 135, + 0, + 2036, + }, + dictWord{7, 10, 1167}, + dictWord{11, 10, 934}, + dictWord{13, 10, 391}, + dictWord{145, 10, 76}, + dictWord{137, 11, 147}, + dictWord{6, 10, 260}, + dictWord{ + 7, + 10, + 1484, + }, + dictWord{11, 11, 821}, + dictWord{12, 11, 110}, + dictWord{12, 11, 153}, + dictWord{18, 11, 41}, + dictWord{150, 11, 19}, + dictWord{6, 0, 511}, + dictWord{12, 0, 132}, + dictWord{134, 10, 573}, + dictWord{5, 0, 568}, + dictWord{6, 0, 138}, + dictWord{135, 0, 1293}, + dictWord{132, 0, 1020}, + dictWord{8, 0, 258}, + dictWord{9, 0, 208}, + dictWord{137, 0, 359}, + dictWord{4, 0, 565}, + dictWord{8, 0, 23}, + dictWord{136, 0, 827}, + dictWord{134, 0, 344}, + dictWord{4, 0, 922}, + dictWord{ + 5, + 0, + 1023, + }, + dictWord{13, 11, 477}, + dictWord{14, 11, 120}, + dictWord{148, 11, 61}, + dictWord{134, 0, 240}, + dictWord{5, 11, 209}, + dictWord{6, 11, 30}, + dictWord{ + 11, + 11, + 56, + }, + dictWord{139, 11, 305}, + dictWord{6, 0, 171}, + dictWord{7, 0, 1002}, + dictWord{7, 0, 1324}, + dictWord{9, 0, 415}, + dictWord{14, 0, 230}, + dictWord{ + 18, + 0, + 68, + }, + dictWord{4, 10, 292}, + dictWord{4, 10, 736}, + dictWord{5, 10, 871}, + dictWord{6, 10, 1689}, + dictWord{7, 10, 1944}, + dictWord{137, 10, 580}, + dictWord{ + 9, + 11, + 635, + }, + dictWord{139, 11, 559}, + dictWord{4, 11, 150}, + dictWord{5, 11, 303}, + dictWord{134, 11, 327}, + dictWord{6, 10, 63}, + dictWord{135, 10, 920}, + dictWord{ + 133, + 10, + 793, + }, + dictWord{8, 11, 
192}, + dictWord{10, 11, 78}, + dictWord{10, 11, 555}, + dictWord{11, 11, 308}, + dictWord{13, 11, 359}, + dictWord{147, 11, 95}, + dictWord{135, 11, 786}, + dictWord{135, 11, 1712}, + dictWord{136, 0, 402}, + dictWord{6, 0, 754}, + dictWord{6, 11, 1638}, + dictWord{7, 11, 79}, + dictWord{7, 11, 496}, + dictWord{9, 11, 138}, + dictWord{10, 11, 336}, + dictWord{11, 11, 12}, + dictWord{12, 11, 412}, + dictWord{12, 11, 440}, + dictWord{142, 11, 305}, + dictWord{4, 0, 716}, + dictWord{141, 0, 31}, + dictWord{133, 0, 982}, + dictWord{8, 0, 691}, + dictWord{8, 0, 731}, + dictWord{5, 10, 67}, + dictWord{6, 10, 62}, + dictWord{6, 10, 374}, + dictWord{ + 135, + 10, + 1391, + }, + dictWord{9, 10, 790}, + dictWord{140, 10, 47}, + dictWord{139, 11, 556}, + dictWord{151, 11, 1}, + dictWord{7, 11, 204}, + dictWord{7, 11, 415}, + dictWord{8, 11, 42}, + dictWord{10, 11, 85}, + dictWord{11, 11, 33}, + dictWord{11, 11, 564}, + dictWord{12, 11, 571}, + dictWord{149, 11, 1}, + dictWord{8, 0, 888}, + dictWord{ + 7, + 11, + 610, + }, + dictWord{135, 11, 1501}, + dictWord{4, 10, 391}, + dictWord{135, 10, 1169}, + dictWord{5, 0, 847}, + dictWord{9, 0, 840}, + dictWord{138, 0, 803}, + dictWord{137, 0, 823}, + dictWord{134, 0, 785}, + dictWord{8, 0, 152}, + dictWord{9, 0, 53}, + dictWord{9, 0, 268}, + dictWord{9, 0, 901}, + dictWord{10, 0, 518}, + dictWord{ + 10, + 0, + 829, + }, + dictWord{11, 0, 188}, + dictWord{13, 0, 74}, + dictWord{14, 0, 46}, + dictWord{15, 0, 17}, + dictWord{15, 0, 33}, + dictWord{17, 0, 40}, + dictWord{18, 0, 36}, + dictWord{ + 19, + 0, + 20, + }, + dictWord{22, 0, 1}, + dictWord{152, 0, 2}, + dictWord{4, 11, 3}, + dictWord{5, 11, 247}, + dictWord{5, 11, 644}, + dictWord{7, 11, 744}, + dictWord{7, 11, 1207}, + dictWord{7, 11, 1225}, + dictWord{7, 11, 1909}, + dictWord{146, 11, 147}, + dictWord{136, 0, 532}, + dictWord{135, 0, 681}, + dictWord{132, 10, 271}, + dictWord{ + 140, + 0, + 314, + }, + dictWord{140, 0, 677}, + dictWord{4, 0, 684}, + dictWord{136, 0, 384}, + 
dictWord{5, 11, 285}, + dictWord{9, 11, 67}, + dictWord{13, 11, 473}, + dictWord{ + 143, + 11, + 82, + }, + dictWord{4, 10, 253}, + dictWord{5, 10, 544}, + dictWord{7, 10, 300}, + dictWord{137, 10, 340}, + dictWord{7, 0, 110}, + dictWord{7, 0, 447}, + dictWord{8, 0, 290}, + dictWord{8, 0, 591}, + dictWord{9, 0, 382}, + dictWord{9, 0, 649}, + dictWord{11, 0, 71}, + dictWord{11, 0, 155}, + dictWord{11, 0, 313}, + dictWord{12, 0, 5}, + dictWord{13, 0, 325}, + dictWord{142, 0, 287}, + dictWord{134, 0, 1818}, + dictWord{136, 0, 1007}, + dictWord{138, 0, 321}, + dictWord{7, 0, 360}, + dictWord{7, 0, 425}, + dictWord{9, 0, 66}, + dictWord{9, 0, 278}, + dictWord{138, 0, 644}, + dictWord{133, 10, 818}, + dictWord{5, 0, 385}, + dictWord{5, 10, 541}, + dictWord{6, 10, 94}, + dictWord{6, 10, 499}, + dictWord{ + 7, + 10, + 230, + }, + dictWord{139, 10, 321}, + dictWord{4, 10, 920}, + dictWord{5, 10, 25}, + dictWord{5, 10, 790}, + dictWord{6, 10, 457}, + dictWord{7, 10, 853}, + dictWord{ + 136, + 10, + 788, + }, + dictWord{4, 0, 900}, + dictWord{133, 0, 861}, + dictWord{5, 0, 254}, + dictWord{7, 0, 985}, + dictWord{136, 0, 73}, + dictWord{7, 0, 1959}, + dictWord{ + 136, + 0, + 683, + }, + dictWord{134, 10, 1765}, + dictWord{133, 10, 822}, + dictWord{132, 10, 634}, + dictWord{4, 11, 29}, + dictWord{6, 11, 532}, + dictWord{7, 11, 1628}, + dictWord{ + 7, + 11, + 1648, + }, + dictWord{9, 11, 303}, + dictWord{9, 11, 350}, + dictWord{10, 11, 433}, + dictWord{11, 11, 97}, + dictWord{11, 11, 557}, + dictWord{11, 11, 745}, + dictWord{12, 11, 289}, + dictWord{12, 11, 335}, + dictWord{12, 11, 348}, + dictWord{12, 11, 606}, + dictWord{13, 11, 116}, + dictWord{13, 11, 233}, + dictWord{ + 13, + 11, + 466, + }, + dictWord{14, 11, 181}, + dictWord{14, 11, 209}, + dictWord{14, 11, 232}, + dictWord{14, 11, 236}, + dictWord{14, 11, 300}, + dictWord{16, 11, 41}, + dictWord{ + 148, + 11, + 97, + }, + dictWord{19, 0, 86}, + dictWord{6, 10, 36}, + dictWord{7, 10, 658}, + dictWord{136, 10, 454}, + 
dictWord{135, 11, 1692}, + dictWord{132, 0, 725}, + dictWord{ + 5, + 11, + 501, + }, + dictWord{7, 11, 1704}, + dictWord{9, 11, 553}, + dictWord{11, 11, 520}, + dictWord{12, 11, 557}, + dictWord{141, 11, 249}, + dictWord{134, 0, 196}, + dictWord{133, 0, 831}, + dictWord{136, 0, 723}, + dictWord{7, 0, 1897}, + dictWord{13, 0, 80}, + dictWord{13, 0, 437}, + dictWord{145, 0, 74}, + dictWord{4, 0, 992}, + dictWord{ + 6, + 0, + 627, + }, + dictWord{136, 0, 994}, + dictWord{135, 11, 1294}, + dictWord{132, 10, 104}, + dictWord{5, 0, 848}, + dictWord{6, 0, 66}, + dictWord{136, 0, 764}, + dictWord{ + 4, + 0, + 36, + }, + dictWord{7, 0, 1387}, + dictWord{10, 0, 205}, + dictWord{139, 0, 755}, + dictWord{6, 0, 1046}, + dictWord{134, 0, 1485}, + dictWord{134, 0, 950}, + dictWord{132, 0, 887}, + dictWord{14, 0, 450}, + dictWord{148, 0, 111}, + dictWord{7, 0, 620}, + dictWord{7, 0, 831}, + dictWord{9, 10, 542}, + dictWord{9, 10, 566}, + dictWord{ + 138, + 10, + 728, + }, + dictWord{6, 0, 165}, + dictWord{138, 0, 388}, + dictWord{139, 10, 263}, + dictWord{4, 0, 719}, + dictWord{135, 0, 155}, + dictWord{138, 10, 468}, + dictWord{6, 11, 453}, + dictWord{144, 11, 36}, + dictWord{134, 11, 129}, + dictWord{5, 0, 533}, + dictWord{7, 0, 755}, + dictWord{138, 0, 780}, + dictWord{134, 0, 1465}, + dictWord{4, 0, 353}, + dictWord{6, 0, 146}, + dictWord{6, 0, 1789}, + dictWord{7, 0, 427}, + dictWord{7, 0, 990}, + dictWord{7, 0, 1348}, + dictWord{9, 0, 665}, + dictWord{9, 0, 898}, + dictWord{11, 0, 893}, + dictWord{142, 0, 212}, + dictWord{7, 10, 87}, + dictWord{142, 10, 288}, + dictWord{4, 0, 45}, + dictWord{135, 0, 1257}, + dictWord{12, 0, 7}, + dictWord{7, 10, 988}, + dictWord{7, 10, 1939}, + dictWord{9, 10, 64}, + dictWord{9, 10, 502}, + dictWord{12, 10, 34}, + dictWord{13, 10, 12}, + dictWord{13, 10, 234}, + dictWord{147, 10, 77}, + dictWord{4, 0, 607}, + dictWord{5, 11, 60}, + dictWord{6, 11, 504}, + dictWord{7, 11, 614}, + dictWord{7, 11, 1155}, + dictWord{140, 11, 0}, + dictWord{ + 
135, + 10, + 141, + }, + dictWord{8, 11, 198}, + dictWord{11, 11, 29}, + dictWord{140, 11, 534}, + dictWord{140, 0, 65}, + dictWord{136, 0, 816}, + dictWord{132, 10, 619}, + dictWord{139, 0, 88}, + dictWord{5, 10, 246}, + dictWord{8, 10, 189}, + dictWord{9, 10, 355}, + dictWord{9, 10, 512}, + dictWord{10, 10, 124}, + dictWord{10, 10, 453}, + dictWord{11, 10, 143}, + dictWord{11, 10, 416}, + dictWord{11, 10, 859}, + dictWord{141, 10, 341}, + dictWord{4, 11, 379}, + dictWord{135, 11, 1397}, + dictWord{ + 4, + 0, + 600, + }, + dictWord{137, 0, 621}, + dictWord{133, 0, 367}, + dictWord{134, 0, 561}, + dictWord{6, 0, 559}, + dictWord{134, 0, 1691}, + dictWord{6, 0, 585}, + dictWord{ + 134, + 11, + 585, + }, + dictWord{135, 11, 1228}, + dictWord{4, 11, 118}, + dictWord{5, 10, 678}, + dictWord{6, 11, 274}, + dictWord{6, 11, 361}, + dictWord{7, 11, 75}, + dictWord{ + 141, + 11, + 441, + }, + dictWord{135, 11, 1818}, + dictWord{137, 11, 841}, + dictWord{5, 0, 573}, + dictWord{6, 0, 287}, + dictWord{7, 10, 862}, + dictWord{7, 10, 1886}, + dictWord{138, 10, 179}, + dictWord{132, 10, 517}, + dictWord{140, 11, 693}, + dictWord{5, 11, 314}, + dictWord{6, 11, 221}, + dictWord{7, 11, 419}, + dictWord{ + 10, + 11, + 650, + }, + dictWord{11, 11, 396}, + dictWord{12, 11, 156}, + dictWord{13, 11, 369}, + dictWord{14, 11, 333}, + dictWord{145, 11, 47}, + dictWord{140, 10, 540}, + dictWord{136, 10, 667}, + dictWord{11, 10, 403}, + dictWord{146, 10, 83}, + dictWord{6, 0, 672}, + dictWord{133, 10, 761}, + dictWord{9, 0, 157}, + dictWord{10, 10, 131}, + dictWord{140, 10, 72}, + dictWord{7, 0, 714}, + dictWord{134, 11, 460}, + dictWord{134, 0, 456}, + dictWord{133, 0, 925}, + dictWord{5, 11, 682}, + dictWord{ + 135, + 11, + 1887, + }, + dictWord{136, 11, 510}, + dictWord{136, 11, 475}, + dictWord{133, 11, 1016}, + dictWord{9, 0, 19}, + dictWord{7, 11, 602}, + dictWord{8, 11, 179}, + dictWord{ + 10, + 11, + 781, + }, + dictWord{140, 11, 126}, + dictWord{6, 11, 329}, + dictWord{138, 11, 111}, 
+ dictWord{6, 0, 822}, + dictWord{134, 0, 1473}, + dictWord{144, 11, 86}, + dictWord{11, 0, 113}, + dictWord{139, 11, 113}, + dictWord{5, 11, 821}, + dictWord{134, 11, 1687}, + dictWord{133, 10, 449}, + dictWord{7, 0, 463}, + dictWord{ + 17, + 0, + 69, + }, + dictWord{136, 10, 103}, + dictWord{7, 10, 2028}, + dictWord{138, 10, 641}, + dictWord{6, 0, 193}, + dictWord{7, 0, 240}, + dictWord{7, 0, 1682}, + dictWord{ + 10, + 0, + 51, + }, + dictWord{10, 0, 640}, + dictWord{11, 0, 410}, + dictWord{13, 0, 82}, + dictWord{14, 0, 247}, + dictWord{14, 0, 331}, + dictWord{142, 0, 377}, + dictWord{6, 0, 471}, + dictWord{11, 0, 411}, + dictWord{142, 0, 2}, + dictWord{5, 11, 71}, + dictWord{7, 11, 1407}, + dictWord{9, 11, 388}, + dictWord{9, 11, 704}, + dictWord{10, 11, 261}, + dictWord{ + 10, + 11, + 619, + }, + dictWord{11, 11, 547}, + dictWord{11, 11, 619}, + dictWord{143, 11, 157}, + dictWord{136, 0, 633}, + dictWord{135, 0, 1148}, + dictWord{6, 0, 554}, + dictWord{7, 0, 1392}, + dictWord{12, 0, 129}, + dictWord{7, 10, 1274}, + dictWord{7, 10, 1386}, + dictWord{7, 11, 2008}, + dictWord{9, 11, 337}, + dictWord{10, 11, 517}, + dictWord{146, 10, 87}, + dictWord{7, 0, 803}, + dictWord{8, 0, 542}, + dictWord{6, 10, 187}, + dictWord{7, 10, 1203}, + dictWord{8, 10, 380}, + dictWord{14, 10, 117}, + dictWord{149, 10, 28}, + dictWord{6, 10, 297}, + dictWord{7, 10, 793}, + dictWord{139, 10, 938}, + dictWord{8, 0, 438}, + dictWord{11, 0, 363}, + dictWord{7, 10, 464}, + dictWord{11, 10, 105}, + dictWord{12, 10, 231}, + dictWord{14, 10, 386}, + dictWord{15, 10, 102}, + dictWord{148, 10, 75}, + dictWord{5, 11, 16}, + dictWord{6, 11, 86}, + dictWord{6, 11, 603}, + dictWord{7, 11, 292}, + dictWord{7, 11, 561}, + dictWord{8, 11, 257}, + dictWord{8, 11, 382}, + dictWord{9, 11, 721}, + dictWord{9, 11, 778}, + dictWord{ + 11, + 11, + 581, + }, + dictWord{140, 11, 466}, + dictWord{6, 0, 717}, + dictWord{4, 11, 486}, + dictWord{133, 11, 491}, + dictWord{132, 0, 875}, + dictWord{132, 11, 72}, + 
dictWord{6, 11, 265}, + dictWord{135, 11, 847}, + dictWord{4, 0, 237}, + dictWord{135, 0, 514}, + dictWord{6, 0, 392}, + dictWord{7, 0, 65}, + dictWord{135, 0, 2019}, + dictWord{140, 11, 261}, + dictWord{135, 11, 922}, + dictWord{137, 11, 404}, + dictWord{12, 0, 563}, + dictWord{14, 0, 101}, + dictWord{18, 0, 129}, + dictWord{ + 7, + 10, + 1010, + }, + dictWord{11, 10, 733}, + dictWord{11, 10, 759}, + dictWord{13, 10, 34}, + dictWord{146, 10, 45}, + dictWord{7, 10, 1656}, + dictWord{9, 10, 369}, + dictWord{ + 10, + 10, + 338, + }, + dictWord{10, 10, 490}, + dictWord{11, 10, 154}, + dictWord{11, 10, 545}, + dictWord{11, 10, 775}, + dictWord{13, 10, 77}, + dictWord{141, 10, 274}, + dictWord{4, 0, 444}, + dictWord{10, 0, 146}, + dictWord{140, 0, 9}, + dictWord{139, 11, 163}, + dictWord{7, 0, 1260}, + dictWord{135, 0, 1790}, + dictWord{9, 0, 222}, + dictWord{10, 0, 43}, + dictWord{139, 0, 900}, + dictWord{137, 11, 234}, + dictWord{138, 0, 971}, + dictWord{137, 0, 761}, + dictWord{134, 0, 699}, + dictWord{ + 136, + 11, + 434, + }, + dictWord{6, 0, 1116}, + dictWord{7, 0, 1366}, + dictWord{5, 10, 20}, + dictWord{6, 11, 197}, + dictWord{6, 10, 298}, + dictWord{7, 10, 659}, + dictWord{8, 11, 205}, + dictWord{137, 10, 219}, + dictWord{132, 11, 490}, + dictWord{11, 11, 820}, + dictWord{150, 11, 51}, + dictWord{7, 10, 1440}, + dictWord{11, 10, 854}, + dictWord{ + 11, + 10, + 872, + }, + dictWord{11, 10, 921}, + dictWord{12, 10, 551}, + dictWord{13, 10, 472}, + dictWord{142, 10, 367}, + dictWord{140, 11, 13}, + dictWord{132, 0, 829}, + dictWord{12, 0, 242}, + dictWord{132, 10, 439}, + dictWord{136, 10, 669}, + dictWord{6, 0, 593}, + dictWord{6, 11, 452}, + dictWord{7, 11, 312}, + dictWord{ + 138, + 11, + 219, + }, + dictWord{4, 11, 333}, + dictWord{9, 11, 176}, + dictWord{12, 11, 353}, + dictWord{141, 11, 187}, + dictWord{7, 0, 36}, + dictWord{8, 0, 201}, + dictWord{ + 136, + 0, + 605, + }, + dictWord{140, 0, 224}, + dictWord{132, 10, 233}, + dictWord{134, 0, 1430}, + 
dictWord{134, 0, 1806}, + dictWord{4, 0, 523}, + dictWord{133, 0, 638}, + dictWord{ + 6, + 0, + 1889, + }, + dictWord{9, 0, 958}, + dictWord{9, 0, 971}, + dictWord{9, 0, 976}, + dictWord{12, 0, 796}, + dictWord{12, 0, 799}, + dictWord{12, 0, 808}, + dictWord{ + 12, + 0, + 835, + }, + dictWord{12, 0, 836}, + dictWord{12, 0, 914}, + dictWord{12, 0, 946}, + dictWord{15, 0, 216}, + dictWord{15, 0, 232}, + dictWord{18, 0, 183}, + dictWord{18, 0, 187}, + dictWord{18, 0, 194}, + dictWord{18, 0, 212}, + dictWord{18, 0, 232}, + dictWord{149, 0, 49}, + dictWord{132, 10, 482}, + dictWord{6, 0, 827}, + dictWord{134, 0, 1434}, + dictWord{135, 10, 346}, + dictWord{134, 0, 2043}, + dictWord{6, 0, 242}, + dictWord{7, 0, 227}, + dictWord{7, 0, 1581}, + dictWord{8, 0, 104}, + dictWord{9, 0, 113}, + dictWord{9, 0, 220}, + dictWord{9, 0, 427}, + dictWord{10, 0, 136}, + dictWord{10, 0, 239}, + dictWord{11, 0, 579}, + dictWord{11, 0, 1023}, + dictWord{13, 0, 4}, + dictWord{ + 13, + 0, + 204, + }, + dictWord{13, 0, 316}, + dictWord{148, 0, 86}, + dictWord{134, 11, 1685}, + dictWord{7, 0, 148}, + dictWord{8, 0, 284}, + dictWord{141, 0, 63}, + dictWord{ + 142, + 0, + 10, + }, + dictWord{135, 11, 584}, + dictWord{134, 0, 1249}, + dictWord{7, 0, 861}, + dictWord{135, 10, 334}, + dictWord{5, 10, 795}, + dictWord{6, 10, 1741}, + dictWord{ + 137, + 11, + 70, + }, + dictWord{132, 0, 807}, + dictWord{7, 11, 135}, + dictWord{8, 11, 7}, + dictWord{8, 11, 62}, + dictWord{9, 11, 243}, + dictWord{10, 11, 658}, + dictWord{ + 10, + 11, + 697, + }, + dictWord{11, 11, 456}, + dictWord{139, 11, 756}, + dictWord{9, 11, 395}, + dictWord{138, 11, 79}, + dictWord{137, 11, 108}, + dictWord{147, 0, 94}, + dictWord{136, 0, 494}, + dictWord{135, 11, 631}, + dictWord{135, 10, 622}, + dictWord{7, 0, 1510}, + dictWord{135, 10, 1750}, + dictWord{4, 10, 203}, + dictWord{ + 135, + 10, + 1936, + }, + dictWord{7, 11, 406}, + dictWord{7, 11, 459}, + dictWord{8, 11, 606}, + dictWord{139, 11, 726}, + dictWord{7, 0, 1306}, + 
dictWord{8, 0, 505}, + dictWord{ + 9, + 0, + 482, + }, + dictWord{10, 0, 126}, + dictWord{11, 0, 225}, + dictWord{12, 0, 347}, + dictWord{12, 0, 449}, + dictWord{13, 0, 19}, + dictWord{14, 0, 218}, + dictWord{142, 0, 435}, + dictWord{5, 0, 268}, + dictWord{10, 0, 764}, + dictWord{12, 0, 120}, + dictWord{13, 0, 39}, + dictWord{145, 0, 127}, + dictWord{142, 11, 68}, + dictWord{11, 10, 678}, + dictWord{140, 10, 307}, + dictWord{12, 11, 268}, + dictWord{12, 11, 640}, + dictWord{142, 11, 119}, + dictWord{135, 10, 2044}, + dictWord{133, 11, 612}, + dictWord{ + 4, + 11, + 372, + }, + dictWord{7, 11, 482}, + dictWord{8, 11, 158}, + dictWord{9, 11, 602}, + dictWord{9, 11, 615}, + dictWord{10, 11, 245}, + dictWord{10, 11, 678}, + dictWord{ + 10, + 11, + 744, + }, + dictWord{11, 11, 248}, + dictWord{139, 11, 806}, + dictWord{7, 10, 311}, + dictWord{9, 10, 308}, + dictWord{140, 10, 255}, + dictWord{4, 0, 384}, + dictWord{135, 0, 1022}, + dictWord{5, 11, 854}, + dictWord{135, 11, 1991}, + dictWord{135, 10, 1266}, + dictWord{4, 10, 400}, + dictWord{5, 10, 267}, + dictWord{ + 135, + 10, + 232, + }, + dictWord{135, 0, 1703}, + dictWord{9, 0, 159}, + dictWord{11, 0, 661}, + dictWord{140, 0, 603}, + dictWord{4, 0, 964}, + dictWord{14, 0, 438}, + dictWord{ + 14, + 0, + 444, + }, + dictWord{14, 0, 456}, + dictWord{22, 0, 60}, + dictWord{22, 0, 63}, + dictWord{9, 11, 106}, + dictWord{9, 11, 163}, + dictWord{9, 11, 296}, + dictWord{10, 11, 167}, + dictWord{10, 11, 172}, + dictWord{10, 11, 777}, + dictWord{139, 11, 16}, + dictWord{136, 0, 583}, + dictWord{132, 0, 515}, + dictWord{8, 0, 632}, + dictWord{8, 0, 697}, + dictWord{137, 0, 854}, + dictWord{5, 11, 195}, + dictWord{135, 11, 1685}, + dictWord{6, 0, 1123}, + dictWord{134, 0, 1365}, + dictWord{134, 11, 328}, + dictWord{ + 7, + 11, + 1997, + }, + dictWord{8, 11, 730}, + dictWord{139, 11, 1006}, + dictWord{4, 0, 136}, + dictWord{133, 0, 551}, + dictWord{134, 0, 1782}, + dictWord{7, 0, 1287}, + dictWord{ + 9, + 0, + 44, + }, + 
dictWord{10, 0, 552}, + dictWord{10, 0, 642}, + dictWord{11, 0, 839}, + dictWord{12, 0, 274}, + dictWord{12, 0, 275}, + dictWord{12, 0, 372}, + dictWord{ + 13, + 0, + 91, + }, + dictWord{142, 0, 125}, + dictWord{5, 11, 751}, + dictWord{11, 11, 797}, + dictWord{140, 11, 203}, + dictWord{133, 0, 732}, + dictWord{7, 0, 679}, + dictWord{ + 8, + 0, + 313, + }, + dictWord{4, 10, 100}, + dictWord{135, 11, 821}, + dictWord{10, 0, 361}, + dictWord{142, 0, 316}, + dictWord{134, 0, 595}, + dictWord{6, 0, 147}, + dictWord{ + 7, + 0, + 886, + }, + dictWord{9, 0, 753}, + dictWord{138, 0, 268}, + dictWord{5, 10, 362}, + dictWord{5, 10, 443}, + dictWord{6, 10, 318}, + dictWord{7, 10, 1019}, + dictWord{ + 139, + 10, + 623, + }, + dictWord{5, 10, 463}, + dictWord{136, 10, 296}, + dictWord{4, 10, 454}, + dictWord{5, 11, 950}, + dictWord{5, 11, 994}, + dictWord{134, 11, 351}, + dictWord{ + 138, + 0, + 137, + }, + dictWord{5, 10, 48}, + dictWord{5, 10, 404}, + dictWord{6, 10, 557}, + dictWord{7, 10, 458}, + dictWord{8, 10, 597}, + dictWord{10, 10, 455}, + dictWord{ + 10, + 10, + 606, + }, + dictWord{11, 10, 49}, + dictWord{11, 10, 548}, + dictWord{12, 10, 476}, + dictWord{13, 10, 18}, + dictWord{141, 10, 450}, + dictWord{133, 0, 414}, + dictWord{ + 135, + 0, + 1762, + }, + dictWord{5, 11, 421}, + dictWord{135, 11, 47}, + dictWord{5, 10, 442}, + dictWord{135, 10, 1984}, + dictWord{134, 0, 599}, + dictWord{134, 0, 1749}, + dictWord{134, 0, 1627}, + dictWord{4, 0, 488}, + dictWord{132, 11, 350}, + dictWord{137, 11, 751}, + dictWord{132, 0, 83}, + dictWord{140, 0, 676}, + dictWord{ + 133, + 11, + 967, + }, + dictWord{7, 0, 1639}, + dictWord{5, 10, 55}, + dictWord{140, 10, 161}, + dictWord{4, 11, 473}, + dictWord{7, 11, 623}, + dictWord{8, 11, 808}, + dictWord{ + 9, + 11, + 871, + }, + dictWord{9, 11, 893}, + dictWord{11, 11, 38}, + dictWord{11, 11, 431}, + dictWord{12, 11, 112}, + dictWord{12, 11, 217}, + dictWord{12, 11, 243}, + dictWord{ + 12, + 11, + 562, + }, + dictWord{12, 11, 683}, + 
dictWord{13, 11, 141}, + dictWord{13, 11, 197}, + dictWord{13, 11, 227}, + dictWord{13, 11, 406}, + dictWord{13, 11, 487}, + dictWord{14, 11, 156}, + dictWord{14, 11, 203}, + dictWord{14, 11, 224}, + dictWord{14, 11, 256}, + dictWord{18, 11, 58}, + dictWord{150, 11, 0}, + dictWord{ + 133, + 10, + 450, + }, + dictWord{7, 11, 736}, + dictWord{139, 11, 264}, + dictWord{134, 0, 278}, + dictWord{4, 11, 222}, + dictWord{7, 11, 286}, + dictWord{136, 11, 629}, + dictWord{ + 135, + 10, + 869, + }, + dictWord{140, 0, 97}, + dictWord{144, 0, 14}, + dictWord{134, 0, 1085}, + dictWord{4, 10, 213}, + dictWord{7, 10, 223}, + dictWord{136, 10, 80}, + dictWord{ + 7, + 0, + 388, + }, + dictWord{7, 0, 644}, + dictWord{139, 0, 781}, + dictWord{132, 0, 849}, + dictWord{7, 0, 229}, + dictWord{8, 0, 59}, + dictWord{9, 0, 190}, + dictWord{10, 0, 378}, + dictWord{140, 0, 191}, + dictWord{7, 10, 381}, + dictWord{7, 10, 806}, + dictWord{7, 10, 820}, + dictWord{8, 10, 354}, + dictWord{8, 10, 437}, + dictWord{8, 10, 787}, + dictWord{9, 10, 657}, + dictWord{10, 10, 58}, + dictWord{10, 10, 339}, + dictWord{10, 10, 749}, + dictWord{11, 10, 914}, + dictWord{12, 10, 162}, + dictWord{13, 10, 75}, + dictWord{14, 10, 106}, + dictWord{14, 10, 198}, + dictWord{14, 10, 320}, + dictWord{14, 10, 413}, + dictWord{146, 10, 43}, + dictWord{141, 11, 306}, + dictWord{ + 136, + 10, + 747, + }, + dictWord{134, 0, 1115}, + dictWord{16, 0, 94}, + dictWord{16, 0, 108}, + dictWord{136, 11, 146}, + dictWord{6, 0, 700}, + dictWord{6, 0, 817}, + dictWord{ + 134, + 0, + 1002, + }, + dictWord{133, 10, 692}, + dictWord{4, 11, 465}, + dictWord{135, 11, 1663}, + dictWord{134, 10, 191}, + dictWord{6, 0, 1414}, + dictWord{ + 135, + 11, + 913, + }, + dictWord{132, 0, 660}, + dictWord{7, 0, 1035}, + dictWord{138, 0, 737}, + dictWord{6, 10, 162}, + dictWord{7, 10, 1960}, + dictWord{136, 10, 831}, + dictWord{ + 132, + 10, + 706, + }, + dictWord{7, 0, 690}, + dictWord{9, 0, 217}, + dictWord{9, 0, 587}, + dictWord{140, 0, 521}, + 
dictWord{138, 10, 426}, + dictWord{135, 10, 1235}, + dictWord{ + 6, + 11, + 82, + }, + dictWord{7, 11, 138}, + dictWord{7, 11, 517}, + dictWord{9, 11, 673}, + dictWord{139, 11, 238}, + dictWord{138, 0, 272}, + dictWord{5, 11, 495}, + dictWord{ + 7, + 11, + 834, + }, + dictWord{9, 11, 733}, + dictWord{139, 11, 378}, + dictWord{134, 0, 1744}, + dictWord{132, 0, 1011}, + dictWord{7, 11, 828}, + dictWord{142, 11, 116}, + dictWord{4, 0, 733}, + dictWord{9, 0, 194}, + dictWord{10, 0, 92}, + dictWord{11, 0, 198}, + dictWord{12, 0, 84}, + dictWord{13, 0, 128}, + dictWord{133, 11, 559}, + dictWord{ + 10, + 0, + 57, + }, + dictWord{10, 0, 277}, + dictWord{6, 11, 21}, + dictWord{6, 11, 1737}, + dictWord{7, 11, 1444}, + dictWord{136, 11, 224}, + dictWord{4, 10, 204}, + dictWord{ + 137, + 10, + 902, + }, + dictWord{136, 10, 833}, + dictWord{11, 0, 348}, + dictWord{12, 0, 99}, + dictWord{18, 0, 1}, + dictWord{18, 0, 11}, + dictWord{19, 0, 4}, + dictWord{7, 10, 366}, + dictWord{9, 10, 287}, + dictWord{12, 10, 199}, + dictWord{12, 10, 556}, + dictWord{140, 10, 577}, + dictWord{6, 0, 1981}, + dictWord{136, 0, 936}, + dictWord{ + 21, + 0, + 33, + }, + dictWord{150, 0, 40}, + dictWord{5, 11, 519}, + dictWord{138, 11, 204}, + dictWord{5, 10, 356}, + dictWord{135, 10, 224}, + dictWord{134, 0, 775}, + dictWord{ + 135, + 0, + 306, + }, + dictWord{7, 10, 630}, + dictWord{9, 10, 567}, + dictWord{11, 10, 150}, + dictWord{11, 10, 444}, + dictWord{141, 10, 119}, + dictWord{5, 0, 979}, + dictWord{ + 134, + 10, + 539, + }, + dictWord{133, 0, 611}, + dictWord{4, 11, 402}, + dictWord{135, 11, 1679}, + dictWord{5, 0, 178}, + dictWord{7, 11, 2}, + dictWord{8, 11, 323}, + dictWord{ + 136, + 11, + 479, + }, + dictWord{5, 11, 59}, + dictWord{135, 11, 672}, + dictWord{4, 0, 1010}, + dictWord{6, 0, 1969}, + dictWord{138, 11, 237}, + dictWord{133, 11, 412}, + dictWord{146, 11, 34}, + dictWord{7, 11, 1740}, + dictWord{146, 11, 48}, + dictWord{134, 0, 664}, + dictWord{139, 10, 814}, + dictWord{4, 11, 85}, 
+ dictWord{ + 135, + 11, + 549, + }, + dictWord{133, 11, 94}, + dictWord{133, 11, 457}, + dictWord{132, 0, 390}, + dictWord{134, 0, 1510}, + dictWord{4, 10, 235}, + dictWord{135, 10, 255}, + dictWord{4, 10, 194}, + dictWord{5, 10, 584}, + dictWord{6, 11, 11}, + dictWord{6, 10, 384}, + dictWord{7, 11, 187}, + dictWord{7, 10, 583}, + dictWord{10, 10, 761}, + dictWord{ + 11, + 10, + 760, + }, + dictWord{139, 10, 851}, + dictWord{4, 11, 522}, + dictWord{139, 11, 802}, + dictWord{135, 0, 493}, + dictWord{10, 11, 776}, + dictWord{13, 11, 345}, + dictWord{142, 11, 425}, + dictWord{146, 0, 37}, + dictWord{4, 11, 52}, + dictWord{135, 11, 661}, + dictWord{134, 0, 724}, + dictWord{134, 0, 829}, + dictWord{ + 133, + 11, + 520, + }, + dictWord{133, 10, 562}, + dictWord{4, 11, 281}, + dictWord{5, 11, 38}, + dictWord{7, 11, 194}, + dictWord{7, 11, 668}, + dictWord{7, 11, 1893}, + dictWord{ + 137, + 11, + 397, + }, + dictWord{5, 10, 191}, + dictWord{137, 10, 271}, + dictWord{7, 0, 1537}, + dictWord{14, 0, 96}, + dictWord{143, 0, 73}, + dictWord{5, 0, 473}, + dictWord{ + 11, + 0, + 168, + }, + dictWord{4, 10, 470}, + dictWord{6, 10, 153}, + dictWord{7, 10, 1503}, + dictWord{7, 10, 1923}, + dictWord{10, 10, 701}, + dictWord{11, 10, 132}, + dictWord{ + 11, + 10, + 227, + }, + dictWord{11, 10, 320}, + dictWord{11, 10, 436}, + dictWord{11, 10, 525}, + dictWord{11, 10, 855}, + dictWord{12, 10, 41}, + dictWord{12, 10, 286}, + dictWord{13, 10, 103}, + dictWord{13, 10, 284}, + dictWord{14, 10, 255}, + dictWord{14, 10, 262}, + dictWord{15, 10, 117}, + dictWord{143, 10, 127}, + dictWord{ + 133, + 0, + 105, + }, + dictWord{5, 0, 438}, + dictWord{9, 0, 694}, + dictWord{12, 0, 627}, + dictWord{141, 0, 210}, + dictWord{133, 10, 327}, + dictWord{6, 10, 552}, + dictWord{ + 7, + 10, + 1754, + }, + dictWord{137, 10, 604}, + dictWord{134, 0, 1256}, + dictWord{152, 0, 11}, + dictWord{5, 11, 448}, + dictWord{11, 11, 98}, + dictWord{139, 11, 524}, + dictWord{ + 7, + 0, + 1626, + }, + dictWord{5, 10, 
80}, + dictWord{6, 10, 405}, + dictWord{7, 10, 403}, + dictWord{7, 10, 1502}, + dictWord{8, 10, 456}, + dictWord{9, 10, 487}, + dictWord{ + 9, + 10, + 853, + }, + dictWord{9, 10, 889}, + dictWord{10, 10, 309}, + dictWord{11, 10, 721}, + dictWord{11, 10, 994}, + dictWord{12, 10, 430}, + dictWord{13, 10, 165}, + dictWord{ + 14, + 11, + 16, + }, + dictWord{146, 11, 44}, + dictWord{132, 0, 779}, + dictWord{8, 0, 25}, + dictWord{138, 0, 826}, + dictWord{4, 10, 453}, + dictWord{5, 10, 887}, + dictWord{ + 6, + 10, + 535, + }, + dictWord{8, 10, 6}, + dictWord{8, 10, 543}, + dictWord{136, 10, 826}, + dictWord{137, 11, 461}, + dictWord{140, 11, 632}, + dictWord{132, 0, 308}, + dictWord{135, 0, 741}, + dictWord{132, 0, 671}, + dictWord{7, 0, 150}, + dictWord{8, 0, 649}, + dictWord{136, 0, 1020}, + dictWord{9, 0, 99}, + dictWord{6, 11, 336}, + dictWord{ + 8, + 11, + 552, + }, + dictWord{9, 11, 285}, + dictWord{10, 11, 99}, + dictWord{139, 11, 568}, + dictWord{134, 0, 521}, + dictWord{5, 0, 339}, + dictWord{14, 0, 3}, + dictWord{ + 15, + 0, + 41, + }, + dictWord{15, 0, 166}, + dictWord{147, 0, 66}, + dictWord{6, 11, 423}, + dictWord{7, 11, 665}, + dictWord{7, 11, 1210}, + dictWord{9, 11, 218}, + dictWord{ + 141, + 11, + 222, + }, + dictWord{6, 0, 543}, + dictWord{5, 10, 101}, + dictWord{5, 11, 256}, + dictWord{6, 10, 88}, + dictWord{7, 10, 1677}, + dictWord{9, 10, 100}, + dictWord{10, 10, 677}, + dictWord{14, 10, 169}, + dictWord{14, 10, 302}, + dictWord{14, 10, 313}, + dictWord{15, 10, 48}, + dictWord{143, 10, 84}, + dictWord{4, 10, 310}, + dictWord{ + 7, + 10, + 708, + }, + dictWord{7, 10, 996}, + dictWord{9, 10, 795}, + dictWord{10, 10, 390}, + dictWord{10, 10, 733}, + dictWord{11, 10, 451}, + dictWord{12, 10, 249}, + dictWord{ + 14, + 10, + 115, + }, + dictWord{14, 10, 286}, + dictWord{143, 10, 100}, + dictWord{133, 10, 587}, + dictWord{13, 11, 417}, + dictWord{14, 11, 129}, + dictWord{143, 11, 15}, + dictWord{134, 0, 1358}, + dictWord{136, 11, 554}, + dictWord{132, 10, 
498}, + dictWord{7, 10, 217}, + dictWord{8, 10, 140}, + dictWord{138, 10, 610}, + dictWord{ + 135, + 11, + 989, + }, + dictWord{135, 11, 634}, + dictWord{6, 0, 155}, + dictWord{140, 0, 234}, + dictWord{135, 11, 462}, + dictWord{132, 11, 618}, + dictWord{ + 134, + 0, + 1628, + }, + dictWord{132, 0, 766}, + dictWord{4, 11, 339}, + dictWord{5, 10, 905}, + dictWord{135, 11, 259}, + dictWord{135, 0, 829}, + dictWord{4, 11, 759}, + dictWord{ + 141, + 11, + 169, + }, + dictWord{7, 0, 1445}, + dictWord{4, 10, 456}, + dictWord{7, 10, 358}, + dictWord{7, 10, 1637}, + dictWord{8, 10, 643}, + dictWord{139, 10, 483}, + dictWord{ + 5, + 0, + 486, + }, + dictWord{135, 0, 1349}, + dictWord{5, 11, 688}, + dictWord{135, 11, 712}, + dictWord{7, 0, 1635}, + dictWord{8, 0, 17}, + dictWord{10, 0, 217}, + dictWord{ + 10, + 0, + 295, + }, + dictWord{12, 0, 2}, + dictWord{140, 11, 2}, + dictWord{138, 0, 558}, + dictWord{150, 10, 56}, + dictWord{4, 11, 278}, + dictWord{5, 11, 465}, + dictWord{ + 135, + 11, + 1367, + }, + dictWord{136, 11, 482}, + dictWord{133, 10, 535}, + dictWord{6, 0, 1362}, + dictWord{6, 0, 1461}, + dictWord{10, 11, 274}, + dictWord{10, 11, 625}, + dictWord{139, 11, 530}, + dictWord{5, 0, 599}, + dictWord{5, 11, 336}, + dictWord{6, 11, 341}, + dictWord{6, 11, 478}, + dictWord{6, 11, 1763}, + dictWord{136, 11, 386}, + dictWord{7, 10, 1748}, + dictWord{137, 11, 151}, + dictWord{134, 0, 1376}, + dictWord{133, 10, 539}, + dictWord{135, 11, 73}, + dictWord{135, 11, 1971}, + dictWord{139, 11, 283}, + dictWord{9, 0, 93}, + dictWord{139, 0, 474}, + dictWord{6, 10, 91}, + dictWord{135, 10, 435}, + dictWord{6, 0, 447}, + dictWord{5, 11, 396}, + dictWord{134, 11, 501}, + dictWord{4, 10, 16}, + dictWord{5, 10, 316}, + dictWord{5, 10, 842}, + dictWord{6, 10, 370}, + dictWord{6, 10, 1778}, + dictWord{8, 10, 166}, + dictWord{11, 10, 812}, + dictWord{12, 10, 206}, + dictWord{12, 10, 351}, + dictWord{14, 10, 418}, + dictWord{16, 10, 15}, + dictWord{16, 10, 34}, + dictWord{18, 10, 3}, + 
dictWord{19, 10, 3}, + dictWord{19, 10, 7}, + dictWord{20, 10, 4}, + dictWord{149, 10, 21}, + dictWord{7, 0, 577}, + dictWord{7, 0, 1432}, + dictWord{9, 0, 475}, + dictWord{9, 0, 505}, + dictWord{9, 0, 526}, + dictWord{9, 0, 609}, + dictWord{9, 0, 689}, + dictWord{9, 0, 726}, + dictWord{9, 0, 735}, + dictWord{9, 0, 738}, + dictWord{10, 0, 556}, + dictWord{ + 10, + 0, + 674, + }, + dictWord{10, 0, 684}, + dictWord{11, 0, 89}, + dictWord{11, 0, 202}, + dictWord{11, 0, 272}, + dictWord{11, 0, 380}, + dictWord{11, 0, 415}, + dictWord{11, 0, 505}, + dictWord{11, 0, 537}, + dictWord{11, 0, 550}, + dictWord{11, 0, 562}, + dictWord{11, 0, 640}, + dictWord{11, 0, 667}, + dictWord{11, 0, 688}, + dictWord{11, 0, 847}, + dictWord{11, 0, 927}, + dictWord{11, 0, 930}, + dictWord{11, 0, 940}, + dictWord{12, 0, 144}, + dictWord{12, 0, 325}, + dictWord{12, 0, 329}, + dictWord{12, 0, 389}, + dictWord{ + 12, + 0, + 403, + }, + dictWord{12, 0, 451}, + dictWord{12, 0, 515}, + dictWord{12, 0, 604}, + dictWord{12, 0, 616}, + dictWord{12, 0, 626}, + dictWord{13, 0, 66}, + dictWord{ + 13, + 0, + 131, + }, + dictWord{13, 0, 167}, + dictWord{13, 0, 236}, + dictWord{13, 0, 368}, + dictWord{13, 0, 411}, + dictWord{13, 0, 434}, + dictWord{13, 0, 453}, + dictWord{13, 0, 461}, + dictWord{13, 0, 474}, + dictWord{14, 0, 59}, + dictWord{14, 0, 60}, + dictWord{14, 0, 139}, + dictWord{14, 0, 152}, + dictWord{14, 0, 276}, + dictWord{14, 0, 353}, + dictWord{ + 14, + 0, + 402, + }, + dictWord{15, 0, 28}, + dictWord{15, 0, 81}, + dictWord{15, 0, 123}, + dictWord{15, 0, 152}, + dictWord{18, 0, 136}, + dictWord{148, 0, 88}, + dictWord{ + 4, + 11, + 929, + }, + dictWord{133, 11, 799}, + dictWord{136, 11, 46}, + dictWord{142, 0, 307}, + dictWord{4, 0, 609}, + dictWord{7, 0, 756}, + dictWord{9, 0, 544}, + dictWord{ + 11, + 0, + 413, + }, + dictWord{144, 0, 25}, + dictWord{10, 0, 687}, + dictWord{7, 10, 619}, + dictWord{10, 10, 547}, + dictWord{11, 10, 122}, + dictWord{140, 10, 601}, + dictWord{ + 4, + 0, + 
930, + }, + dictWord{133, 0, 947}, + dictWord{133, 0, 939}, + dictWord{142, 0, 21}, + dictWord{4, 11, 892}, + dictWord{133, 11, 770}, + dictWord{133, 0, 962}, + dictWord{ + 5, + 0, + 651, + }, + dictWord{8, 0, 170}, + dictWord{9, 0, 61}, + dictWord{9, 0, 63}, + dictWord{10, 0, 23}, + dictWord{10, 0, 37}, + dictWord{10, 0, 834}, + dictWord{11, 0, 4}, + dictWord{ + 11, + 0, + 187, + }, + dictWord{11, 0, 281}, + dictWord{11, 0, 503}, + dictWord{11, 0, 677}, + dictWord{12, 0, 96}, + dictWord{12, 0, 130}, + dictWord{12, 0, 244}, + dictWord{14, 0, 5}, + dictWord{14, 0, 40}, + dictWord{14, 0, 162}, + dictWord{14, 0, 202}, + dictWord{146, 0, 133}, + dictWord{4, 0, 406}, + dictWord{5, 0, 579}, + dictWord{12, 0, 492}, + dictWord{ + 150, + 0, + 15, + }, + dictWord{135, 11, 158}, + dictWord{135, 0, 597}, + dictWord{132, 0, 981}, + dictWord{132, 10, 888}, + dictWord{4, 10, 149}, + dictWord{138, 10, 368}, + dictWord{132, 0, 545}, + dictWord{4, 10, 154}, + dictWord{7, 10, 1134}, + dictWord{136, 10, 105}, + dictWord{135, 11, 2001}, + dictWord{134, 0, 1558}, + dictWord{ + 4, + 10, + 31, + }, + dictWord{6, 10, 429}, + dictWord{7, 10, 962}, + dictWord{9, 10, 458}, + dictWord{139, 10, 691}, + dictWord{132, 10, 312}, + dictWord{135, 10, 1642}, + dictWord{ + 6, + 0, + 17, + }, + dictWord{6, 0, 1304}, + dictWord{7, 0, 16}, + dictWord{7, 0, 1001}, + dictWord{9, 0, 886}, + dictWord{10, 0, 489}, + dictWord{10, 0, 800}, + dictWord{11, 0, 782}, + dictWord{12, 0, 320}, + dictWord{13, 0, 467}, + dictWord{14, 0, 145}, + dictWord{14, 0, 387}, + dictWord{143, 0, 119}, + dictWord{135, 0, 1982}, + dictWord{17, 0, 17}, + dictWord{7, 11, 1461}, + dictWord{140, 11, 91}, + dictWord{4, 10, 236}, + dictWord{132, 11, 602}, + dictWord{138, 0, 907}, + dictWord{136, 0, 110}, + dictWord{7, 0, 272}, + dictWord{19, 0, 53}, + dictWord{5, 10, 836}, + dictWord{5, 10, 857}, + dictWord{134, 10, 1680}, + dictWord{5, 0, 458}, + dictWord{7, 11, 1218}, + dictWord{136, 11, 303}, + dictWord{7, 0, 1983}, + dictWord{8, 0, 
0}, + dictWord{8, 0, 171}, + dictWord{9, 0, 120}, + dictWord{9, 0, 732}, + dictWord{10, 0, 473}, + dictWord{11, 0, 656}, + dictWord{ + 11, + 0, + 998, + }, + dictWord{18, 0, 0}, + dictWord{18, 0, 2}, + dictWord{19, 0, 21}, + dictWord{10, 10, 68}, + dictWord{139, 10, 494}, + dictWord{137, 11, 662}, + dictWord{4, 11, 13}, + dictWord{5, 11, 567}, + dictWord{7, 11, 1498}, + dictWord{9, 11, 124}, + dictWord{11, 11, 521}, + dictWord{140, 11, 405}, + dictWord{4, 10, 81}, + dictWord{139, 10, 867}, + dictWord{135, 11, 1006}, + dictWord{7, 11, 800}, + dictWord{7, 11, 1783}, + dictWord{138, 11, 12}, + dictWord{9, 0, 295}, + dictWord{10, 0, 443}, + dictWord{ + 5, + 10, + 282, + }, + dictWord{8, 10, 650}, + dictWord{137, 10, 907}, + dictWord{132, 11, 735}, + dictWord{4, 11, 170}, + dictWord{4, 10, 775}, + dictWord{135, 11, 323}, + dictWord{ + 6, + 0, + 1844, + }, + dictWord{10, 0, 924}, + dictWord{11, 11, 844}, + dictWord{12, 11, 104}, + dictWord{140, 11, 625}, + dictWord{5, 11, 304}, + dictWord{7, 11, 1403}, + dictWord{140, 11, 498}, + dictWord{134, 0, 1232}, + dictWord{4, 0, 519}, + dictWord{10, 0, 70}, + dictWord{12, 0, 26}, + dictWord{14, 0, 17}, + dictWord{14, 0, 178}, + dictWord{ + 15, + 0, + 34, + }, + dictWord{149, 0, 12}, + dictWord{132, 0, 993}, + dictWord{4, 11, 148}, + dictWord{133, 11, 742}, + dictWord{6, 0, 31}, + dictWord{7, 0, 491}, + dictWord{7, 0, 530}, + dictWord{8, 0, 592}, + dictWord{11, 0, 53}, + dictWord{11, 0, 779}, + dictWord{12, 0, 167}, + dictWord{12, 0, 411}, + dictWord{14, 0, 14}, + dictWord{14, 0, 136}, + dictWord{ + 15, + 0, + 72, + }, + dictWord{16, 0, 17}, + dictWord{144, 0, 72}, + dictWord{133, 0, 907}, + dictWord{134, 0, 733}, + dictWord{133, 11, 111}, + dictWord{4, 10, 71}, + dictWord{ + 5, + 10, + 376, + }, + dictWord{7, 10, 119}, + dictWord{138, 10, 665}, + dictWord{136, 0, 55}, + dictWord{8, 0, 430}, + dictWord{136, 11, 430}, + dictWord{4, 0, 208}, + dictWord{ + 5, + 0, + 106, + }, + dictWord{6, 0, 531}, + dictWord{8, 0, 408}, + 
dictWord{9, 0, 188}, + dictWord{138, 0, 572}, + dictWord{12, 0, 56}, + dictWord{11, 10, 827}, + dictWord{14, 10, 34}, + dictWord{143, 10, 148}, + dictWord{134, 0, 1693}, + dictWord{133, 11, 444}, + dictWord{132, 10, 479}, + dictWord{140, 0, 441}, + dictWord{9, 0, 449}, + dictWord{ + 10, + 0, + 192, + }, + dictWord{138, 0, 740}, + dictWord{134, 0, 928}, + dictWord{4, 0, 241}, + dictWord{7, 10, 607}, + dictWord{136, 10, 99}, + dictWord{8, 11, 123}, + dictWord{ + 15, + 11, + 6, + }, + dictWord{144, 11, 7}, + dictWord{6, 11, 285}, + dictWord{8, 11, 654}, + dictWord{11, 11, 749}, + dictWord{12, 11, 190}, + dictWord{12, 11, 327}, + dictWord{ + 13, + 11, + 120, + }, + dictWord{13, 11, 121}, + dictWord{13, 11, 327}, + dictWord{15, 11, 47}, + dictWord{146, 11, 40}, + dictWord{4, 10, 41}, + dictWord{5, 10, 74}, + dictWord{ + 7, + 10, + 1627, + }, + dictWord{11, 10, 871}, + dictWord{140, 10, 619}, + dictWord{7, 0, 1525}, + dictWord{11, 10, 329}, + dictWord{11, 10, 965}, + dictWord{12, 10, 241}, + dictWord{14, 10, 354}, + dictWord{15, 10, 22}, + dictWord{148, 10, 63}, + dictWord{132, 0, 259}, + dictWord{135, 11, 183}, + dictWord{9, 10, 209}, + dictWord{ + 137, + 10, + 300, + }, + dictWord{5, 11, 937}, + dictWord{135, 11, 100}, + dictWord{133, 10, 98}, + dictWord{4, 0, 173}, + dictWord{5, 0, 312}, + dictWord{5, 0, 512}, + dictWord{ + 135, + 0, + 1285, + }, + dictWord{141, 0, 185}, + dictWord{7, 0, 1603}, + dictWord{7, 0, 1691}, + dictWord{9, 0, 464}, + dictWord{11, 0, 195}, + dictWord{12, 0, 279}, + dictWord{ + 12, + 0, + 448, + }, + dictWord{14, 0, 11}, + dictWord{147, 0, 102}, + dictWord{135, 0, 1113}, + dictWord{133, 10, 984}, + dictWord{4, 0, 452}, + dictWord{5, 0, 583}, + dictWord{ + 135, + 0, + 720, + }, + dictWord{4, 0, 547}, + dictWord{5, 0, 817}, + dictWord{6, 0, 433}, + dictWord{7, 0, 593}, + dictWord{7, 0, 1378}, + dictWord{8, 0, 161}, + dictWord{9, 0, 284}, + dictWord{ + 10, + 0, + 313, + }, + dictWord{139, 0, 886}, + dictWord{8, 0, 722}, + dictWord{4, 10, 182}, + 
dictWord{6, 10, 205}, + dictWord{135, 10, 220}, + dictWord{150, 0, 13}, + dictWord{ + 4, + 10, + 42, + }, + dictWord{9, 10, 205}, + dictWord{9, 10, 786}, + dictWord{138, 10, 659}, + dictWord{6, 0, 289}, + dictWord{7, 0, 1670}, + dictWord{12, 0, 57}, + dictWord{151, 0, 4}, + dictWord{132, 10, 635}, + dictWord{14, 0, 43}, + dictWord{146, 0, 21}, + dictWord{139, 10, 533}, + dictWord{135, 0, 1694}, + dictWord{8, 0, 420}, + dictWord{ + 139, + 0, + 193, + }, + dictWord{135, 0, 409}, + dictWord{132, 10, 371}, + dictWord{4, 10, 272}, + dictWord{135, 10, 836}, + dictWord{5, 10, 825}, + dictWord{134, 10, 1640}, + dictWord{5, 11, 251}, + dictWord{5, 11, 956}, + dictWord{8, 11, 268}, + dictWord{9, 11, 214}, + dictWord{146, 11, 142}, + dictWord{138, 0, 308}, + dictWord{6, 0, 1863}, + dictWord{141, 11, 37}, + dictWord{137, 10, 879}, + dictWord{7, 10, 317}, + dictWord{135, 10, 569}, + dictWord{132, 11, 294}, + dictWord{134, 0, 790}, + dictWord{ + 5, + 0, + 1002, + }, + dictWord{136, 0, 745}, + dictWord{5, 11, 346}, + dictWord{5, 11, 711}, + dictWord{136, 11, 390}, + dictWord{135, 0, 289}, + dictWord{5, 0, 504}, + dictWord{ + 11, + 0, + 68, + }, + dictWord{137, 10, 307}, + dictWord{4, 0, 239}, + dictWord{6, 0, 477}, + dictWord{7, 0, 1607}, + dictWord{139, 0, 617}, + dictWord{149, 0, 13}, + dictWord{ + 133, + 0, + 609, + }, + dictWord{133, 11, 624}, + dictWord{5, 11, 783}, + dictWord{7, 11, 1998}, + dictWord{135, 11, 2047}, + dictWord{133, 10, 525}, + dictWord{132, 0, 367}, + dictWord{132, 11, 594}, + dictWord{6, 0, 528}, + dictWord{133, 10, 493}, + dictWord{4, 10, 174}, + dictWord{135, 10, 911}, + dictWord{8, 10, 417}, + dictWord{ + 137, + 10, + 782, + }, + dictWord{132, 0, 694}, + dictWord{7, 0, 548}, + dictWord{137, 0, 58}, + dictWord{4, 10, 32}, + dictWord{5, 10, 215}, + dictWord{6, 10, 269}, + dictWord{7, 10, 1782}, + dictWord{7, 10, 1892}, + dictWord{10, 10, 16}, + dictWord{11, 10, 822}, + dictWord{11, 10, 954}, + dictWord{141, 10, 481}, + dictWord{140, 0, 687}, + dictWord{ + 
7, + 0, + 1749, + }, + dictWord{136, 10, 477}, + dictWord{132, 11, 569}, + dictWord{133, 10, 308}, + dictWord{135, 10, 1088}, + dictWord{4, 0, 661}, + dictWord{138, 0, 1004}, + dictWord{5, 11, 37}, + dictWord{6, 11, 39}, + dictWord{6, 11, 451}, + dictWord{7, 11, 218}, + dictWord{7, 11, 667}, + dictWord{7, 11, 1166}, + dictWord{7, 11, 1687}, + dictWord{8, 11, 662}, + dictWord{144, 11, 2}, + dictWord{9, 0, 445}, + dictWord{12, 0, 53}, + dictWord{13, 0, 492}, + dictWord{5, 10, 126}, + dictWord{8, 10, 297}, + dictWord{ + 9, + 10, + 366, + }, + dictWord{140, 10, 374}, + dictWord{7, 10, 1551}, + dictWord{139, 10, 361}, + dictWord{148, 0, 74}, + dictWord{134, 11, 508}, + dictWord{135, 0, 213}, + dictWord{132, 10, 175}, + dictWord{132, 10, 685}, + dictWord{6, 0, 760}, + dictWord{6, 0, 834}, + dictWord{134, 0, 1248}, + dictWord{7, 11, 453}, + dictWord{7, 11, 635}, + dictWord{7, 11, 796}, + dictWord{8, 11, 331}, + dictWord{9, 11, 328}, + dictWord{9, 11, 330}, + dictWord{9, 11, 865}, + dictWord{10, 11, 119}, + dictWord{10, 11, 235}, + dictWord{11, 11, 111}, + dictWord{11, 11, 129}, + dictWord{11, 11, 240}, + dictWord{12, 11, 31}, + dictWord{12, 11, 66}, + dictWord{12, 11, 222}, + dictWord{12, 11, 269}, + dictWord{12, 11, 599}, + dictWord{12, 11, 689}, + dictWord{13, 11, 186}, + dictWord{13, 11, 364}, + dictWord{142, 11, 345}, + dictWord{7, 0, 1672}, + dictWord{ + 139, + 0, + 189, + }, + dictWord{133, 10, 797}, + dictWord{133, 10, 565}, + dictWord{6, 0, 1548}, + dictWord{6, 11, 98}, + dictWord{7, 11, 585}, + dictWord{135, 11, 702}, + dictWord{ + 9, + 0, + 968, + }, + dictWord{15, 0, 192}, + dictWord{149, 0, 56}, + dictWord{4, 10, 252}, + dictWord{6, 11, 37}, + dictWord{7, 11, 299}, + dictWord{7, 10, 1068}, + dictWord{ + 7, + 11, + 1666, + }, + dictWord{8, 11, 195}, + dictWord{8, 11, 316}, + dictWord{9, 11, 178}, + dictWord{9, 11, 276}, + dictWord{9, 11, 339}, + dictWord{9, 11, 536}, + dictWord{ + 10, + 11, + 102, + }, + dictWord{10, 11, 362}, + dictWord{10, 10, 434}, + 
dictWord{10, 11, 785}, + dictWord{11, 11, 55}, + dictWord{11, 11, 149}, + dictWord{11, 10, 228}, + dictWord{ + 11, + 10, + 426, + }, + dictWord{11, 11, 773}, + dictWord{13, 10, 231}, + dictWord{13, 11, 416}, + dictWord{13, 11, 419}, + dictWord{14, 11, 38}, + dictWord{14, 11, 41}, + dictWord{14, 11, 210}, + dictWord{18, 10, 106}, + dictWord{148, 10, 87}, + dictWord{4, 0, 751}, + dictWord{11, 0, 390}, + dictWord{140, 0, 32}, + dictWord{4, 0, 409}, + dictWord{133, 0, 78}, + dictWord{11, 11, 458}, + dictWord{12, 11, 15}, + dictWord{140, 11, 432}, + dictWord{7, 0, 1602}, + dictWord{10, 0, 257}, + dictWord{10, 0, 698}, + dictWord{11, 0, 544}, + dictWord{11, 0, 585}, + dictWord{12, 0, 212}, + dictWord{13, 0, 307}, + dictWord{5, 10, 231}, + dictWord{7, 10, 601}, + dictWord{9, 10, 277}, + dictWord{ + 9, + 10, + 674, + }, + dictWord{10, 10, 178}, + dictWord{10, 10, 418}, + dictWord{10, 10, 509}, + dictWord{11, 10, 531}, + dictWord{12, 10, 113}, + dictWord{12, 10, 475}, + dictWord{13, 10, 99}, + dictWord{142, 10, 428}, + dictWord{6, 0, 473}, + dictWord{145, 0, 105}, + dictWord{6, 0, 1949}, + dictWord{15, 0, 156}, + dictWord{133, 11, 645}, + dictWord{7, 10, 1591}, + dictWord{144, 10, 43}, + dictWord{135, 0, 1779}, + dictWord{135, 10, 1683}, + dictWord{4, 11, 290}, + dictWord{135, 11, 1356}, + dictWord{134, 0, 763}, + dictWord{6, 11, 70}, + dictWord{7, 11, 1292}, + dictWord{10, 11, 762}, + dictWord{139, 11, 288}, + dictWord{142, 0, 29}, + dictWord{140, 11, 428}, + dictWord{7, 0, 883}, + dictWord{7, 11, 131}, + dictWord{7, 11, 422}, + dictWord{8, 11, 210}, + dictWord{140, 11, 573}, + dictWord{134, 0, 488}, + dictWord{4, 10, 399}, + dictWord{5, 10, 119}, + dictWord{5, 10, 494}, + dictWord{7, 10, 751}, + dictWord{137, 10, 556}, + dictWord{133, 0, 617}, + dictWord{132, 11, 936}, + dictWord{ + 139, + 0, + 50, + }, + dictWord{7, 0, 1518}, + dictWord{139, 0, 694}, + dictWord{137, 0, 785}, + dictWord{4, 0, 546}, + dictWord{135, 0, 2042}, + dictWord{7, 11, 716}, + dictWord{ + 13, + 11, 
+ 97, + }, + dictWord{141, 11, 251}, + dictWord{132, 11, 653}, + dictWord{145, 0, 22}, + dictWord{134, 0, 1016}, + dictWord{4, 0, 313}, + dictWord{133, 0, 577}, + dictWord{ + 136, + 11, + 657, + }, + dictWord{8, 0, 184}, + dictWord{141, 0, 433}, + dictWord{135, 0, 935}, + dictWord{6, 0, 720}, + dictWord{9, 0, 114}, + dictWord{146, 11, 80}, + dictWord{ + 12, + 0, + 186, + }, + dictWord{12, 0, 292}, + dictWord{14, 0, 100}, + dictWord{18, 0, 70}, + dictWord{7, 10, 594}, + dictWord{7, 10, 851}, + dictWord{7, 10, 1858}, + dictWord{ + 9, + 10, + 411, + }, + dictWord{9, 10, 574}, + dictWord{9, 10, 666}, + dictWord{9, 10, 737}, + dictWord{10, 10, 346}, + dictWord{10, 10, 712}, + dictWord{11, 10, 246}, + dictWord{ + 11, + 10, + 432, + }, + dictWord{11, 10, 517}, + dictWord{11, 10, 647}, + dictWord{11, 10, 679}, + dictWord{11, 10, 727}, + dictWord{12, 10, 304}, + dictWord{12, 10, 305}, + dictWord{12, 10, 323}, + dictWord{12, 10, 483}, + dictWord{12, 10, 572}, + dictWord{12, 10, 593}, + dictWord{12, 10, 602}, + dictWord{13, 10, 95}, + dictWord{13, 10, 101}, + dictWord{13, 10, 171}, + dictWord{13, 10, 315}, + dictWord{13, 10, 378}, + dictWord{13, 10, 425}, + dictWord{13, 10, 475}, + dictWord{14, 10, 63}, + dictWord{ + 14, + 10, + 380, + }, + dictWord{14, 10, 384}, + dictWord{15, 10, 133}, + dictWord{18, 10, 112}, + dictWord{148, 10, 72}, + dictWord{135, 10, 1093}, + dictWord{135, 11, 1836}, + dictWord{132, 10, 679}, + dictWord{137, 10, 203}, + dictWord{11, 0, 402}, + dictWord{12, 0, 109}, + dictWord{12, 0, 431}, + dictWord{13, 0, 179}, + dictWord{13, 0, 206}, + dictWord{14, 0, 217}, + dictWord{16, 0, 3}, + dictWord{148, 0, 53}, + dictWord{7, 11, 1368}, + dictWord{8, 11, 232}, + dictWord{8, 11, 361}, + dictWord{10, 11, 682}, + dictWord{138, 11, 742}, + dictWord{137, 10, 714}, + dictWord{5, 0, 886}, + dictWord{6, 0, 46}, + dictWord{6, 0, 1790}, + dictWord{7, 0, 14}, + dictWord{7, 0, 732}, + dictWord{ + 7, + 0, + 1654, + }, + dictWord{8, 0, 95}, + dictWord{8, 0, 327}, + 
dictWord{8, 0, 616}, + dictWord{9, 0, 892}, + dictWord{10, 0, 598}, + dictWord{10, 0, 769}, + dictWord{11, 0, 134}, + dictWord{11, 0, 747}, + dictWord{12, 0, 378}, + dictWord{14, 0, 97}, + dictWord{137, 11, 534}, + dictWord{4, 0, 969}, + dictWord{136, 10, 825}, + dictWord{137, 11, 27}, + dictWord{6, 0, 727}, + dictWord{142, 11, 12}, + dictWord{133, 0, 1021}, + dictWord{134, 0, 1190}, + dictWord{134, 11, 1657}, + dictWord{5, 10, 143}, + dictWord{ + 5, + 10, + 769, + }, + dictWord{6, 10, 1760}, + dictWord{7, 10, 682}, + dictWord{7, 10, 1992}, + dictWord{136, 10, 736}, + dictWord{132, 0, 153}, + dictWord{135, 11, 127}, + dictWord{133, 0, 798}, + dictWord{132, 0, 587}, + dictWord{6, 0, 598}, + dictWord{7, 0, 42}, + dictWord{8, 0, 695}, + dictWord{10, 0, 212}, + dictWord{11, 0, 158}, + dictWord{ + 14, + 0, + 196, + }, + dictWord{145, 0, 85}, + dictWord{133, 10, 860}, + dictWord{6, 0, 1929}, + dictWord{134, 0, 1933}, + dictWord{5, 0, 957}, + dictWord{5, 0, 1008}, + dictWord{ + 9, + 0, + 577, + }, + dictWord{12, 0, 141}, + dictWord{6, 10, 422}, + dictWord{7, 10, 0}, + dictWord{7, 10, 1544}, + dictWord{8, 11, 364}, + dictWord{11, 10, 990}, + dictWord{ + 12, + 10, + 453, + }, + dictWord{13, 10, 47}, + dictWord{141, 10, 266}, + dictWord{134, 0, 1319}, + dictWord{4, 0, 129}, + dictWord{135, 0, 465}, + dictWord{7, 0, 470}, + dictWord{ + 7, + 0, + 1057, + }, + dictWord{7, 0, 1201}, + dictWord{9, 0, 755}, + dictWord{11, 0, 906}, + dictWord{140, 0, 527}, + dictWord{7, 0, 908}, + dictWord{146, 0, 7}, + dictWord{5, 0, 148}, + dictWord{136, 0, 450}, + dictWord{5, 10, 515}, + dictWord{137, 10, 131}, + dictWord{7, 10, 1605}, + dictWord{11, 10, 962}, + dictWord{146, 10, 139}, + dictWord{ + 132, + 10, + 646, + }, + dictWord{134, 0, 1166}, + dictWord{4, 10, 396}, + dictWord{7, 10, 728}, + dictWord{9, 10, 117}, + dictWord{13, 10, 202}, + dictWord{148, 10, 51}, + dictWord{ + 6, + 10, + 121, + }, + dictWord{6, 10, 124}, + dictWord{6, 10, 357}, + dictWord{7, 10, 1138}, + dictWord{7, 10, 
1295}, + dictWord{8, 10, 162}, + dictWord{139, 10, 655}, + dictWord{14, 0, 374}, + dictWord{142, 11, 374}, + dictWord{138, 0, 253}, + dictWord{139, 0, 1003}, + dictWord{5, 11, 909}, + dictWord{9, 11, 849}, + dictWord{ + 138, + 11, + 805, + }, + dictWord{133, 10, 237}, + dictWord{7, 11, 525}, + dictWord{7, 11, 1579}, + dictWord{8, 11, 497}, + dictWord{136, 11, 573}, + dictWord{137, 0, 46}, + dictWord{ + 132, + 0, + 879, + }, + dictWord{134, 0, 806}, + dictWord{135, 0, 1868}, + dictWord{6, 0, 1837}, + dictWord{134, 0, 1846}, + dictWord{6, 0, 730}, + dictWord{134, 0, 881}, + dictWord{7, 0, 965}, + dictWord{7, 0, 1460}, + dictWord{7, 0, 1604}, + dictWord{7, 11, 193}, + dictWord{7, 11, 397}, + dictWord{7, 11, 1105}, + dictWord{8, 11, 124}, + dictWord{ + 8, + 11, + 619, + }, + dictWord{9, 11, 305}, + dictWord{10, 11, 264}, + dictWord{11, 11, 40}, + dictWord{12, 11, 349}, + dictWord{13, 11, 134}, + dictWord{13, 11, 295}, + dictWord{14, 11, 155}, + dictWord{15, 11, 120}, + dictWord{146, 11, 105}, + dictWord{136, 0, 506}, + dictWord{143, 0, 10}, + dictWord{4, 11, 262}, + dictWord{7, 11, 342}, + dictWord{7, 10, 571}, + dictWord{7, 10, 1877}, + dictWord{10, 10, 366}, + dictWord{141, 11, 23}, + dictWord{133, 11, 641}, + dictWord{10, 0, 22}, + dictWord{9, 10, 513}, + dictWord{10, 10, 39}, + dictWord{12, 10, 122}, + dictWord{140, 10, 187}, + dictWord{135, 11, 1431}, + dictWord{150, 11, 49}, + dictWord{4, 11, 99}, + dictWord{ + 6, + 11, + 250, + }, + dictWord{6, 11, 346}, + dictWord{8, 11, 127}, + dictWord{138, 11, 81}, + dictWord{6, 0, 2014}, + dictWord{8, 0, 928}, + dictWord{10, 0, 960}, + dictWord{10, 0, 979}, + dictWord{140, 0, 996}, + dictWord{134, 0, 296}, + dictWord{132, 11, 915}, + dictWord{5, 11, 75}, + dictWord{9, 11, 517}, + dictWord{10, 11, 470}, + dictWord{ + 12, + 11, + 155, + }, + dictWord{141, 11, 224}, + dictWord{137, 10, 873}, + dictWord{4, 0, 854}, + dictWord{140, 11, 18}, + dictWord{134, 0, 587}, + dictWord{7, 10, 107}, + dictWord{ + 7, + 10, + 838, + }, + 
dictWord{8, 10, 550}, + dictWord{138, 10, 401}, + dictWord{11, 0, 636}, + dictWord{15, 0, 145}, + dictWord{17, 0, 34}, + dictWord{19, 0, 50}, + dictWord{ + 23, + 0, + 20, + }, + dictWord{11, 10, 588}, + dictWord{11, 10, 864}, + dictWord{11, 10, 968}, + dictWord{143, 10, 160}, + dictWord{135, 11, 216}, + dictWord{7, 0, 982}, + dictWord{ + 10, + 0, + 32, + }, + dictWord{143, 0, 56}, + dictWord{133, 10, 768}, + dictWord{133, 11, 954}, + dictWord{6, 11, 304}, + dictWord{7, 11, 1114}, + dictWord{8, 11, 418}, + dictWord{ + 10, + 11, + 345, + }, + dictWord{11, 11, 341}, + dictWord{11, 11, 675}, + dictWord{141, 11, 40}, + dictWord{9, 11, 410}, + dictWord{139, 11, 425}, + dictWord{136, 0, 941}, + dictWord{5, 0, 435}, + dictWord{132, 10, 894}, + dictWord{5, 0, 85}, + dictWord{6, 0, 419}, + dictWord{7, 0, 134}, + dictWord{7, 0, 305}, + dictWord{7, 0, 361}, + dictWord{ + 7, + 0, + 1337, + }, + dictWord{8, 0, 71}, + dictWord{140, 0, 519}, + dictWord{140, 0, 688}, + dictWord{135, 0, 740}, + dictWord{5, 0, 691}, + dictWord{7, 0, 345}, + dictWord{9, 0, 94}, + dictWord{140, 0, 169}, + dictWord{5, 0, 183}, + dictWord{6, 0, 582}, + dictWord{10, 0, 679}, + dictWord{140, 0, 435}, + dictWord{134, 11, 14}, + dictWord{6, 0, 945}, + dictWord{135, 0, 511}, + dictWord{134, 11, 1708}, + dictWord{5, 11, 113}, + dictWord{6, 11, 243}, + dictWord{7, 11, 1865}, + dictWord{11, 11, 161}, + dictWord{16, 11, 37}, + dictWord{145, 11, 99}, + dictWord{132, 11, 274}, + dictWord{137, 0, 539}, + dictWord{7, 0, 1993}, + dictWord{8, 0, 684}, + dictWord{134, 10, 272}, + dictWord{ + 6, + 0, + 659, + }, + dictWord{134, 0, 982}, + dictWord{4, 10, 9}, + dictWord{5, 10, 128}, + dictWord{7, 10, 368}, + dictWord{11, 10, 480}, + dictWord{148, 10, 3}, + dictWord{ + 134, + 0, + 583, + }, + dictWord{132, 0, 803}, + dictWord{133, 0, 704}, + dictWord{4, 0, 179}, + dictWord{5, 0, 198}, + dictWord{133, 0, 697}, + dictWord{7, 0, 347}, + dictWord{7, 0, 971}, + dictWord{8, 0, 181}, + dictWord{10, 0, 711}, + dictWord{135, 11, 
166}, + dictWord{136, 10, 682}, + dictWord{4, 10, 2}, + dictWord{7, 10, 545}, + dictWord{7, 10, 894}, + dictWord{136, 11, 521}, + dictWord{135, 0, 481}, + dictWord{132, 0, 243}, + dictWord{5, 0, 203}, + dictWord{7, 0, 19}, + dictWord{7, 0, 71}, + dictWord{7, 0, 113}, + dictWord{ + 10, + 0, + 405, + }, + dictWord{11, 0, 357}, + dictWord{142, 0, 240}, + dictWord{5, 11, 725}, + dictWord{5, 11, 727}, + dictWord{135, 11, 1811}, + dictWord{6, 0, 826}, + dictWord{ + 137, + 11, + 304, + }, + dictWord{7, 0, 1450}, + dictWord{139, 0, 99}, + dictWord{133, 11, 654}, + dictWord{134, 0, 492}, + dictWord{5, 0, 134}, + dictWord{6, 0, 408}, + dictWord{ + 6, + 0, + 495, + }, + dictWord{7, 0, 1593}, + dictWord{6, 11, 273}, + dictWord{10, 11, 188}, + dictWord{13, 11, 377}, + dictWord{146, 11, 77}, + dictWord{9, 10, 769}, + dictWord{ + 140, + 10, + 185, + }, + dictWord{135, 11, 410}, + dictWord{142, 0, 4}, + dictWord{4, 0, 665}, + dictWord{134, 11, 1785}, + dictWord{4, 0, 248}, + dictWord{7, 0, 137}, + dictWord{ + 137, + 0, + 349, + }, + dictWord{5, 10, 530}, + dictWord{142, 10, 113}, + dictWord{7, 0, 1270}, + dictWord{139, 0, 612}, + dictWord{132, 11, 780}, + dictWord{5, 0, 371}, + dictWord{135, 0, 563}, + dictWord{135, 0, 826}, + dictWord{6, 0, 1535}, + dictWord{23, 0, 21}, + dictWord{151, 0, 23}, + dictWord{4, 0, 374}, + dictWord{7, 0, 547}, + dictWord{ + 7, + 0, + 1700, + }, + dictWord{7, 0, 1833}, + dictWord{139, 0, 858}, + dictWord{133, 10, 556}, + dictWord{7, 11, 612}, + dictWord{8, 11, 545}, + dictWord{8, 11, 568}, + dictWord{ + 8, + 11, + 642, + }, + dictWord{9, 11, 717}, + dictWord{10, 11, 541}, + dictWord{10, 11, 763}, + dictWord{11, 11, 449}, + dictWord{12, 11, 489}, + dictWord{13, 11, 153}, + dictWord{ + 13, + 11, + 296, + }, + dictWord{14, 11, 138}, + dictWord{14, 11, 392}, + dictWord{15, 11, 50}, + dictWord{16, 11, 6}, + dictWord{16, 11, 12}, + dictWord{148, 11, 9}, + dictWord{ + 9, + 0, + 311, + }, + dictWord{141, 0, 42}, + dictWord{8, 10, 16}, + dictWord{140, 10, 568}, 
+ dictWord{6, 0, 1968}, + dictWord{6, 0, 2027}, + dictWord{138, 0, 991}, + dictWord{ + 6, + 0, + 1647, + }, + dictWord{7, 0, 1552}, + dictWord{7, 0, 2010}, + dictWord{9, 0, 494}, + dictWord{137, 0, 509}, + dictWord{133, 11, 948}, + dictWord{6, 10, 186}, + dictWord{ + 137, + 10, + 426, + }, + dictWord{134, 0, 769}, + dictWord{134, 0, 642}, + dictWord{132, 10, 585}, + dictWord{6, 0, 123}, + dictWord{7, 0, 214}, + dictWord{9, 0, 728}, + dictWord{ + 10, + 0, + 157, + }, + dictWord{11, 0, 346}, + dictWord{11, 0, 662}, + dictWord{143, 0, 106}, + dictWord{142, 11, 381}, + dictWord{135, 0, 1435}, + dictWord{4, 11, 532}, + dictWord{ + 5, + 11, + 706, + }, + dictWord{135, 11, 662}, + dictWord{5, 11, 837}, + dictWord{134, 11, 1651}, + dictWord{4, 10, 93}, + dictWord{5, 10, 252}, + dictWord{6, 10, 229}, + dictWord{ + 7, + 10, + 291, + }, + dictWord{9, 10, 550}, + dictWord{139, 10, 644}, + dictWord{148, 0, 79}, + dictWord{137, 10, 749}, + dictWord{134, 0, 1425}, + dictWord{ + 137, + 10, + 162, + }, + dictWord{4, 11, 362}, + dictWord{7, 11, 52}, + dictWord{7, 11, 303}, + dictWord{140, 11, 166}, + dictWord{132, 10, 381}, + dictWord{4, 11, 330}, + dictWord{ + 7, + 11, + 933, + }, + dictWord{7, 11, 2012}, + dictWord{136, 11, 292}, + dictWord{135, 11, 767}, + dictWord{4, 0, 707}, + dictWord{5, 0, 588}, + dictWord{6, 0, 393}, + dictWord{ + 13, + 0, + 106, + }, + dictWord{18, 0, 49}, + dictWord{147, 0, 41}, + dictWord{6, 0, 211}, + dictWord{7, 0, 1690}, + dictWord{11, 0, 486}, + dictWord{140, 0, 369}, + dictWord{ + 137, + 11, + 883, + }, + dictWord{4, 11, 703}, + dictWord{135, 11, 207}, + dictWord{4, 0, 187}, + dictWord{5, 0, 184}, + dictWord{5, 0, 690}, + dictWord{7, 0, 1869}, + dictWord{10, 0, 756}, + dictWord{139, 0, 783}, + dictWord{132, 11, 571}, + dictWord{134, 0, 1382}, + dictWord{5, 0, 175}, + dictWord{6, 10, 77}, + dictWord{6, 10, 157}, + dictWord{7, 10, 974}, + dictWord{7, 10, 1301}, + dictWord{7, 10, 1339}, + dictWord{7, 10, 1490}, + dictWord{7, 10, 1873}, + dictWord{137, 
10, 628}, + dictWord{134, 0, 1493}, + dictWord{ + 5, + 11, + 873, + }, + dictWord{133, 11, 960}, + dictWord{134, 0, 1007}, + dictWord{12, 11, 93}, + dictWord{12, 11, 501}, + dictWord{13, 11, 362}, + dictWord{14, 11, 151}, + dictWord{15, 11, 40}, + dictWord{15, 11, 59}, + dictWord{16, 11, 46}, + dictWord{17, 11, 25}, + dictWord{18, 11, 14}, + dictWord{18, 11, 134}, + dictWord{19, 11, 25}, + dictWord{ + 19, + 11, + 69, + }, + dictWord{20, 11, 16}, + dictWord{20, 11, 19}, + dictWord{20, 11, 66}, + dictWord{21, 11, 23}, + dictWord{21, 11, 25}, + dictWord{150, 11, 42}, + dictWord{ + 11, + 10, + 919, + }, + dictWord{141, 10, 409}, + dictWord{134, 0, 219}, + dictWord{5, 0, 582}, + dictWord{6, 0, 1646}, + dictWord{7, 0, 99}, + dictWord{7, 0, 1962}, + dictWord{ + 7, + 0, + 1986, + }, + dictWord{8, 0, 515}, + dictWord{8, 0, 773}, + dictWord{9, 0, 23}, + dictWord{9, 0, 491}, + dictWord{12, 0, 620}, + dictWord{142, 0, 93}, + dictWord{133, 0, 851}, + dictWord{5, 11, 33}, + dictWord{134, 11, 470}, + dictWord{135, 11, 1291}, + dictWord{134, 0, 1278}, + dictWord{135, 11, 1882}, + dictWord{135, 10, 1489}, + dictWord{132, 0, 1000}, + dictWord{138, 0, 982}, + dictWord{8, 0, 762}, + dictWord{8, 0, 812}, + dictWord{137, 0, 910}, + dictWord{6, 11, 47}, + dictWord{7, 11, 90}, + dictWord{ + 7, + 11, + 664, + }, + dictWord{7, 11, 830}, + dictWord{7, 11, 1380}, + dictWord{7, 11, 2025}, + dictWord{8, 11, 448}, + dictWord{136, 11, 828}, + dictWord{4, 0, 98}, + dictWord{ + 4, + 0, + 940, + }, + dictWord{6, 0, 1819}, + dictWord{6, 0, 1834}, + dictWord{6, 0, 1841}, + dictWord{7, 0, 1365}, + dictWord{8, 0, 859}, + dictWord{8, 0, 897}, + dictWord{8, 0, 918}, + dictWord{9, 0, 422}, + dictWord{9, 0, 670}, + dictWord{10, 0, 775}, + dictWord{10, 0, 894}, + dictWord{10, 0, 909}, + dictWord{10, 0, 910}, + dictWord{10, 0, 935}, + dictWord{ + 11, + 0, + 210, + }, + dictWord{12, 0, 750}, + dictWord{12, 0, 755}, + dictWord{13, 0, 26}, + dictWord{13, 0, 457}, + dictWord{13, 0, 476}, + dictWord{16, 0, 100}, + 
dictWord{16, 0, 109}, + dictWord{18, 0, 173}, + dictWord{18, 0, 175}, + dictWord{8, 10, 398}, + dictWord{9, 10, 681}, + dictWord{139, 10, 632}, + dictWord{9, 11, 417}, + dictWord{ + 137, + 11, + 493, + }, + dictWord{136, 10, 645}, + dictWord{138, 0, 906}, + dictWord{134, 0, 1730}, + dictWord{134, 10, 20}, + dictWord{133, 11, 1019}, + dictWord{134, 0, 1185}, + dictWord{10, 0, 40}, + dictWord{136, 10, 769}, + dictWord{9, 0, 147}, + dictWord{134, 11, 208}, + dictWord{140, 0, 650}, + dictWord{5, 0, 209}, + dictWord{6, 0, 30}, + dictWord{11, 0, 56}, + dictWord{139, 0, 305}, + dictWord{132, 0, 553}, + dictWord{138, 11, 344}, + dictWord{6, 11, 68}, + dictWord{7, 11, 398}, + dictWord{7, 11, 448}, + dictWord{ + 7, + 11, + 1629, + }, + dictWord{7, 11, 1813}, + dictWord{8, 11, 387}, + dictWord{8, 11, 442}, + dictWord{9, 11, 710}, + dictWord{10, 11, 282}, + dictWord{138, 11, 722}, + dictWord{5, 0, 597}, + dictWord{14, 0, 20}, + dictWord{142, 11, 20}, + dictWord{135, 0, 1614}, + dictWord{135, 10, 1757}, + dictWord{4, 0, 150}, + dictWord{5, 0, 303}, + dictWord{6, 0, 327}, + dictWord{135, 10, 937}, + dictWord{16, 0, 49}, + dictWord{7, 10, 1652}, + dictWord{144, 11, 49}, + dictWord{8, 0, 192}, + dictWord{10, 0, 78}, + dictWord{ + 141, + 0, + 359, + }, + dictWord{135, 0, 786}, + dictWord{143, 0, 134}, + dictWord{6, 0, 1638}, + dictWord{7, 0, 79}, + dictWord{7, 0, 496}, + dictWord{9, 0, 138}, + dictWord{ + 10, + 0, + 336, + }, + dictWord{11, 0, 12}, + dictWord{12, 0, 412}, + dictWord{12, 0, 440}, + dictWord{142, 0, 305}, + dictWord{136, 11, 491}, + dictWord{4, 10, 579}, + dictWord{ + 5, + 10, + 226, + }, + dictWord{5, 10, 323}, + dictWord{135, 10, 960}, + dictWord{7, 0, 204}, + dictWord{7, 0, 415}, + dictWord{8, 0, 42}, + dictWord{10, 0, 85}, + dictWord{139, 0, 564}, + dictWord{132, 0, 614}, + dictWord{4, 11, 403}, + dictWord{5, 11, 441}, + dictWord{7, 11, 450}, + dictWord{11, 11, 101}, + dictWord{12, 11, 193}, + dictWord{141, 11, 430}, + dictWord{135, 11, 1927}, + dictWord{135, 11, 
1330}, + dictWord{4, 0, 3}, + dictWord{5, 0, 247}, + dictWord{5, 0, 644}, + dictWord{7, 0, 744}, + dictWord{7, 0, 1207}, + dictWord{7, 0, 1225}, + dictWord{7, 0, 1909}, + dictWord{146, 0, 147}, + dictWord{136, 0, 942}, + dictWord{4, 0, 1019}, + dictWord{134, 0, 2023}, + dictWord{5, 11, 679}, + dictWord{133, 10, 973}, + dictWord{5, 0, 285}, + dictWord{9, 0, 67}, + dictWord{13, 0, 473}, + dictWord{143, 0, 82}, + dictWord{7, 11, 328}, + dictWord{137, 11, 326}, + dictWord{151, 0, 8}, + dictWord{6, 10, 135}, + dictWord{135, 10, 1176}, + dictWord{135, 11, 1128}, + dictWord{134, 0, 1309}, + dictWord{135, 11, 1796}, + dictWord{ + 135, + 10, + 314, + }, + dictWord{4, 11, 574}, + dictWord{7, 11, 350}, + dictWord{7, 11, 1024}, + dictWord{8, 11, 338}, + dictWord{9, 11, 677}, + dictWord{10, 11, 808}, + dictWord{ + 139, + 11, + 508, + }, + dictWord{7, 11, 818}, + dictWord{17, 11, 14}, + dictWord{17, 11, 45}, + dictWord{18, 11, 75}, + dictWord{148, 11, 18}, + dictWord{146, 10, 4}, + dictWord{ + 135, + 11, + 1081, + }, + dictWord{4, 0, 29}, + dictWord{6, 0, 532}, + dictWord{7, 0, 1628}, + dictWord{7, 0, 1648}, + dictWord{9, 0, 350}, + dictWord{10, 0, 433}, + dictWord{11, 0, 97}, + dictWord{11, 0, 557}, + dictWord{11, 0, 745}, + dictWord{12, 0, 289}, + dictWord{12, 0, 335}, + dictWord{12, 0, 348}, + dictWord{12, 0, 606}, + dictWord{13, 0, 116}, + dictWord{13, 0, 233}, + dictWord{13, 0, 466}, + dictWord{14, 0, 181}, + dictWord{14, 0, 209}, + dictWord{14, 0, 232}, + dictWord{14, 0, 236}, + dictWord{14, 0, 300}, + dictWord{ + 16, + 0, + 41, + }, + dictWord{148, 0, 97}, + dictWord{7, 0, 318}, + dictWord{6, 10, 281}, + dictWord{8, 10, 282}, + dictWord{8, 10, 480}, + dictWord{8, 10, 499}, + dictWord{9, 10, 198}, + dictWord{10, 10, 143}, + dictWord{10, 10, 169}, + dictWord{10, 10, 211}, + dictWord{10, 10, 417}, + dictWord{10, 10, 574}, + dictWord{11, 10, 147}, + dictWord{ + 11, + 10, + 395, + }, + dictWord{12, 10, 75}, + dictWord{12, 10, 407}, + dictWord{12, 10, 608}, + dictWord{13, 10, 
500}, + dictWord{142, 10, 251}, + dictWord{135, 11, 1676}, + dictWord{135, 11, 2037}, + dictWord{135, 0, 1692}, + dictWord{5, 0, 501}, + dictWord{7, 0, 1704}, + dictWord{9, 0, 553}, + dictWord{11, 0, 520}, + dictWord{12, 0, 557}, + dictWord{141, 0, 249}, + dictWord{6, 0, 1527}, + dictWord{14, 0, 324}, + dictWord{15, 0, 55}, + dictWord{15, 0, 80}, + dictWord{14, 11, 324}, + dictWord{15, 11, 55}, + dictWord{143, 11, 80}, + dictWord{135, 10, 1776}, + dictWord{8, 0, 988}, + dictWord{137, 11, 297}, + dictWord{132, 10, 419}, + dictWord{142, 0, 223}, + dictWord{ + 139, + 11, + 234, + }, + dictWord{7, 0, 1123}, + dictWord{12, 0, 508}, + dictWord{14, 0, 102}, + dictWord{14, 0, 226}, + dictWord{144, 0, 57}, + dictWord{4, 10, 138}, + dictWord{ + 7, + 10, + 1012, + }, + dictWord{7, 10, 1280}, + dictWord{137, 10, 76}, + dictWord{7, 0, 1764}, + dictWord{5, 10, 29}, + dictWord{140, 10, 638}, + dictWord{134, 0, 2015}, + dictWord{134, 0, 1599}, + dictWord{138, 11, 56}, + dictWord{6, 11, 306}, + dictWord{7, 11, 1140}, + dictWord{7, 11, 1340}, + dictWord{8, 11, 133}, + dictWord{ + 138, + 11, + 449, + }, + dictWord{139, 11, 1011}, + dictWord{6, 10, 1710}, + dictWord{135, 10, 2038}, + dictWord{7, 11, 1763}, + dictWord{140, 11, 310}, + dictWord{6, 0, 129}, + dictWord{4, 10, 17}, + dictWord{5, 10, 23}, + dictWord{7, 10, 995}, + dictWord{11, 10, 383}, + dictWord{11, 10, 437}, + dictWord{12, 10, 460}, + dictWord{140, 10, 532}, + dictWord{5, 11, 329}, + dictWord{136, 11, 260}, + dictWord{133, 10, 862}, + dictWord{132, 0, 534}, + dictWord{6, 0, 811}, + dictWord{135, 0, 626}, + dictWord{ + 132, + 11, + 657, + }, + dictWord{4, 0, 25}, + dictWord{5, 0, 60}, + dictWord{6, 0, 504}, + dictWord{7, 0, 614}, + dictWord{7, 0, 1155}, + dictWord{12, 0, 0}, + dictWord{152, 11, 7}, + dictWord{ + 7, + 0, + 1248, + }, + dictWord{11, 0, 621}, + dictWord{139, 0, 702}, + dictWord{137, 0, 321}, + dictWord{8, 10, 70}, + dictWord{12, 10, 171}, + dictWord{141, 10, 272}, + dictWord{ + 10, + 10, + 233, + }, + 
dictWord{139, 10, 76}, + dictWord{4, 0, 379}, + dictWord{7, 0, 1397}, + dictWord{134, 10, 442}, + dictWord{5, 11, 66}, + dictWord{7, 11, 1896}, + dictWord{ + 136, + 11, + 288, + }, + dictWord{134, 11, 1643}, + dictWord{134, 10, 1709}, + dictWord{4, 11, 21}, + dictWord{5, 11, 91}, + dictWord{5, 11, 570}, + dictWord{5, 11, 648}, + dictWord{5, 11, 750}, + dictWord{5, 11, 781}, + dictWord{6, 11, 54}, + dictWord{6, 11, 112}, + dictWord{6, 11, 402}, + dictWord{6, 11, 1732}, + dictWord{7, 11, 315}, + dictWord{ + 7, + 11, + 749, + }, + dictWord{7, 11, 1347}, + dictWord{7, 11, 1900}, + dictWord{9, 11, 78}, + dictWord{9, 11, 508}, + dictWord{10, 11, 611}, + dictWord{11, 11, 510}, + dictWord{ + 11, + 11, + 728, + }, + dictWord{13, 11, 36}, + dictWord{14, 11, 39}, + dictWord{16, 11, 83}, + dictWord{17, 11, 124}, + dictWord{148, 11, 30}, + dictWord{4, 0, 118}, + dictWord{ + 6, + 0, + 274, + }, + dictWord{6, 0, 361}, + dictWord{7, 0, 75}, + dictWord{141, 0, 441}, + dictWord{10, 11, 322}, + dictWord{10, 11, 719}, + dictWord{139, 11, 407}, + dictWord{ + 147, + 10, + 119, + }, + dictWord{12, 11, 549}, + dictWord{14, 11, 67}, + dictWord{147, 11, 60}, + dictWord{11, 10, 69}, + dictWord{12, 10, 105}, + dictWord{12, 10, 117}, + dictWord{13, 10, 213}, + dictWord{14, 10, 13}, + dictWord{14, 10, 62}, + dictWord{14, 10, 177}, + dictWord{14, 10, 421}, + dictWord{15, 10, 19}, + dictWord{146, 10, 141}, + dictWord{9, 0, 841}, + dictWord{137, 10, 309}, + dictWord{7, 10, 608}, + dictWord{7, 10, 976}, + dictWord{8, 11, 125}, + dictWord{8, 11, 369}, + dictWord{8, 11, 524}, + dictWord{9, 10, 146}, + dictWord{10, 10, 206}, + dictWord{10, 11, 486}, + dictWord{10, 10, 596}, + dictWord{11, 11, 13}, + dictWord{11, 11, 381}, + dictWord{11, 11, 736}, + dictWord{11, 11, 766}, + dictWord{11, 11, 845}, + dictWord{13, 11, 114}, + dictWord{13, 10, 218}, + dictWord{13, 11, 292}, + dictWord{14, 11, 47}, + dictWord{ + 142, + 10, + 153, + }, + dictWord{12, 0, 693}, + dictWord{135, 11, 759}, + dictWord{5, 0, 314}, 
+ dictWord{6, 0, 221}, + dictWord{7, 0, 419}, + dictWord{10, 0, 650}, + dictWord{11, 0, 396}, + dictWord{12, 0, 156}, + dictWord{13, 0, 369}, + dictWord{14, 0, 333}, + dictWord{145, 0, 47}, + dictWord{6, 11, 1684}, + dictWord{6, 11, 1731}, + dictWord{7, 11, 356}, + dictWord{7, 11, 1932}, + dictWord{8, 11, 54}, + dictWord{8, 11, 221}, + dictWord{9, 11, 225}, + dictWord{9, 11, 356}, + dictWord{10, 11, 77}, + dictWord{10, 11, 446}, + dictWord{10, 11, 731}, + dictWord{12, 11, 404}, + dictWord{141, 11, 491}, + dictWord{132, 11, 375}, + dictWord{4, 10, 518}, + dictWord{135, 10, 1136}, + dictWord{ + 4, + 0, + 913, + }, + dictWord{4, 11, 411}, + dictWord{11, 11, 643}, + dictWord{140, 11, 115}, + dictWord{4, 11, 80}, + dictWord{133, 11, 44}, + dictWord{8, 10, 689}, + dictWord{ + 137, + 10, + 863, + }, + dictWord{138, 0, 880}, + dictWord{4, 10, 18}, + dictWord{7, 10, 145}, + dictWord{7, 10, 444}, + dictWord{7, 10, 1278}, + dictWord{8, 10, 49}, + dictWord{ + 8, + 10, + 400, + }, + dictWord{9, 10, 71}, + dictWord{9, 10, 250}, + dictWord{10, 10, 459}, + dictWord{12, 10, 160}, + dictWord{144, 10, 24}, + dictWord{136, 0, 475}, + dictWord{ + 5, + 0, + 1016, + }, + dictWord{5, 11, 299}, + dictWord{135, 11, 1083}, + dictWord{7, 0, 602}, + dictWord{8, 0, 179}, + dictWord{10, 0, 781}, + dictWord{140, 0, 126}, + dictWord{ + 6, + 0, + 329, + }, + dictWord{138, 0, 111}, + dictWord{135, 0, 1864}, + dictWord{4, 11, 219}, + dictWord{7, 11, 1761}, + dictWord{137, 11, 86}, + dictWord{6, 0, 1888}, + dictWord{ + 6, + 0, + 1892, + }, + dictWord{6, 0, 1901}, + dictWord{6, 0, 1904}, + dictWord{9, 0, 953}, + dictWord{9, 0, 985}, + dictWord{9, 0, 991}, + dictWord{9, 0, 1001}, + dictWord{12, 0, 818}, + dictWord{12, 0, 846}, + dictWord{12, 0, 847}, + dictWord{12, 0, 861}, + dictWord{12, 0, 862}, + dictWord{12, 0, 873}, + dictWord{12, 0, 875}, + dictWord{12, 0, 877}, + dictWord{12, 0, 879}, + dictWord{12, 0, 881}, + dictWord{12, 0, 884}, + dictWord{12, 0, 903}, + dictWord{12, 0, 915}, + dictWord{12, 0, 
926}, + dictWord{12, 0, 939}, + dictWord{ + 15, + 0, + 182, + }, + dictWord{15, 0, 219}, + dictWord{15, 0, 255}, + dictWord{18, 0, 191}, + dictWord{18, 0, 209}, + dictWord{18, 0, 211}, + dictWord{149, 0, 41}, + dictWord{ + 5, + 11, + 328, + }, + dictWord{135, 11, 918}, + dictWord{137, 0, 780}, + dictWord{12, 0, 82}, + dictWord{143, 0, 36}, + dictWord{133, 10, 1010}, + dictWord{5, 0, 821}, + dictWord{ + 134, + 0, + 1687, + }, + dictWord{133, 11, 514}, + dictWord{132, 0, 956}, + dictWord{134, 0, 1180}, + dictWord{10, 0, 112}, + dictWord{5, 10, 87}, + dictWord{7, 10, 313}, + dictWord{ + 7, + 10, + 1103, + }, + dictWord{10, 10, 582}, + dictWord{11, 10, 389}, + dictWord{11, 10, 813}, + dictWord{12, 10, 385}, + dictWord{13, 10, 286}, + dictWord{14, 10, 124}, + dictWord{146, 10, 108}, + dictWord{5, 0, 71}, + dictWord{7, 0, 1407}, + dictWord{9, 0, 704}, + dictWord{10, 0, 261}, + dictWord{10, 0, 619}, + dictWord{11, 0, 547}, + dictWord{11, 0, 619}, + dictWord{143, 0, 157}, + dictWord{4, 0, 531}, + dictWord{5, 0, 455}, + dictWord{5, 11, 301}, + dictWord{6, 11, 571}, + dictWord{14, 11, 49}, + dictWord{ + 146, + 11, + 102, + }, + dictWord{132, 10, 267}, + dictWord{6, 0, 385}, + dictWord{7, 0, 2008}, + dictWord{9, 0, 337}, + dictWord{138, 0, 517}, + dictWord{133, 11, 726}, + dictWord{133, 11, 364}, + dictWord{4, 11, 76}, + dictWord{7, 11, 1550}, + dictWord{9, 11, 306}, + dictWord{9, 11, 430}, + dictWord{9, 11, 663}, + dictWord{10, 11, 683}, + dictWord{11, 11, 427}, + dictWord{11, 11, 753}, + dictWord{12, 11, 334}, + dictWord{12, 11, 442}, + dictWord{14, 11, 258}, + dictWord{14, 11, 366}, + dictWord{ + 143, + 11, + 131, + }, + dictWord{6, 0, 1865}, + dictWord{6, 0, 1879}, + dictWord{6, 0, 1881}, + dictWord{6, 0, 1894}, + dictWord{6, 0, 1908}, + dictWord{9, 0, 915}, + dictWord{9, 0, 926}, + dictWord{9, 0, 940}, + dictWord{9, 0, 943}, + dictWord{9, 0, 966}, + dictWord{9, 0, 980}, + dictWord{9, 0, 989}, + dictWord{9, 0, 1005}, + dictWord{9, 0, 1010}, + dictWord{ + 12, + 0, + 813, + 
}, + dictWord{12, 0, 817}, + dictWord{12, 0, 840}, + dictWord{12, 0, 843}, + dictWord{12, 0, 855}, + dictWord{12, 0, 864}, + dictWord{12, 0, 871}, + dictWord{12, 0, 872}, + dictWord{12, 0, 899}, + dictWord{12, 0, 905}, + dictWord{12, 0, 924}, + dictWord{15, 0, 171}, + dictWord{15, 0, 181}, + dictWord{15, 0, 224}, + dictWord{15, 0, 235}, + dictWord{15, 0, 251}, + dictWord{146, 0, 184}, + dictWord{137, 11, 52}, + dictWord{5, 0, 16}, + dictWord{6, 0, 86}, + dictWord{6, 0, 603}, + dictWord{7, 0, 292}, + dictWord{7, 0, 561}, + dictWord{8, 0, 257}, + dictWord{8, 0, 382}, + dictWord{9, 0, 721}, + dictWord{9, 0, 778}, + dictWord{11, 0, 581}, + dictWord{140, 0, 466}, + dictWord{4, 0, 486}, + dictWord{ + 5, + 0, + 491, + }, + dictWord{135, 10, 1121}, + dictWord{4, 0, 72}, + dictWord{6, 0, 265}, + dictWord{135, 0, 1300}, + dictWord{135, 11, 1183}, + dictWord{10, 10, 249}, + dictWord{139, 10, 209}, + dictWord{132, 10, 561}, + dictWord{137, 11, 519}, + dictWord{4, 11, 656}, + dictWord{4, 10, 760}, + dictWord{135, 11, 779}, + dictWord{ + 9, + 10, + 154, + }, + dictWord{140, 10, 485}, + dictWord{135, 11, 1793}, + dictWord{135, 11, 144}, + dictWord{136, 10, 255}, + dictWord{133, 0, 621}, + dictWord{4, 10, 368}, + dictWord{135, 10, 641}, + dictWord{135, 11, 1373}, + dictWord{7, 11, 554}, + dictWord{7, 11, 605}, + dictWord{141, 11, 10}, + dictWord{137, 0, 234}, + dictWord{ + 5, + 0, + 815, + }, + dictWord{6, 0, 1688}, + dictWord{134, 0, 1755}, + dictWord{5, 11, 838}, + dictWord{5, 11, 841}, + dictWord{134, 11, 1649}, + dictWord{7, 0, 1987}, + dictWord{ + 7, + 0, + 2040, + }, + dictWord{136, 0, 743}, + dictWord{133, 11, 1012}, + dictWord{6, 0, 197}, + dictWord{136, 0, 205}, + dictWord{6, 0, 314}, + dictWord{134, 11, 314}, + dictWord{144, 11, 53}, + dictWord{6, 11, 251}, + dictWord{7, 11, 365}, + dictWord{7, 11, 1357}, + dictWord{7, 11, 1497}, + dictWord{8, 11, 154}, + dictWord{141, 11, 281}, + dictWord{133, 11, 340}, + dictWord{6, 0, 452}, + dictWord{7, 0, 312}, + dictWord{138, 0, 
219}, + dictWord{138, 0, 589}, + dictWord{4, 0, 333}, + dictWord{9, 0, 176}, + dictWord{12, 0, 353}, + dictWord{141, 0, 187}, + dictWord{9, 10, 92}, + dictWord{147, 10, 91}, + dictWord{134, 0, 1110}, + dictWord{11, 0, 47}, + dictWord{139, 11, 495}, + dictWord{6, 10, 525}, + dictWord{8, 10, 806}, + dictWord{9, 10, 876}, + dictWord{140, 10, 284}, + dictWord{8, 11, 261}, + dictWord{9, 11, 144}, + dictWord{9, 11, 466}, + dictWord{10, 11, 370}, + dictWord{12, 11, 470}, + dictWord{13, 11, 144}, + dictWord{142, 11, 348}, + dictWord{137, 11, 897}, + dictWord{8, 0, 863}, + dictWord{8, 0, 864}, + dictWord{8, 0, 868}, + dictWord{8, 0, 884}, + dictWord{10, 0, 866}, + dictWord{10, 0, 868}, + dictWord{10, 0, 873}, + dictWord{10, 0, 911}, + dictWord{10, 0, 912}, + dictWord{ + 10, + 0, + 944, + }, + dictWord{12, 0, 727}, + dictWord{6, 11, 248}, + dictWord{9, 11, 546}, + dictWord{10, 11, 535}, + dictWord{11, 11, 681}, + dictWord{141, 11, 135}, + dictWord{ + 6, + 0, + 300, + }, + dictWord{135, 0, 1515}, + dictWord{134, 0, 1237}, + dictWord{139, 10, 958}, + dictWord{133, 10, 594}, + dictWord{140, 11, 250}, + dictWord{ + 134, + 0, + 1685, + }, + dictWord{134, 11, 567}, + dictWord{7, 0, 135}, + dictWord{8, 0, 7}, + dictWord{8, 0, 62}, + dictWord{9, 0, 243}, + dictWord{10, 0, 658}, + dictWord{10, 0, 697}, + dictWord{11, 0, 456}, + dictWord{139, 0, 756}, + dictWord{9, 0, 395}, + dictWord{138, 0, 79}, + dictWord{6, 10, 1641}, + dictWord{136, 10, 820}, + dictWord{4, 10, 302}, + dictWord{135, 10, 1766}, + dictWord{134, 11, 174}, + dictWord{135, 10, 1313}, + dictWord{135, 0, 631}, + dictWord{134, 10, 1674}, + dictWord{134, 11, 395}, + dictWord{138, 0, 835}, + dictWord{7, 0, 406}, + dictWord{7, 0, 459}, + dictWord{8, 0, 606}, + dictWord{139, 0, 726}, + dictWord{134, 11, 617}, + dictWord{134, 0, 979}, + dictWord{ + 6, + 10, + 389, + }, + dictWord{7, 10, 149}, + dictWord{9, 10, 142}, + dictWord{138, 10, 94}, + dictWord{5, 11, 878}, + dictWord{133, 11, 972}, + dictWord{6, 10, 8}, + dictWord{ + 
7, + 10, + 1881, + }, + dictWord{8, 10, 91}, + dictWord{136, 11, 511}, + dictWord{133, 0, 612}, + dictWord{132, 11, 351}, + dictWord{4, 0, 372}, + dictWord{7, 0, 482}, + dictWord{ + 8, + 0, + 158, + }, + dictWord{9, 0, 602}, + dictWord{9, 0, 615}, + dictWord{10, 0, 245}, + dictWord{10, 0, 678}, + dictWord{10, 0, 744}, + dictWord{11, 0, 248}, + dictWord{ + 139, + 0, + 806, + }, + dictWord{5, 0, 854}, + dictWord{135, 0, 1991}, + dictWord{132, 11, 286}, + dictWord{135, 11, 344}, + dictWord{7, 11, 438}, + dictWord{7, 11, 627}, + dictWord{ + 7, + 11, + 1516, + }, + dictWord{8, 11, 40}, + dictWord{9, 11, 56}, + dictWord{9, 11, 294}, + dictWord{10, 11, 30}, + dictWord{10, 11, 259}, + dictWord{11, 11, 969}, + dictWord{ + 146, + 11, + 148, + }, + dictWord{135, 0, 1492}, + dictWord{5, 11, 259}, + dictWord{7, 11, 414}, + dictWord{7, 11, 854}, + dictWord{142, 11, 107}, + dictWord{135, 10, 1746}, + dictWord{6, 0, 833}, + dictWord{134, 0, 998}, + dictWord{135, 10, 24}, + dictWord{6, 0, 750}, + dictWord{135, 0, 1739}, + dictWord{4, 10, 503}, + dictWord{ + 135, + 10, + 1661, + }, + dictWord{5, 10, 130}, + dictWord{7, 10, 1314}, + dictWord{9, 10, 610}, + dictWord{10, 10, 718}, + dictWord{11, 10, 601}, + dictWord{11, 10, 819}, + dictWord{ + 11, + 10, + 946, + }, + dictWord{140, 10, 536}, + dictWord{10, 10, 149}, + dictWord{11, 10, 280}, + dictWord{142, 10, 336}, + dictWord{132, 11, 738}, + dictWord{ + 135, + 10, + 1946, + }, + dictWord{5, 0, 195}, + dictWord{135, 0, 1685}, + dictWord{7, 0, 1997}, + dictWord{8, 0, 730}, + dictWord{139, 0, 1006}, + dictWord{151, 11, 17}, + dictWord{ + 133, + 11, + 866, + }, + dictWord{14, 0, 463}, + dictWord{14, 0, 470}, + dictWord{150, 0, 61}, + dictWord{5, 0, 751}, + dictWord{8, 0, 266}, + dictWord{11, 0, 578}, + dictWord{ + 4, + 10, + 392, + }, + dictWord{135, 10, 1597}, + dictWord{5, 10, 433}, + dictWord{9, 10, 633}, + dictWord{139, 10, 629}, + dictWord{135, 0, 821}, + dictWord{6, 0, 715}, + dictWord{ + 134, + 0, + 1325, + }, + dictWord{133, 11, 
116}, + dictWord{6, 0, 868}, + dictWord{132, 11, 457}, + dictWord{134, 0, 959}, + dictWord{6, 10, 234}, + dictWord{138, 11, 199}, + dictWord{7, 0, 1053}, + dictWord{7, 10, 1950}, + dictWord{8, 10, 680}, + dictWord{11, 10, 817}, + dictWord{147, 10, 88}, + dictWord{7, 10, 1222}, + dictWord{ + 138, + 10, + 386, + }, + dictWord{5, 0, 950}, + dictWord{5, 0, 994}, + dictWord{6, 0, 351}, + dictWord{134, 0, 1124}, + dictWord{134, 0, 1081}, + dictWord{7, 0, 1595}, + dictWord{6, 10, 5}, + dictWord{11, 10, 249}, + dictWord{12, 10, 313}, + dictWord{16, 10, 66}, + dictWord{145, 10, 26}, + dictWord{148, 0, 59}, + dictWord{5, 11, 527}, + dictWord{6, 11, 189}, + dictWord{135, 11, 859}, + dictWord{5, 10, 963}, + dictWord{6, 10, 1773}, + dictWord{11, 11, 104}, + dictWord{11, 11, 554}, + dictWord{15, 11, 60}, + dictWord{ + 143, + 11, + 125, + }, + dictWord{135, 0, 47}, + dictWord{137, 0, 684}, + dictWord{134, 11, 116}, + dictWord{134, 0, 1606}, + dictWord{134, 0, 777}, + dictWord{7, 0, 1020}, + dictWord{ + 8, + 10, + 509, + }, + dictWord{136, 10, 792}, + dictWord{135, 0, 1094}, + dictWord{132, 0, 350}, + dictWord{133, 11, 487}, + dictWord{4, 11, 86}, + dictWord{5, 11, 667}, + dictWord{5, 11, 753}, + dictWord{6, 11, 316}, + dictWord{6, 11, 455}, + dictWord{135, 11, 946}, + dictWord{7, 0, 1812}, + dictWord{13, 0, 259}, + dictWord{13, 0, 356}, + dictWord{14, 0, 242}, + dictWord{147, 0, 114}, + dictWord{132, 10, 931}, + dictWord{133, 0, 967}, + dictWord{4, 0, 473}, + dictWord{7, 0, 623}, + dictWord{8, 0, 808}, + dictWord{ + 9, + 0, + 871, + }, + dictWord{9, 0, 893}, + dictWord{11, 0, 38}, + dictWord{11, 0, 431}, + dictWord{12, 0, 112}, + dictWord{12, 0, 217}, + dictWord{12, 0, 243}, + dictWord{12, 0, 562}, + dictWord{12, 0, 663}, + dictWord{12, 0, 683}, + dictWord{13, 0, 141}, + dictWord{13, 0, 197}, + dictWord{13, 0, 227}, + dictWord{13, 0, 406}, + dictWord{13, 0, 487}, + dictWord{14, 0, 156}, + dictWord{14, 0, 203}, + dictWord{14, 0, 224}, + dictWord{14, 0, 256}, + dictWord{18, 0, 58}, 
+ dictWord{150, 0, 0}, + dictWord{138, 0, 286}, + dictWord{ + 7, + 10, + 943, + }, + dictWord{139, 10, 614}, + dictWord{135, 10, 1837}, + dictWord{150, 11, 45}, + dictWord{132, 0, 798}, + dictWord{4, 0, 222}, + dictWord{7, 0, 286}, + dictWord{136, 0, 629}, + dictWord{4, 11, 79}, + dictWord{7, 11, 1773}, + dictWord{10, 11, 450}, + dictWord{11, 11, 589}, + dictWord{13, 11, 332}, + dictWord{13, 11, 493}, + dictWord{14, 11, 183}, + dictWord{14, 11, 334}, + dictWord{14, 11, 362}, + dictWord{14, 11, 368}, + dictWord{14, 11, 376}, + dictWord{14, 11, 379}, + dictWord{ + 19, + 11, + 90, + }, + dictWord{19, 11, 103}, + dictWord{19, 11, 127}, + dictWord{148, 11, 90}, + dictWord{5, 0, 337}, + dictWord{11, 0, 513}, + dictWord{11, 0, 889}, + dictWord{ + 11, + 0, + 961, + }, + dictWord{12, 0, 461}, + dictWord{13, 0, 79}, + dictWord{15, 0, 121}, + dictWord{4, 10, 90}, + dictWord{5, 10, 545}, + dictWord{7, 10, 754}, + dictWord{9, 10, 186}, + dictWord{10, 10, 72}, + dictWord{10, 10, 782}, + dictWord{11, 10, 577}, + dictWord{11, 10, 610}, + dictWord{12, 10, 354}, + dictWord{12, 10, 362}, + dictWord{ + 140, + 10, + 595, + }, + dictWord{141, 0, 306}, + dictWord{136, 0, 146}, + dictWord{7, 0, 1646}, + dictWord{9, 10, 329}, + dictWord{11, 10, 254}, + dictWord{141, 11, 124}, + dictWord{ + 4, + 0, + 465, + }, + dictWord{135, 0, 1663}, + dictWord{132, 0, 525}, + dictWord{133, 11, 663}, + dictWord{10, 0, 299}, + dictWord{18, 0, 74}, + dictWord{9, 10, 187}, + dictWord{ + 11, + 10, + 1016, + }, + dictWord{145, 10, 44}, + dictWord{7, 0, 165}, + dictWord{7, 0, 919}, + dictWord{4, 10, 506}, + dictWord{136, 10, 517}, + dictWord{5, 10, 295}, + dictWord{ + 135, + 10, + 1680, + }, + dictWord{133, 11, 846}, + dictWord{134, 0, 1064}, + dictWord{5, 11, 378}, + dictWord{7, 11, 1402}, + dictWord{7, 11, 1414}, + dictWord{8, 11, 465}, + dictWord{9, 11, 286}, + dictWord{10, 11, 185}, + dictWord{10, 11, 562}, + dictWord{10, 11, 635}, + dictWord{11, 11, 31}, + dictWord{11, 11, 393}, + dictWord{ + 12, + 11, + 
456, + }, + dictWord{13, 11, 312}, + dictWord{18, 11, 65}, + dictWord{18, 11, 96}, + dictWord{147, 11, 89}, + dictWord{132, 0, 596}, + dictWord{7, 10, 987}, + dictWord{ + 9, + 10, + 688, + }, + dictWord{10, 10, 522}, + dictWord{11, 10, 788}, + dictWord{140, 10, 566}, + dictWord{6, 0, 82}, + dictWord{7, 0, 138}, + dictWord{7, 0, 517}, + dictWord{7, 0, 1741}, + dictWord{11, 0, 238}, + dictWord{4, 11, 648}, + dictWord{134, 10, 1775}, + dictWord{7, 0, 1233}, + dictWord{7, 10, 700}, + dictWord{7, 10, 940}, + dictWord{8, 10, 514}, + dictWord{9, 10, 116}, + dictWord{9, 10, 535}, + dictWord{10, 10, 118}, + dictWord{11, 10, 107}, + dictWord{11, 10, 148}, + dictWord{11, 10, 922}, + dictWord{ + 12, + 10, + 254, + }, + dictWord{12, 10, 421}, + dictWord{142, 10, 238}, + dictWord{4, 0, 962}, + dictWord{6, 0, 1824}, + dictWord{8, 0, 894}, + dictWord{12, 0, 708}, + dictWord{ + 12, + 0, + 725, + }, + dictWord{14, 0, 451}, + dictWord{20, 0, 94}, + dictWord{22, 0, 59}, + dictWord{150, 0, 62}, + dictWord{5, 11, 945}, + dictWord{6, 11, 1656}, + dictWord{6, 11, 1787}, + dictWord{7, 11, 167}, + dictWord{8, 11, 824}, + dictWord{9, 11, 391}, + dictWord{10, 11, 375}, + dictWord{139, 11, 185}, + dictWord{5, 0, 495}, + dictWord{7, 0, 834}, + dictWord{9, 0, 733}, + dictWord{139, 0, 378}, + dictWord{4, 10, 743}, + dictWord{135, 11, 1273}, + dictWord{6, 0, 1204}, + dictWord{7, 11, 1645}, + dictWord{8, 11, 352}, + dictWord{137, 11, 249}, + dictWord{139, 10, 292}, + dictWord{133, 0, 559}, + dictWord{132, 11, 152}, + dictWord{9, 0, 499}, + dictWord{10, 0, 341}, + dictWord{ + 15, + 0, + 144, + }, + dictWord{19, 0, 49}, + dictWord{7, 10, 1283}, + dictWord{9, 10, 227}, + dictWord{11, 10, 325}, + dictWord{11, 10, 408}, + dictWord{14, 10, 180}, + dictWord{ + 146, + 10, + 47, + }, + dictWord{6, 0, 21}, + dictWord{6, 0, 1737}, + dictWord{7, 0, 1444}, + dictWord{136, 0, 224}, + dictWord{133, 11, 1006}, + dictWord{7, 0, 1446}, + dictWord{ + 9, + 0, + 97, + }, + dictWord{17, 0, 15}, + dictWord{5, 10, 81}, + 
dictWord{7, 10, 146}, + dictWord{7, 10, 1342}, + dictWord{8, 10, 53}, + dictWord{8, 10, 561}, + dictWord{8, 10, 694}, + dictWord{8, 10, 754}, + dictWord{9, 10, 115}, + dictWord{9, 10, 894}, + dictWord{10, 10, 462}, + dictWord{10, 10, 813}, + dictWord{11, 10, 230}, + dictWord{11, 10, 657}, + dictWord{11, 10, 699}, + dictWord{11, 10, 748}, + dictWord{12, 10, 119}, + dictWord{12, 10, 200}, + dictWord{12, 10, 283}, + dictWord{142, 10, 273}, + dictWord{ + 5, + 10, + 408, + }, + dictWord{137, 10, 747}, + dictWord{135, 11, 431}, + dictWord{135, 11, 832}, + dictWord{6, 0, 729}, + dictWord{134, 0, 953}, + dictWord{4, 0, 727}, + dictWord{ + 8, + 0, + 565, + }, + dictWord{5, 11, 351}, + dictWord{7, 11, 264}, + dictWord{136, 11, 565}, + dictWord{134, 0, 1948}, + dictWord{5, 0, 519}, + dictWord{5, 11, 40}, + dictWord{ + 7, + 11, + 598, + }, + dictWord{7, 11, 1638}, + dictWord{8, 11, 78}, + dictWord{9, 11, 166}, + dictWord{9, 11, 640}, + dictWord{9, 11, 685}, + dictWord{9, 11, 773}, + dictWord{ + 11, + 11, + 215, + }, + dictWord{13, 11, 65}, + dictWord{14, 11, 172}, + dictWord{14, 11, 317}, + dictWord{145, 11, 6}, + dictWord{8, 11, 60}, + dictWord{9, 11, 343}, + dictWord{ + 139, + 11, + 769, + }, + dictWord{137, 11, 455}, + dictWord{134, 0, 1193}, + dictWord{140, 0, 790}, + dictWord{7, 11, 1951}, + dictWord{8, 11, 765}, + dictWord{8, 11, 772}, + dictWord{140, 11, 671}, + dictWord{7, 11, 108}, + dictWord{8, 11, 219}, + dictWord{8, 11, 388}, + dictWord{9, 11, 639}, + dictWord{9, 11, 775}, + dictWord{11, 11, 275}, + dictWord{140, 11, 464}, + dictWord{132, 11, 468}, + dictWord{7, 10, 30}, + dictWord{8, 10, 86}, + dictWord{8, 10, 315}, + dictWord{8, 10, 700}, + dictWord{9, 10, 576}, + dictWord{ + 9, + 10, + 858, + }, + dictWord{11, 10, 310}, + dictWord{11, 10, 888}, + dictWord{11, 10, 904}, + dictWord{12, 10, 361}, + dictWord{141, 10, 248}, + dictWord{5, 11, 15}, + dictWord{6, 11, 56}, + dictWord{7, 11, 1758}, + dictWord{8, 11, 500}, + dictWord{9, 11, 730}, + dictWord{11, 11, 331}, + 
dictWord{13, 11, 150}, + dictWord{142, 11, 282}, + dictWord{4, 0, 402}, + dictWord{7, 0, 2}, + dictWord{8, 0, 323}, + dictWord{136, 0, 479}, + dictWord{138, 10, 839}, + dictWord{11, 0, 580}, + dictWord{142, 0, 201}, + dictWord{ + 5, + 0, + 59, + }, + dictWord{135, 0, 672}, + dictWord{137, 10, 617}, + dictWord{146, 0, 34}, + dictWord{134, 11, 1886}, + dictWord{4, 0, 961}, + dictWord{136, 0, 896}, + dictWord{ + 6, + 0, + 1285, + }, + dictWord{5, 11, 205}, + dictWord{6, 11, 438}, + dictWord{137, 11, 711}, + dictWord{134, 10, 428}, + dictWord{7, 10, 524}, + dictWord{8, 10, 169}, + dictWord{8, 10, 234}, + dictWord{9, 10, 480}, + dictWord{138, 10, 646}, + dictWord{148, 0, 46}, + dictWord{141, 0, 479}, + dictWord{133, 11, 534}, + dictWord{6, 0, 2019}, + dictWord{134, 10, 1648}, + dictWord{4, 0, 85}, + dictWord{7, 0, 549}, + dictWord{7, 10, 1205}, + dictWord{138, 10, 637}, + dictWord{4, 0, 663}, + dictWord{5, 0, 94}, + dictWord{ + 7, + 11, + 235, + }, + dictWord{7, 11, 1475}, + dictWord{15, 11, 68}, + dictWord{146, 11, 120}, + dictWord{6, 11, 443}, + dictWord{9, 11, 237}, + dictWord{9, 11, 571}, + dictWord{ + 9, + 11, + 695, + }, + dictWord{10, 11, 139}, + dictWord{11, 11, 715}, + dictWord{12, 11, 417}, + dictWord{141, 11, 421}, + dictWord{132, 0, 783}, + dictWord{4, 0, 682}, + dictWord{8, 0, 65}, + dictWord{9, 10, 39}, + dictWord{10, 10, 166}, + dictWord{11, 10, 918}, + dictWord{12, 10, 635}, + dictWord{20, 10, 10}, + dictWord{22, 10, 27}, + dictWord{ + 22, + 10, + 43, + }, + dictWord{150, 10, 52}, + dictWord{6, 0, 11}, + dictWord{135, 0, 187}, + dictWord{132, 0, 522}, + dictWord{4, 0, 52}, + dictWord{135, 0, 661}, + dictWord{ + 4, + 0, + 383, + }, + dictWord{133, 0, 520}, + dictWord{135, 11, 546}, + dictWord{11, 0, 343}, + dictWord{142, 0, 127}, + dictWord{4, 11, 578}, + dictWord{7, 10, 157}, + dictWord{ + 7, + 11, + 624, + }, + dictWord{7, 11, 916}, + dictWord{8, 10, 279}, + dictWord{10, 11, 256}, + dictWord{11, 11, 87}, + dictWord{139, 11, 703}, + dictWord{134, 10, 
604}, + dictWord{ + 4, + 0, + 281, + }, + dictWord{5, 0, 38}, + dictWord{7, 0, 194}, + dictWord{7, 0, 668}, + dictWord{7, 0, 1893}, + dictWord{137, 0, 397}, + dictWord{7, 10, 945}, + dictWord{11, 10, 713}, + dictWord{139, 10, 744}, + dictWord{139, 10, 1022}, + dictWord{9, 0, 635}, + dictWord{139, 0, 559}, + dictWord{5, 11, 923}, + dictWord{7, 11, 490}, + dictWord{ + 12, + 11, + 553, + }, + dictWord{13, 11, 100}, + dictWord{14, 11, 118}, + dictWord{143, 11, 75}, + dictWord{132, 0, 975}, + dictWord{132, 10, 567}, + dictWord{137, 10, 859}, + dictWord{7, 10, 1846}, + dictWord{7, 11, 1846}, + dictWord{8, 10, 628}, + dictWord{136, 11, 628}, + dictWord{148, 0, 116}, + dictWord{138, 11, 750}, + dictWord{14, 0, 51}, + dictWord{14, 11, 51}, + dictWord{15, 11, 7}, + dictWord{148, 11, 20}, + dictWord{132, 0, 858}, + dictWord{134, 0, 1075}, + dictWord{4, 11, 924}, + dictWord{ + 133, + 10, + 762, + }, + dictWord{136, 0, 535}, + dictWord{133, 0, 448}, + dictWord{10, 10, 784}, + dictWord{141, 10, 191}, + dictWord{133, 10, 298}, + dictWord{7, 0, 610}, + dictWord{135, 0, 1501}, + dictWord{7, 10, 633}, + dictWord{7, 10, 905}, + dictWord{7, 10, 909}, + dictWord{7, 10, 1538}, + dictWord{9, 10, 767}, + dictWord{140, 10, 636}, + dictWord{4, 11, 265}, + dictWord{7, 11, 807}, + dictWord{135, 11, 950}, + dictWord{5, 11, 93}, + dictWord{12, 11, 267}, + dictWord{144, 11, 26}, + dictWord{136, 0, 191}, + dictWord{139, 10, 301}, + dictWord{135, 10, 1970}, + dictWord{135, 0, 267}, + dictWord{4, 0, 319}, + dictWord{5, 0, 699}, + dictWord{138, 0, 673}, + dictWord{ + 6, + 0, + 336, + }, + dictWord{7, 0, 92}, + dictWord{7, 0, 182}, + dictWord{8, 0, 453}, + dictWord{8, 0, 552}, + dictWord{9, 0, 204}, + dictWord{9, 0, 285}, + dictWord{10, 0, 99}, + dictWord{ + 11, + 0, + 568, + }, + dictWord{11, 0, 950}, + dictWord{12, 0, 94}, + dictWord{16, 0, 20}, + dictWord{16, 0, 70}, + dictWord{19, 0, 55}, + dictWord{12, 10, 644}, + dictWord{144, 10, 90}, + dictWord{6, 0, 551}, + dictWord{7, 0, 1308}, + 
dictWord{7, 10, 845}, + dictWord{7, 11, 994}, + dictWord{8, 10, 160}, + dictWord{137, 10, 318}, + dictWord{19, 11, 1}, + dictWord{ + 19, + 11, + 26, + }, + dictWord{150, 11, 9}, + dictWord{7, 0, 1406}, + dictWord{9, 0, 218}, + dictWord{141, 0, 222}, + dictWord{5, 0, 256}, + dictWord{138, 0, 69}, + dictWord{ + 5, + 11, + 233, + }, + dictWord{5, 11, 320}, + dictWord{6, 11, 140}, + dictWord{7, 11, 330}, + dictWord{136, 11, 295}, + dictWord{6, 0, 1980}, + dictWord{136, 0, 952}, + dictWord{ + 4, + 0, + 833, + }, + dictWord{137, 11, 678}, + dictWord{133, 11, 978}, + dictWord{4, 11, 905}, + dictWord{6, 11, 1701}, + dictWord{137, 11, 843}, + dictWord{138, 10, 735}, + dictWord{136, 10, 76}, + dictWord{17, 0, 39}, + dictWord{148, 0, 36}, + dictWord{18, 0, 81}, + dictWord{146, 11, 81}, + dictWord{14, 0, 352}, + dictWord{17, 0, 53}, + dictWord{ + 18, + 0, + 146, + }, + dictWord{18, 0, 152}, + dictWord{19, 0, 11}, + dictWord{150, 0, 54}, + dictWord{135, 0, 634}, + dictWord{138, 10, 841}, + dictWord{132, 0, 618}, + dictWord{ + 4, + 0, + 339, + }, + dictWord{7, 0, 259}, + dictWord{17, 0, 73}, + dictWord{4, 11, 275}, + dictWord{140, 11, 376}, + dictWord{132, 11, 509}, + dictWord{7, 11, 273}, + dictWord{ + 139, + 11, + 377, + }, + dictWord{4, 0, 759}, + dictWord{13, 0, 169}, + dictWord{137, 10, 804}, + dictWord{6, 10, 96}, + dictWord{135, 10, 1426}, + dictWord{4, 10, 651}, + dictWord{133, 10, 289}, + dictWord{7, 0, 1075}, + dictWord{8, 10, 35}, + dictWord{9, 10, 511}, + dictWord{10, 10, 767}, + dictWord{147, 10, 118}, + dictWord{6, 0, 649}, + dictWord{6, 0, 670}, + dictWord{136, 0, 482}, + dictWord{5, 0, 336}, + dictWord{6, 0, 341}, + dictWord{6, 0, 478}, + dictWord{6, 0, 1763}, + dictWord{136, 0, 386}, + dictWord{ + 5, + 11, + 802, + }, + dictWord{7, 11, 2021}, + dictWord{8, 11, 805}, + dictWord{14, 11, 94}, + dictWord{15, 11, 65}, + dictWord{16, 11, 4}, + dictWord{16, 11, 77}, + dictWord{16, 11, 80}, + dictWord{145, 11, 5}, + dictWord{6, 0, 1035}, + dictWord{5, 11, 167}, + 
dictWord{5, 11, 899}, + dictWord{6, 11, 410}, + dictWord{137, 11, 777}, + dictWord{ + 134, + 11, + 1705, + }, + dictWord{5, 0, 924}, + dictWord{133, 0, 969}, + dictWord{132, 10, 704}, + dictWord{135, 0, 73}, + dictWord{135, 11, 10}, + dictWord{135, 10, 1078}, + dictWord{ + 5, + 11, + 11, + }, + dictWord{6, 11, 117}, + dictWord{6, 11, 485}, + dictWord{7, 11, 1133}, + dictWord{9, 11, 582}, + dictWord{9, 11, 594}, + dictWord{11, 11, 21}, + dictWord{ + 11, + 11, + 818, + }, + dictWord{12, 11, 535}, + dictWord{141, 11, 86}, + dictWord{135, 0, 1971}, + dictWord{4, 11, 264}, + dictWord{7, 11, 1067}, + dictWord{8, 11, 204}, + dictWord{8, 11, 385}, + dictWord{139, 11, 953}, + dictWord{6, 0, 1458}, + dictWord{135, 0, 1344}, + dictWord{5, 0, 396}, + dictWord{134, 0, 501}, + dictWord{4, 10, 720}, + dictWord{133, 10, 306}, + dictWord{4, 0, 929}, + dictWord{5, 0, 799}, + dictWord{8, 0, 46}, + dictWord{8, 0, 740}, + dictWord{133, 10, 431}, + dictWord{7, 11, 646}, + dictWord{ + 7, + 11, + 1730, + }, + dictWord{11, 11, 446}, + dictWord{141, 11, 178}, + dictWord{7, 0, 276}, + dictWord{5, 10, 464}, + dictWord{6, 10, 236}, + dictWord{7, 10, 696}, + dictWord{ + 7, + 10, + 914, + }, + dictWord{7, 10, 1108}, + dictWord{7, 10, 1448}, + dictWord{9, 10, 15}, + dictWord{9, 10, 564}, + dictWord{10, 10, 14}, + dictWord{12, 10, 565}, + dictWord{ + 13, + 10, + 449, + }, + dictWord{14, 10, 53}, + dictWord{15, 10, 13}, + dictWord{16, 10, 64}, + dictWord{145, 10, 41}, + dictWord{4, 0, 892}, + dictWord{133, 0, 770}, + dictWord{ + 6, + 10, + 1767, + }, + dictWord{12, 10, 194}, + dictWord{145, 10, 107}, + dictWord{135, 0, 158}, + dictWord{5, 10, 840}, + dictWord{138, 11, 608}, + dictWord{134, 0, 1432}, + dictWord{138, 11, 250}, + dictWord{8, 11, 794}, + dictWord{9, 11, 400}, + dictWord{10, 11, 298}, + dictWord{142, 11, 228}, + dictWord{151, 0, 25}, + dictWord{ + 7, + 11, + 1131, + }, + dictWord{135, 11, 1468}, + dictWord{135, 0, 2001}, + dictWord{9, 10, 642}, + dictWord{11, 10, 236}, + dictWord{142, 
10, 193}, + dictWord{4, 10, 68}, + dictWord{5, 10, 634}, + dictWord{6, 10, 386}, + dictWord{7, 10, 794}, + dictWord{8, 10, 273}, + dictWord{9, 10, 563}, + dictWord{10, 10, 105}, + dictWord{10, 10, 171}, + dictWord{11, 10, 94}, + dictWord{139, 10, 354}, + dictWord{136, 11, 724}, + dictWord{132, 0, 478}, + dictWord{11, 11, 512}, + dictWord{13, 11, 205}, + dictWord{ + 19, + 11, + 30, + }, + dictWord{22, 11, 36}, + dictWord{151, 11, 19}, + dictWord{7, 0, 1461}, + dictWord{140, 0, 91}, + dictWord{6, 11, 190}, + dictWord{7, 11, 768}, + dictWord{ + 135, + 11, + 1170, + }, + dictWord{4, 0, 602}, + dictWord{8, 0, 211}, + dictWord{4, 10, 95}, + dictWord{7, 10, 416}, + dictWord{139, 10, 830}, + dictWord{7, 10, 731}, + dictWord{13, 10, 20}, + dictWord{143, 10, 11}, + dictWord{6, 0, 1068}, + dictWord{135, 0, 1872}, + dictWord{4, 0, 13}, + dictWord{5, 0, 567}, + dictWord{7, 0, 1498}, + dictWord{9, 0, 124}, + dictWord{11, 0, 521}, + dictWord{12, 0, 405}, + dictWord{135, 11, 1023}, + dictWord{135, 0, 1006}, + dictWord{132, 0, 735}, + dictWord{138, 0, 812}, + dictWord{4, 0, 170}, + dictWord{135, 0, 323}, + dictWord{6, 11, 137}, + dictWord{9, 11, 75}, + dictWord{9, 11, 253}, + dictWord{10, 11, 194}, + dictWord{138, 11, 444}, + dictWord{5, 0, 304}, + dictWord{7, 0, 1403}, + dictWord{5, 10, 864}, + dictWord{10, 10, 648}, + dictWord{11, 10, 671}, + dictWord{143, 10, 46}, + dictWord{135, 11, 1180}, + dictWord{ + 133, + 10, + 928, + }, + dictWord{4, 0, 148}, + dictWord{133, 0, 742}, + dictWord{11, 10, 986}, + dictWord{140, 10, 682}, + dictWord{133, 0, 523}, + dictWord{135, 11, 1743}, + dictWord{7, 0, 730}, + dictWord{18, 0, 144}, + dictWord{19, 0, 61}, + dictWord{8, 10, 44}, + dictWord{9, 10, 884}, + dictWord{10, 10, 580}, + dictWord{11, 10, 399}, + dictWord{ + 11, + 10, + 894, + }, + dictWord{143, 10, 122}, + dictWord{5, 11, 760}, + dictWord{7, 11, 542}, + dictWord{8, 11, 135}, + dictWord{136, 11, 496}, + dictWord{136, 0, 981}, + dictWord{133, 0, 111}, + dictWord{10, 0, 132}, + 
dictWord{11, 0, 191}, + dictWord{11, 0, 358}, + dictWord{139, 0, 460}, + dictWord{7, 11, 319}, + dictWord{7, 11, 355}, + dictWord{ + 7, + 11, + 763, + }, + dictWord{10, 11, 389}, + dictWord{145, 11, 43}, + dictWord{134, 0, 890}, + dictWord{134, 0, 1420}, + dictWord{136, 11, 557}, + dictWord{ + 133, + 10, + 518, + }, + dictWord{133, 0, 444}, + dictWord{135, 0, 1787}, + dictWord{135, 10, 1852}, + dictWord{8, 0, 123}, + dictWord{15, 0, 6}, + dictWord{144, 0, 7}, + dictWord{ + 6, + 0, + 2041, + }, + dictWord{10, 11, 38}, + dictWord{139, 11, 784}, + dictWord{136, 0, 932}, + dictWord{5, 0, 937}, + dictWord{135, 0, 100}, + dictWord{6, 0, 995}, + dictWord{ + 4, + 11, + 58, + }, + dictWord{5, 11, 286}, + dictWord{6, 11, 319}, + dictWord{7, 11, 402}, + dictWord{7, 11, 1254}, + dictWord{7, 11, 1903}, + dictWord{8, 11, 356}, + dictWord{ + 140, + 11, + 408, + }, + dictWord{4, 11, 389}, + dictWord{9, 11, 181}, + dictWord{9, 11, 255}, + dictWord{10, 11, 8}, + dictWord{10, 11, 29}, + dictWord{10, 11, 816}, + dictWord{ + 11, + 11, + 311, + }, + dictWord{11, 11, 561}, + dictWord{12, 11, 67}, + dictWord{141, 11, 181}, + dictWord{138, 0, 255}, + dictWord{5, 0, 138}, + dictWord{4, 10, 934}, + dictWord{ + 136, + 10, + 610, + }, + dictWord{4, 0, 965}, + dictWord{10, 0, 863}, + dictWord{138, 0, 898}, + dictWord{10, 10, 804}, + dictWord{138, 10, 832}, + dictWord{12, 0, 631}, + dictWord{ + 8, + 10, + 96, + }, + dictWord{9, 10, 36}, + dictWord{10, 10, 607}, + dictWord{11, 10, 423}, + dictWord{11, 10, 442}, + dictWord{12, 10, 309}, + dictWord{14, 10, 199}, + dictWord{ + 15, + 10, + 90, + }, + dictWord{145, 10, 110}, + dictWord{134, 0, 1394}, + dictWord{4, 0, 652}, + dictWord{8, 0, 320}, + dictWord{22, 0, 6}, + dictWord{22, 0, 16}, + dictWord{ + 9, + 10, + 13, + }, + dictWord{9, 10, 398}, + dictWord{9, 10, 727}, + dictWord{10, 10, 75}, + dictWord{10, 10, 184}, + dictWord{10, 10, 230}, + dictWord{10, 10, 564}, + dictWord{ + 10, + 10, + 569, + }, + dictWord{11, 10, 973}, + dictWord{12, 10, 70}, 
+ dictWord{12, 10, 189}, + dictWord{13, 10, 57}, + dictWord{141, 10, 257}, + dictWord{6, 0, 897}, + dictWord{ + 134, + 0, + 1333, + }, + dictWord{4, 0, 692}, + dictWord{133, 0, 321}, + dictWord{133, 11, 373}, + dictWord{135, 0, 922}, + dictWord{5, 0, 619}, + dictWord{133, 0, 698}, + dictWord{ + 137, + 10, + 631, + }, + dictWord{5, 10, 345}, + dictWord{135, 10, 1016}, + dictWord{9, 0, 957}, + dictWord{9, 0, 1018}, + dictWord{12, 0, 828}, + dictWord{12, 0, 844}, + dictWord{ + 12, + 0, + 897, + }, + dictWord{12, 0, 901}, + dictWord{12, 0, 943}, + dictWord{15, 0, 180}, + dictWord{18, 0, 197}, + dictWord{18, 0, 200}, + dictWord{18, 0, 213}, + dictWord{ + 18, + 0, + 214, + }, + dictWord{146, 0, 226}, + dictWord{5, 0, 917}, + dictWord{134, 0, 1659}, + dictWord{135, 0, 1100}, + dictWord{134, 0, 1173}, + dictWord{134, 0, 1930}, + dictWord{5, 0, 251}, + dictWord{5, 0, 956}, + dictWord{8, 0, 268}, + dictWord{9, 0, 214}, + dictWord{146, 0, 142}, + dictWord{133, 10, 673}, + dictWord{137, 10, 850}, + dictWord{ + 4, + 10, + 287, + }, + dictWord{133, 10, 1018}, + dictWord{132, 11, 672}, + dictWord{5, 0, 346}, + dictWord{5, 0, 711}, + dictWord{8, 0, 390}, + dictWord{11, 11, 752}, + dictWord{139, 11, 885}, + dictWord{5, 10, 34}, + dictWord{10, 10, 724}, + dictWord{12, 10, 444}, + dictWord{13, 10, 354}, + dictWord{18, 10, 32}, + dictWord{23, 10, 24}, + dictWord{23, 10, 31}, + dictWord{152, 10, 5}, + dictWord{4, 11, 710}, + dictWord{134, 11, 606}, + dictWord{134, 0, 744}, + dictWord{134, 10, 382}, + dictWord{ + 133, + 11, + 145, + }, + dictWord{4, 10, 329}, + dictWord{7, 11, 884}, + dictWord{140, 11, 124}, + dictWord{4, 11, 467}, + dictWord{5, 11, 405}, + dictWord{134, 11, 544}, + dictWord{ + 9, + 10, + 846, + }, + dictWord{138, 10, 827}, + dictWord{133, 0, 624}, + dictWord{9, 11, 372}, + dictWord{15, 11, 2}, + dictWord{19, 11, 10}, + dictWord{147, 11, 18}, + dictWord{ + 4, + 11, + 387, + }, + dictWord{135, 11, 1288}, + dictWord{5, 0, 783}, + dictWord{7, 0, 1998}, + dictWord{135, 0, 
2047}, + dictWord{132, 10, 906}, + dictWord{136, 10, 366}, + dictWord{135, 11, 550}, + dictWord{4, 10, 123}, + dictWord{4, 10, 649}, + dictWord{5, 10, 605}, + dictWord{7, 10, 1509}, + dictWord{136, 10, 36}, + dictWord{ + 134, + 0, + 1125, + }, + dictWord{132, 0, 594}, + dictWord{133, 10, 767}, + dictWord{135, 11, 1227}, + dictWord{136, 11, 467}, + dictWord{4, 11, 576}, + dictWord{ + 135, + 11, + 1263, + }, + dictWord{4, 0, 268}, + dictWord{7, 0, 1534}, + dictWord{135, 11, 1534}, + dictWord{4, 10, 273}, + dictWord{5, 10, 658}, + dictWord{5, 11, 919}, + dictWord{ + 5, + 10, + 995, + }, + dictWord{134, 11, 1673}, + dictWord{133, 0, 563}, + dictWord{134, 10, 72}, + dictWord{135, 10, 1345}, + dictWord{4, 11, 82}, + dictWord{5, 11, 333}, + dictWord{ + 5, + 11, + 904, + }, + dictWord{6, 11, 207}, + dictWord{7, 11, 325}, + dictWord{7, 11, 1726}, + dictWord{8, 11, 101}, + dictWord{10, 11, 778}, + dictWord{139, 11, 220}, + dictWord{5, 0, 37}, + dictWord{6, 0, 39}, + dictWord{6, 0, 451}, + dictWord{7, 0, 218}, + dictWord{7, 0, 667}, + dictWord{7, 0, 1166}, + dictWord{7, 0, 1687}, + dictWord{8, 0, 662}, + dictWord{16, 0, 2}, + dictWord{133, 10, 589}, + dictWord{134, 0, 1332}, + dictWord{133, 11, 903}, + dictWord{134, 0, 508}, + dictWord{5, 10, 117}, + dictWord{6, 10, 514}, + dictWord{6, 10, 541}, + dictWord{7, 10, 1164}, + dictWord{7, 10, 1436}, + dictWord{8, 10, 220}, + dictWord{8, 10, 648}, + dictWord{10, 10, 688}, + dictWord{11, 10, 560}, + dictWord{140, 11, 147}, + dictWord{6, 11, 555}, + dictWord{135, 11, 485}, + dictWord{133, 10, 686}, + dictWord{7, 0, 453}, + dictWord{7, 0, 635}, + dictWord{7, 0, 796}, + dictWord{8, 0, 331}, + dictWord{9, 0, 330}, + dictWord{9, 0, 865}, + dictWord{10, 0, 119}, + dictWord{10, 0, 235}, + dictWord{11, 0, 111}, + dictWord{11, 0, 129}, + dictWord{ + 11, + 0, + 240, + }, + dictWord{12, 0, 31}, + dictWord{12, 0, 66}, + dictWord{12, 0, 222}, + dictWord{12, 0, 269}, + dictWord{12, 0, 599}, + dictWord{12, 0, 684}, + dictWord{12, 0, 689}, + 
dictWord{12, 0, 691}, + dictWord{142, 0, 345}, + dictWord{135, 0, 1834}, + dictWord{4, 11, 705}, + dictWord{7, 11, 615}, + dictWord{138, 11, 251}, + dictWord{ + 136, + 11, + 345, + }, + dictWord{137, 0, 527}, + dictWord{6, 0, 98}, + dictWord{7, 0, 702}, + dictWord{135, 0, 991}, + dictWord{11, 0, 576}, + dictWord{14, 0, 74}, + dictWord{7, 10, 196}, + dictWord{10, 10, 765}, + dictWord{11, 10, 347}, + dictWord{11, 10, 552}, + dictWord{11, 10, 790}, + dictWord{12, 10, 263}, + dictWord{13, 10, 246}, + dictWord{ + 13, + 10, + 270, + }, + dictWord{13, 10, 395}, + dictWord{14, 10, 176}, + dictWord{14, 10, 190}, + dictWord{14, 10, 398}, + dictWord{14, 10, 412}, + dictWord{15, 10, 32}, + dictWord{ + 15, + 10, + 63, + }, + dictWord{16, 10, 88}, + dictWord{147, 10, 105}, + dictWord{134, 11, 90}, + dictWord{13, 0, 84}, + dictWord{141, 0, 122}, + dictWord{6, 0, 37}, + dictWord{ + 7, + 0, + 299, + }, + dictWord{7, 0, 1666}, + dictWord{8, 0, 195}, + dictWord{8, 0, 316}, + dictWord{9, 0, 178}, + dictWord{9, 0, 276}, + dictWord{9, 0, 339}, + dictWord{9, 0, 536}, + dictWord{ + 10, + 0, + 102, + }, + dictWord{10, 0, 362}, + dictWord{10, 0, 785}, + dictWord{11, 0, 55}, + dictWord{11, 0, 149}, + dictWord{11, 0, 773}, + dictWord{13, 0, 416}, + dictWord{ + 13, + 0, + 419, + }, + dictWord{14, 0, 38}, + dictWord{14, 0, 41}, + dictWord{142, 0, 210}, + dictWord{5, 10, 381}, + dictWord{135, 10, 1792}, + dictWord{7, 11, 813}, + dictWord{ + 12, + 11, + 497, + }, + dictWord{141, 11, 56}, + dictWord{7, 10, 616}, + dictWord{138, 10, 413}, + dictWord{133, 0, 645}, + dictWord{6, 11, 125}, + dictWord{135, 11, 1277}, + dictWord{132, 0, 290}, + dictWord{6, 0, 70}, + dictWord{7, 0, 1292}, + dictWord{10, 0, 762}, + dictWord{139, 0, 288}, + dictWord{6, 10, 120}, + dictWord{7, 10, 1188}, + dictWord{ + 7, + 10, + 1710, + }, + dictWord{8, 10, 286}, + dictWord{9, 10, 667}, + dictWord{11, 10, 592}, + dictWord{139, 10, 730}, + dictWord{135, 11, 1784}, + dictWord{7, 0, 1315}, + dictWord{135, 11, 1315}, + 
dictWord{134, 0, 1955}, + dictWord{135, 10, 1146}, + dictWord{7, 0, 131}, + dictWord{7, 0, 422}, + dictWord{8, 0, 210}, + dictWord{ + 140, + 0, + 573, + }, + dictWord{4, 10, 352}, + dictWord{135, 10, 687}, + dictWord{139, 0, 797}, + dictWord{143, 0, 38}, + dictWord{14, 0, 179}, + dictWord{15, 0, 151}, + dictWord{ + 150, + 0, + 11, + }, + dictWord{7, 0, 488}, + dictWord{4, 10, 192}, + dictWord{5, 10, 49}, + dictWord{6, 10, 200}, + dictWord{6, 10, 293}, + dictWord{134, 10, 1696}, + dictWord{ + 132, + 0, + 936, + }, + dictWord{135, 11, 703}, + dictWord{6, 11, 160}, + dictWord{7, 11, 1106}, + dictWord{9, 11, 770}, + dictWord{10, 11, 618}, + dictWord{11, 11, 112}, + dictWord{ + 140, + 11, + 413, + }, + dictWord{5, 0, 453}, + dictWord{134, 0, 441}, + dictWord{135, 0, 595}, + dictWord{132, 10, 650}, + dictWord{132, 10, 147}, + dictWord{6, 0, 991}, + dictWord{6, 0, 1182}, + dictWord{12, 11, 271}, + dictWord{145, 11, 109}, + dictWord{133, 10, 934}, + dictWord{140, 11, 221}, + dictWord{132, 0, 653}, + dictWord{ + 7, + 0, + 505, + }, + dictWord{135, 0, 523}, + dictWord{134, 0, 903}, + dictWord{135, 11, 479}, + dictWord{7, 11, 304}, + dictWord{9, 11, 646}, + dictWord{9, 11, 862}, + dictWord{ + 10, + 11, + 262, + }, + dictWord{11, 11, 696}, + dictWord{12, 11, 208}, + dictWord{15, 11, 79}, + dictWord{147, 11, 108}, + dictWord{146, 0, 80}, + dictWord{135, 11, 981}, + dictWord{142, 0, 432}, + dictWord{132, 0, 314}, + dictWord{137, 11, 152}, + dictWord{7, 0, 1368}, + dictWord{8, 0, 232}, + dictWord{8, 0, 361}, + dictWord{10, 0, 682}, + dictWord{138, 0, 742}, + dictWord{135, 11, 1586}, + dictWord{9, 0, 534}, + dictWord{4, 11, 434}, + dictWord{11, 11, 663}, + dictWord{12, 11, 210}, + dictWord{13, 11, 166}, + dictWord{13, 11, 310}, + dictWord{14, 11, 373}, + dictWord{147, 11, 43}, + dictWord{7, 11, 1091}, + dictWord{135, 11, 1765}, + dictWord{6, 11, 550}, + dictWord{ + 135, + 11, + 652, + }, + dictWord{137, 0, 27}, + dictWord{142, 0, 12}, + dictWord{4, 10, 637}, + dictWord{5, 11, 
553}, + dictWord{7, 11, 766}, + dictWord{138, 11, 824}, + dictWord{ + 7, + 11, + 737, + }, + dictWord{8, 11, 298}, + dictWord{136, 11, 452}, + dictWord{7, 0, 736}, + dictWord{139, 0, 264}, + dictWord{134, 0, 1657}, + dictWord{133, 11, 292}, + dictWord{138, 11, 135}, + dictWord{6, 0, 844}, + dictWord{134, 0, 1117}, + dictWord{135, 0, 127}, + dictWord{9, 10, 867}, + dictWord{138, 10, 837}, + dictWord{ + 6, + 0, + 1184, + }, + dictWord{134, 0, 1208}, + dictWord{134, 0, 1294}, + dictWord{136, 0, 364}, + dictWord{6, 0, 1415}, + dictWord{7, 0, 1334}, + dictWord{11, 0, 125}, + dictWord{ + 6, + 10, + 170, + }, + dictWord{7, 11, 393}, + dictWord{8, 10, 395}, + dictWord{8, 10, 487}, + dictWord{10, 11, 603}, + dictWord{11, 11, 206}, + dictWord{141, 10, 147}, + dictWord{137, 11, 748}, + dictWord{4, 11, 912}, + dictWord{137, 11, 232}, + dictWord{4, 10, 535}, + dictWord{136, 10, 618}, + dictWord{137, 0, 792}, + dictWord{ + 7, + 11, + 1973, + }, + dictWord{136, 11, 716}, + dictWord{135, 11, 98}, + dictWord{5, 0, 909}, + dictWord{9, 0, 849}, + dictWord{138, 0, 805}, + dictWord{4, 0, 630}, + dictWord{ + 132, + 0, + 699, + }, + dictWord{5, 11, 733}, + dictWord{14, 11, 103}, + dictWord{150, 10, 23}, + dictWord{12, 11, 158}, + dictWord{18, 11, 8}, + dictWord{19, 11, 62}, + dictWord{ + 20, + 11, + 6, + }, + dictWord{22, 11, 4}, + dictWord{23, 11, 2}, + dictWord{151, 11, 9}, + dictWord{132, 0, 968}, + dictWord{132, 10, 778}, + dictWord{132, 10, 46}, + dictWord{5, 10, 811}, + dictWord{6, 10, 1679}, + dictWord{6, 10, 1714}, + dictWord{135, 10, 2032}, + dictWord{6, 0, 1446}, + dictWord{7, 10, 1458}, + dictWord{9, 10, 407}, + dictWord{ + 139, + 10, + 15, + }, + dictWord{7, 0, 206}, + dictWord{7, 0, 397}, + dictWord{7, 0, 621}, + dictWord{7, 0, 640}, + dictWord{8, 0, 124}, + dictWord{8, 0, 619}, + dictWord{9, 0, 305}, + dictWord{ + 9, + 0, + 643, + }, + dictWord{10, 0, 264}, + dictWord{10, 0, 628}, + dictWord{11, 0, 40}, + dictWord{12, 0, 349}, + dictWord{13, 0, 134}, + dictWord{13, 0, 295}, 
+ dictWord{ + 14, + 0, + 155, + }, + dictWord{15, 0, 120}, + dictWord{18, 0, 105}, + dictWord{6, 10, 34}, + dictWord{7, 10, 1089}, + dictWord{8, 10, 708}, + dictWord{8, 10, 721}, + dictWord{9, 10, 363}, + dictWord{148, 10, 98}, + dictWord{4, 0, 262}, + dictWord{5, 0, 641}, + dictWord{135, 0, 342}, + dictWord{137, 11, 72}, + dictWord{4, 0, 99}, + dictWord{6, 0, 250}, + dictWord{ + 6, + 0, + 346, + }, + dictWord{8, 0, 127}, + dictWord{138, 0, 81}, + dictWord{132, 0, 915}, + dictWord{5, 0, 75}, + dictWord{9, 0, 517}, + dictWord{10, 0, 470}, + dictWord{12, 0, 155}, + dictWord{141, 0, 224}, + dictWord{132, 10, 462}, + dictWord{11, 11, 600}, + dictWord{11, 11, 670}, + dictWord{141, 11, 245}, + dictWord{142, 0, 83}, + dictWord{ + 5, + 10, + 73, + }, + dictWord{6, 10, 23}, + dictWord{134, 10, 338}, + dictWord{6, 0, 1031}, + dictWord{139, 11, 923}, + dictWord{7, 11, 164}, + dictWord{7, 11, 1571}, + dictWord{ + 9, + 11, + 107, + }, + dictWord{140, 11, 225}, + dictWord{134, 0, 1470}, + dictWord{133, 0, 954}, + dictWord{6, 0, 304}, + dictWord{8, 0, 418}, + dictWord{10, 0, 345}, + dictWord{ + 11, + 0, + 341, + }, + dictWord{139, 0, 675}, + dictWord{9, 0, 410}, + dictWord{139, 0, 425}, + dictWord{4, 11, 27}, + dictWord{5, 11, 484}, + dictWord{5, 11, 510}, + dictWord{6, 11, 434}, + dictWord{7, 11, 1000}, + dictWord{7, 11, 1098}, + dictWord{8, 11, 2}, + dictWord{136, 11, 200}, + dictWord{134, 0, 734}, + dictWord{140, 11, 257}, + dictWord{ + 7, + 10, + 725, + }, + dictWord{8, 10, 498}, + dictWord{139, 10, 268}, + dictWord{134, 0, 1822}, + dictWord{135, 0, 1798}, + dictWord{135, 10, 773}, + dictWord{132, 11, 460}, + dictWord{4, 11, 932}, + dictWord{133, 11, 891}, + dictWord{134, 0, 14}, + dictWord{132, 10, 583}, + dictWord{7, 10, 1462}, + dictWord{8, 11, 625}, + dictWord{ + 139, + 10, + 659, + }, + dictWord{5, 0, 113}, + dictWord{6, 0, 243}, + dictWord{6, 0, 1708}, + dictWord{7, 0, 1865}, + dictWord{11, 0, 161}, + dictWord{16, 0, 37}, + dictWord{17, 0, 99}, + dictWord{133, 10, 220}, 
+ dictWord{134, 11, 76}, + dictWord{5, 11, 461}, + dictWord{135, 11, 1925}, + dictWord{140, 0, 69}, + dictWord{8, 11, 92}, + dictWord{ + 137, + 11, + 221, + }, + dictWord{139, 10, 803}, + dictWord{132, 10, 544}, + dictWord{4, 0, 274}, + dictWord{134, 0, 922}, + dictWord{132, 0, 541}, + dictWord{5, 0, 627}, + dictWord{ + 6, + 10, + 437, + }, + dictWord{6, 10, 564}, + dictWord{11, 10, 181}, + dictWord{141, 10, 183}, + dictWord{135, 10, 1192}, + dictWord{7, 0, 166}, + dictWord{132, 11, 763}, + dictWord{133, 11, 253}, + dictWord{134, 0, 849}, + dictWord{9, 11, 73}, + dictWord{10, 11, 110}, + dictWord{14, 11, 185}, + dictWord{145, 11, 119}, + dictWord{5, 11, 212}, + dictWord{12, 11, 35}, + dictWord{141, 11, 382}, + dictWord{133, 0, 717}, + dictWord{137, 0, 304}, + dictWord{136, 0, 600}, + dictWord{133, 0, 654}, + dictWord{ + 6, + 0, + 273, + }, + dictWord{10, 0, 188}, + dictWord{13, 0, 377}, + dictWord{146, 0, 77}, + dictWord{4, 10, 790}, + dictWord{5, 10, 273}, + dictWord{134, 10, 394}, + dictWord{ + 132, + 0, + 543, + }, + dictWord{135, 0, 410}, + dictWord{11, 0, 98}, + dictWord{11, 0, 524}, + dictWord{141, 0, 87}, + dictWord{132, 0, 941}, + dictWord{135, 11, 1175}, + dictWord{ + 4, + 0, + 250, + }, + dictWord{7, 0, 1612}, + dictWord{11, 0, 186}, + dictWord{12, 0, 133}, + dictWord{6, 10, 127}, + dictWord{7, 10, 1511}, + dictWord{8, 10, 613}, + dictWord{ + 12, + 10, + 495, + }, + dictWord{12, 10, 586}, + dictWord{12, 10, 660}, + dictWord{12, 10, 668}, + dictWord{14, 10, 385}, + dictWord{15, 10, 118}, + dictWord{17, 10, 20}, + dictWord{ + 146, + 10, + 98, + }, + dictWord{6, 0, 1785}, + dictWord{133, 11, 816}, + dictWord{134, 0, 1339}, + dictWord{7, 0, 961}, + dictWord{7, 0, 1085}, + dictWord{7, 0, 1727}, + dictWord{ + 8, + 0, + 462, + }, + dictWord{6, 10, 230}, + dictWord{135, 11, 1727}, + dictWord{9, 0, 636}, + dictWord{135, 10, 1954}, + dictWord{132, 0, 780}, + dictWord{5, 11, 869}, + dictWord{5, 11, 968}, + dictWord{6, 11, 1626}, + dictWord{8, 11, 734}, + 
dictWord{136, 11, 784}, + dictWord{4, 11, 542}, + dictWord{6, 11, 1716}, + dictWord{6, 11, 1727}, + dictWord{7, 11, 1082}, + dictWord{7, 11, 1545}, + dictWord{8, 11, 56}, + dictWord{8, 11, 118}, + dictWord{8, 11, 412}, + dictWord{8, 11, 564}, + dictWord{9, 11, 888}, + dictWord{9, 11, 908}, + dictWord{10, 11, 50}, + dictWord{10, 11, 423}, + dictWord{11, 11, 685}, + dictWord{11, 11, 697}, + dictWord{11, 11, 933}, + dictWord{12, 11, 299}, + dictWord{13, 11, 126}, + dictWord{13, 11, 136}, + dictWord{13, 11, 170}, + dictWord{141, 11, 190}, + dictWord{134, 11, 226}, + dictWord{4, 11, 232}, + dictWord{ + 9, + 11, + 202, + }, + dictWord{10, 11, 474}, + dictWord{140, 11, 433}, + dictWord{137, 11, 500}, + dictWord{5, 0, 529}, + dictWord{136, 10, 68}, + dictWord{132, 10, 654}, + dictWord{ + 4, + 10, + 156, + }, + dictWord{7, 10, 998}, + dictWord{7, 10, 1045}, + dictWord{7, 10, 1860}, + dictWord{9, 10, 48}, + dictWord{9, 10, 692}, + dictWord{11, 10, 419}, + dictWord{139, 10, 602}, + dictWord{7, 0, 1276}, + dictWord{8, 0, 474}, + dictWord{9, 0, 652}, + dictWord{6, 11, 108}, + dictWord{7, 11, 1003}, + dictWord{7, 11, 1181}, + dictWord{136, 11, 343}, + dictWord{7, 11, 1264}, + dictWord{7, 11, 1678}, + dictWord{11, 11, 945}, + dictWord{12, 11, 341}, + dictWord{12, 11, 471}, + dictWord{ + 140, + 11, + 569, + }, + dictWord{134, 11, 1712}, + dictWord{5, 0, 948}, + dictWord{12, 0, 468}, + dictWord{19, 0, 96}, + dictWord{148, 0, 24}, + dictWord{4, 11, 133}, + dictWord{ + 7, + 11, + 711, + }, + dictWord{7, 11, 1298}, + dictWord{7, 11, 1585}, + dictWord{135, 11, 1929}, + dictWord{6, 0, 753}, + dictWord{140, 0, 657}, + dictWord{139, 0, 941}, + dictWord{ + 6, + 11, + 99, + }, + dictWord{7, 11, 1808}, + dictWord{145, 11, 57}, + dictWord{6, 11, 574}, + dictWord{7, 11, 428}, + dictWord{7, 11, 1250}, + dictWord{10, 11, 669}, + dictWord{ + 11, + 11, + 485, + }, + dictWord{11, 11, 840}, + dictWord{12, 11, 300}, + dictWord{142, 11, 250}, + dictWord{4, 0, 532}, + dictWord{5, 0, 706}, + 
dictWord{135, 0, 662}, + dictWord{ + 5, + 0, + 837, + }, + dictWord{6, 0, 1651}, + dictWord{139, 0, 985}, + dictWord{7, 0, 1861}, + dictWord{9, 10, 197}, + dictWord{10, 10, 300}, + dictWord{12, 10, 473}, + dictWord{ + 13, + 10, + 90, + }, + dictWord{141, 10, 405}, + dictWord{137, 11, 252}, + dictWord{6, 11, 323}, + dictWord{135, 11, 1564}, + dictWord{4, 0, 330}, + dictWord{4, 0, 863}, + dictWord{7, 0, 933}, + dictWord{7, 0, 2012}, + dictWord{8, 0, 292}, + dictWord{7, 11, 461}, + dictWord{8, 11, 775}, + dictWord{138, 11, 435}, + dictWord{132, 10, 606}, + dictWord{ + 4, + 11, + 655, + }, + dictWord{7, 11, 850}, + dictWord{17, 11, 75}, + dictWord{146, 11, 137}, + dictWord{135, 0, 767}, + dictWord{7, 10, 1978}, + dictWord{136, 10, 676}, + dictWord{132, 0, 641}, + dictWord{135, 11, 1559}, + dictWord{134, 0, 1233}, + dictWord{137, 0, 242}, + dictWord{17, 0, 114}, + dictWord{4, 10, 361}, + dictWord{ + 133, + 10, + 315, + }, + dictWord{137, 0, 883}, + dictWord{132, 10, 461}, + dictWord{138, 0, 274}, + dictWord{134, 0, 2008}, + dictWord{134, 0, 1794}, + dictWord{4, 0, 703}, + dictWord{135, 0, 207}, + dictWord{12, 0, 285}, + dictWord{132, 10, 472}, + dictWord{132, 0, 571}, + dictWord{5, 0, 873}, + dictWord{5, 0, 960}, + dictWord{8, 0, 823}, + dictWord{9, 0, 881}, + dictWord{136, 11, 577}, + dictWord{7, 0, 617}, + dictWord{10, 0, 498}, + dictWord{11, 0, 501}, + dictWord{12, 0, 16}, + dictWord{140, 0, 150}, + dictWord{ + 138, + 10, + 747, + }, + dictWord{132, 0, 431}, + dictWord{133, 10, 155}, + dictWord{11, 0, 283}, + dictWord{11, 0, 567}, + dictWord{7, 10, 163}, + dictWord{8, 10, 319}, + dictWord{ + 9, + 10, + 402, + }, + dictWord{10, 10, 24}, + dictWord{10, 10, 681}, + dictWord{11, 10, 200}, + dictWord{12, 10, 253}, + dictWord{12, 10, 410}, + dictWord{142, 10, 219}, + dictWord{4, 11, 413}, + dictWord{5, 11, 677}, + dictWord{8, 11, 432}, + dictWord{140, 11, 280}, + dictWord{9, 0, 401}, + dictWord{5, 10, 475}, + dictWord{7, 10, 1780}, + dictWord{11, 10, 297}, + dictWord{11, 
10, 558}, + dictWord{14, 10, 322}, + dictWord{147, 10, 76}, + dictWord{6, 0, 781}, + dictWord{9, 0, 134}, + dictWord{10, 0, 2}, + dictWord{ + 10, + 0, + 27, + }, + dictWord{10, 0, 333}, + dictWord{11, 0, 722}, + dictWord{143, 0, 1}, + dictWord{5, 0, 33}, + dictWord{6, 0, 470}, + dictWord{139, 0, 424}, + dictWord{ + 135, + 0, + 2006, + }, + dictWord{12, 0, 783}, + dictWord{135, 10, 1956}, + dictWord{136, 0, 274}, + dictWord{135, 0, 1882}, + dictWord{132, 0, 794}, + dictWord{135, 0, 1848}, + dictWord{5, 10, 944}, + dictWord{134, 10, 1769}, + dictWord{6, 0, 47}, + dictWord{7, 0, 90}, + dictWord{7, 0, 664}, + dictWord{7, 0, 830}, + dictWord{7, 0, 1380}, + dictWord{ + 7, + 0, + 2025, + }, + dictWord{8, 0, 448}, + dictWord{136, 0, 828}, + dictWord{132, 10, 144}, + dictWord{134, 0, 1199}, + dictWord{4, 11, 395}, + dictWord{139, 11, 762}, + dictWord{135, 11, 1504}, + dictWord{9, 0, 417}, + dictWord{137, 0, 493}, + dictWord{9, 11, 174}, + dictWord{10, 11, 164}, + dictWord{11, 11, 440}, + dictWord{11, 11, 841}, + dictWord{143, 11, 98}, + dictWord{134, 11, 426}, + dictWord{139, 11, 1002}, + dictWord{134, 0, 295}, + dictWord{134, 0, 816}, + dictWord{6, 10, 247}, + dictWord{ + 137, + 10, + 555, + }, + dictWord{133, 0, 1019}, + dictWord{4, 0, 620}, + dictWord{5, 11, 476}, + dictWord{10, 10, 280}, + dictWord{138, 10, 797}, + dictWord{139, 0, 464}, + dictWord{5, 11, 76}, + dictWord{6, 11, 458}, + dictWord{6, 11, 497}, + dictWord{7, 11, 764}, + dictWord{7, 11, 868}, + dictWord{9, 11, 658}, + dictWord{10, 11, 594}, + dictWord{ + 11, + 11, + 173, + }, + dictWord{11, 11, 566}, + dictWord{12, 11, 20}, + dictWord{12, 11, 338}, + dictWord{141, 11, 200}, + dictWord{134, 0, 208}, + dictWord{4, 11, 526}, + dictWord{7, 11, 1029}, + dictWord{135, 11, 1054}, + dictWord{132, 11, 636}, + dictWord{6, 11, 233}, + dictWord{7, 11, 660}, + dictWord{7, 11, 1124}, + dictWord{ + 17, + 11, + 31, + }, + dictWord{19, 11, 22}, + dictWord{151, 11, 14}, + dictWord{10, 0, 442}, + dictWord{133, 10, 428}, + 
dictWord{10, 0, 930}, + dictWord{140, 0, 778}, + dictWord{ + 6, + 0, + 68, + }, + dictWord{7, 0, 448}, + dictWord{7, 0, 1629}, + dictWord{7, 0, 1769}, + dictWord{7, 0, 1813}, + dictWord{8, 0, 442}, + dictWord{8, 0, 516}, + dictWord{9, 0, 710}, + dictWord{ + 10, + 0, + 282, + }, + dictWord{10, 0, 722}, + dictWord{7, 10, 1717}, + dictWord{138, 10, 546}, + dictWord{134, 0, 1128}, + dictWord{11, 0, 844}, + dictWord{12, 0, 104}, + dictWord{140, 0, 625}, + dictWord{4, 11, 432}, + dictWord{135, 11, 824}, + dictWord{138, 10, 189}, + dictWord{133, 0, 787}, + dictWord{133, 10, 99}, + dictWord{ + 4, + 11, + 279, + }, + dictWord{7, 11, 301}, + dictWord{137, 11, 362}, + dictWord{8, 0, 491}, + dictWord{4, 10, 397}, + dictWord{136, 10, 555}, + dictWord{4, 11, 178}, + dictWord{ + 133, + 11, + 399, + }, + dictWord{134, 0, 711}, + dictWord{144, 0, 9}, + dictWord{4, 0, 403}, + dictWord{5, 0, 441}, + dictWord{7, 0, 450}, + dictWord{10, 0, 840}, + dictWord{11, 0, 101}, + dictWord{12, 0, 193}, + dictWord{141, 0, 430}, + dictWord{135, 11, 1246}, + dictWord{12, 10, 398}, + dictWord{20, 10, 39}, + dictWord{21, 10, 11}, + dictWord{ + 150, + 10, + 41, + }, + dictWord{4, 10, 485}, + dictWord{7, 10, 353}, + dictWord{135, 10, 1523}, + dictWord{6, 10, 366}, + dictWord{7, 10, 1384}, + dictWord{7, 10, 1601}, + dictWord{ + 135, + 11, + 1912, + }, + dictWord{7, 0, 396}, + dictWord{10, 0, 160}, + dictWord{135, 11, 396}, + dictWord{137, 10, 282}, + dictWord{134, 11, 1692}, + dictWord{4, 10, 157}, + dictWord{5, 10, 471}, + dictWord{6, 11, 202}, + dictWord{10, 11, 448}, + dictWord{11, 11, 208}, + dictWord{12, 11, 360}, + dictWord{17, 11, 117}, + dictWord{ + 17, + 11, + 118, + }, + dictWord{18, 11, 27}, + dictWord{148, 11, 67}, + dictWord{133, 0, 679}, + dictWord{137, 0, 326}, + dictWord{136, 10, 116}, + dictWord{7, 11, 872}, + dictWord{ + 10, + 11, + 516, + }, + dictWord{139, 11, 167}, + dictWord{132, 11, 224}, + dictWord{5, 11, 546}, + dictWord{7, 11, 35}, + dictWord{8, 11, 11}, + dictWord{8, 11, 12}, 
+ dictWord{ + 9, + 11, + 315, + }, + dictWord{9, 11, 533}, + dictWord{10, 11, 802}, + dictWord{11, 11, 166}, + dictWord{12, 11, 525}, + dictWord{142, 11, 243}, + dictWord{7, 0, 1128}, + dictWord{135, 11, 1920}, + dictWord{5, 11, 241}, + dictWord{8, 11, 242}, + dictWord{9, 11, 451}, + dictWord{10, 11, 667}, + dictWord{11, 11, 598}, + dictWord{ + 140, + 11, + 429, + }, + dictWord{6, 0, 737}, + dictWord{5, 10, 160}, + dictWord{7, 10, 363}, + dictWord{7, 10, 589}, + dictWord{10, 10, 170}, + dictWord{141, 10, 55}, + dictWord{ + 135, + 0, + 1796, + }, + dictWord{142, 11, 254}, + dictWord{4, 0, 574}, + dictWord{7, 0, 350}, + dictWord{7, 0, 1024}, + dictWord{8, 0, 338}, + dictWord{9, 0, 677}, + dictWord{138, 0, 808}, + dictWord{134, 0, 1096}, + dictWord{137, 11, 516}, + dictWord{7, 0, 405}, + dictWord{10, 0, 491}, + dictWord{4, 10, 108}, + dictWord{4, 11, 366}, + dictWord{ + 139, + 10, + 498, + }, + dictWord{11, 11, 337}, + dictWord{142, 11, 303}, + dictWord{134, 11, 1736}, + dictWord{7, 0, 1081}, + dictWord{140, 11, 364}, + dictWord{7, 10, 1005}, + dictWord{140, 10, 609}, + dictWord{7, 0, 1676}, + dictWord{4, 10, 895}, + dictWord{133, 10, 772}, + dictWord{135, 0, 2037}, + dictWord{6, 0, 1207}, + dictWord{ + 11, + 11, + 916, + }, + dictWord{142, 11, 419}, + dictWord{14, 11, 140}, + dictWord{148, 11, 41}, + dictWord{6, 11, 331}, + dictWord{136, 11, 623}, + dictWord{9, 0, 944}, + dictWord{ + 9, + 0, + 969, + }, + dictWord{9, 0, 1022}, + dictWord{12, 0, 913}, + dictWord{12, 0, 936}, + dictWord{15, 0, 177}, + dictWord{15, 0, 193}, + dictWord{4, 10, 926}, + dictWord{ + 133, + 10, + 983, + }, + dictWord{5, 0, 354}, + dictWord{135, 11, 506}, + dictWord{8, 0, 598}, + dictWord{9, 0, 664}, + dictWord{138, 0, 441}, + dictWord{4, 11, 640}, + dictWord{ + 133, + 11, + 513, + }, + dictWord{137, 0, 297}, + dictWord{132, 10, 538}, + dictWord{6, 10, 294}, + dictWord{7, 10, 1267}, + dictWord{136, 10, 624}, + dictWord{7, 0, 1772}, + dictWord{ + 7, + 11, + 1888, + }, + dictWord{8, 11, 289}, + 
dictWord{11, 11, 45}, + dictWord{12, 11, 278}, + dictWord{140, 11, 537}, + dictWord{135, 10, 1325}, + dictWord{138, 0, 751}, + dictWord{141, 0, 37}, + dictWord{134, 0, 1828}, + dictWord{132, 10, 757}, + dictWord{132, 11, 394}, + dictWord{6, 0, 257}, + dictWord{135, 0, 1522}, + dictWord{ + 4, + 0, + 582, + }, + dictWord{9, 0, 191}, + dictWord{135, 11, 1931}, + dictWord{7, 11, 574}, + dictWord{7, 11, 1719}, + dictWord{137, 11, 145}, + dictWord{132, 11, 658}, + dictWord{10, 0, 790}, + dictWord{132, 11, 369}, + dictWord{9, 11, 781}, + dictWord{10, 11, 144}, + dictWord{11, 11, 385}, + dictWord{13, 11, 161}, + dictWord{13, 11, 228}, + dictWord{13, 11, 268}, + dictWord{148, 11, 107}, + dictWord{8, 0, 469}, + dictWord{10, 0, 47}, + dictWord{136, 11, 374}, + dictWord{6, 0, 306}, + dictWord{7, 0, 1140}, + dictWord{7, 0, 1340}, + dictWord{8, 0, 133}, + dictWord{138, 0, 449}, + dictWord{139, 0, 1011}, + dictWord{7, 10, 1875}, + dictWord{139, 10, 124}, + dictWord{ + 4, + 11, + 344, + }, + dictWord{6, 11, 498}, + dictWord{139, 11, 323}, + dictWord{137, 0, 299}, + dictWord{132, 0, 837}, + dictWord{133, 11, 906}, + dictWord{5, 0, 329}, + dictWord{ + 8, + 0, + 260, + }, + dictWord{138, 0, 10}, + dictWord{134, 0, 1320}, + dictWord{4, 0, 657}, + dictWord{146, 0, 158}, + dictWord{135, 0, 1191}, + dictWord{152, 0, 7}, + dictWord{ + 6, + 0, + 1939, + }, + dictWord{8, 0, 974}, + dictWord{138, 0, 996}, + dictWord{135, 0, 1665}, + dictWord{11, 11, 126}, + dictWord{139, 11, 287}, + dictWord{143, 0, 8}, + dictWord{ + 14, + 11, + 149, + }, + dictWord{14, 11, 399}, + dictWord{143, 11, 57}, + dictWord{5, 0, 66}, + dictWord{7, 0, 1896}, + dictWord{136, 0, 288}, + dictWord{7, 0, 175}, + dictWord{ + 10, + 0, + 494, + }, + dictWord{5, 10, 150}, + dictWord{8, 10, 603}, + dictWord{9, 10, 593}, + dictWord{9, 10, 634}, + dictWord{10, 10, 173}, + dictWord{11, 10, 462}, + dictWord{ + 11, + 10, + 515, + }, + dictWord{13, 10, 216}, + dictWord{13, 10, 288}, + dictWord{142, 10, 400}, + dictWord{134, 0, 
1643}, + dictWord{136, 11, 21}, + dictWord{4, 0, 21}, + dictWord{ + 5, + 0, + 91, + }, + dictWord{5, 0, 648}, + dictWord{5, 0, 750}, + dictWord{5, 0, 781}, + dictWord{6, 0, 54}, + dictWord{6, 0, 112}, + dictWord{6, 0, 402}, + dictWord{6, 0, 1732}, + dictWord{ + 7, + 0, + 315, + }, + dictWord{7, 0, 749}, + dictWord{7, 0, 1427}, + dictWord{7, 0, 1900}, + dictWord{9, 0, 78}, + dictWord{9, 0, 508}, + dictWord{10, 0, 611}, + dictWord{10, 0, 811}, + dictWord{11, 0, 510}, + dictWord{11, 0, 728}, + dictWord{13, 0, 36}, + dictWord{14, 0, 39}, + dictWord{16, 0, 83}, + dictWord{17, 0, 124}, + dictWord{148, 0, 30}, + dictWord{ + 4, + 0, + 668, + }, + dictWord{136, 0, 570}, + dictWord{10, 0, 322}, + dictWord{10, 0, 719}, + dictWord{139, 0, 407}, + dictWord{135, 11, 1381}, + dictWord{136, 11, 193}, + dictWord{12, 10, 108}, + dictWord{141, 10, 291}, + dictWord{132, 11, 616}, + dictWord{136, 11, 692}, + dictWord{8, 0, 125}, + dictWord{8, 0, 369}, + dictWord{8, 0, 524}, + dictWord{10, 0, 486}, + dictWord{11, 0, 13}, + dictWord{11, 0, 381}, + dictWord{11, 0, 736}, + dictWord{11, 0, 766}, + dictWord{11, 0, 845}, + dictWord{13, 0, 114}, + dictWord{ + 13, + 0, + 292, + }, + dictWord{142, 0, 47}, + dictWord{134, 0, 1247}, + dictWord{6, 0, 1684}, + dictWord{6, 0, 1731}, + dictWord{7, 0, 356}, + dictWord{8, 0, 54}, + dictWord{8, 0, 221}, + dictWord{9, 0, 225}, + dictWord{9, 0, 356}, + dictWord{10, 0, 77}, + dictWord{10, 0, 446}, + dictWord{10, 0, 731}, + dictWord{12, 0, 404}, + dictWord{141, 0, 491}, + dictWord{135, 10, 1777}, + dictWord{4, 11, 305}, + dictWord{4, 10, 493}, + dictWord{144, 10, 55}, + dictWord{4, 0, 951}, + dictWord{6, 0, 1809}, + dictWord{6, 0, 1849}, + dictWord{8, 0, 846}, + dictWord{8, 0, 866}, + dictWord{8, 0, 899}, + dictWord{10, 0, 896}, + dictWord{12, 0, 694}, + dictWord{142, 0, 468}, + dictWord{5, 11, 214}, + dictWord{ + 7, + 11, + 603, + }, + dictWord{8, 11, 611}, + dictWord{9, 11, 686}, + dictWord{10, 11, 88}, + dictWord{11, 11, 459}, + dictWord{11, 11, 496}, + 
dictWord{12, 11, 463}, + dictWord{ + 12, + 11, + 590, + }, + dictWord{13, 11, 0}, + dictWord{142, 11, 214}, + dictWord{132, 0, 411}, + dictWord{4, 0, 80}, + dictWord{133, 0, 44}, + dictWord{140, 11, 74}, + dictWord{ + 143, + 0, + 31, + }, + dictWord{7, 0, 669}, + dictWord{6, 10, 568}, + dictWord{7, 10, 1804}, + dictWord{8, 10, 362}, + dictWord{8, 10, 410}, + dictWord{8, 10, 830}, + dictWord{9, 10, 514}, + dictWord{11, 10, 649}, + dictWord{142, 10, 157}, + dictWord{7, 0, 673}, + dictWord{134, 11, 1703}, + dictWord{132, 10, 625}, + dictWord{134, 0, 1303}, + dictWord{ + 5, + 0, + 299, + }, + dictWord{135, 0, 1083}, + dictWord{138, 0, 704}, + dictWord{6, 0, 275}, + dictWord{7, 0, 408}, + dictWord{6, 10, 158}, + dictWord{7, 10, 129}, + dictWord{ + 7, + 10, + 181, + }, + dictWord{8, 10, 276}, + dictWord{8, 10, 377}, + dictWord{10, 10, 523}, + dictWord{11, 10, 816}, + dictWord{12, 10, 455}, + dictWord{13, 10, 303}, + dictWord{ + 142, + 10, + 135, + }, + dictWord{4, 0, 219}, + dictWord{7, 0, 367}, + dictWord{7, 0, 1713}, + dictWord{7, 0, 1761}, + dictWord{9, 0, 86}, + dictWord{9, 0, 537}, + dictWord{10, 0, 165}, + dictWord{12, 0, 219}, + dictWord{140, 0, 561}, + dictWord{8, 0, 216}, + dictWord{4, 10, 1}, + dictWord{4, 11, 737}, + dictWord{6, 11, 317}, + dictWord{7, 10, 1143}, + dictWord{ + 7, + 10, + 1463, + }, + dictWord{9, 10, 207}, + dictWord{9, 10, 390}, + dictWord{9, 10, 467}, + dictWord{10, 11, 98}, + dictWord{11, 11, 294}, + dictWord{11, 10, 836}, + dictWord{ + 12, + 11, + 60, + }, + dictWord{12, 11, 437}, + dictWord{13, 11, 64}, + dictWord{13, 11, 380}, + dictWord{142, 11, 430}, + dictWord{6, 11, 1758}, + dictWord{8, 11, 520}, + dictWord{9, 11, 345}, + dictWord{9, 11, 403}, + dictWord{142, 11, 350}, + dictWord{5, 11, 47}, + dictWord{10, 11, 242}, + dictWord{138, 11, 579}, + dictWord{5, 11, 139}, + dictWord{7, 11, 1168}, + dictWord{138, 11, 539}, + dictWord{135, 0, 1319}, + dictWord{4, 10, 295}, + dictWord{4, 10, 723}, + dictWord{5, 10, 895}, + dictWord{ + 7, + 10, 
+ 1031, + }, + dictWord{8, 10, 199}, + dictWord{8, 10, 340}, + dictWord{9, 10, 153}, + dictWord{9, 10, 215}, + dictWord{10, 10, 21}, + dictWord{10, 10, 59}, + dictWord{ + 10, + 10, + 80, + }, + dictWord{10, 10, 224}, + dictWord{10, 10, 838}, + dictWord{11, 10, 229}, + dictWord{11, 10, 652}, + dictWord{12, 10, 192}, + dictWord{13, 10, 146}, + dictWord{ + 142, + 10, + 91, + }, + dictWord{140, 0, 428}, + dictWord{137, 10, 51}, + dictWord{133, 0, 514}, + dictWord{5, 10, 309}, + dictWord{140, 10, 211}, + dictWord{6, 0, 1010}, + dictWord{5, 10, 125}, + dictWord{8, 10, 77}, + dictWord{138, 10, 15}, + dictWord{4, 0, 55}, + dictWord{5, 0, 301}, + dictWord{6, 0, 571}, + dictWord{142, 0, 49}, + dictWord{ + 146, + 0, + 102, + }, + dictWord{136, 11, 370}, + dictWord{4, 11, 107}, + dictWord{7, 11, 613}, + dictWord{8, 11, 358}, + dictWord{8, 11, 439}, + dictWord{8, 11, 504}, + dictWord{ + 9, + 11, + 501, + }, + dictWord{10, 11, 383}, + dictWord{139, 11, 477}, + dictWord{132, 11, 229}, + dictWord{133, 0, 364}, + dictWord{133, 10, 439}, + dictWord{4, 11, 903}, + dictWord{135, 11, 1816}, + dictWord{11, 0, 379}, + dictWord{140, 10, 76}, + dictWord{4, 0, 76}, + dictWord{4, 0, 971}, + dictWord{7, 0, 1550}, + dictWord{9, 0, 306}, + dictWord{ + 9, + 0, + 430, + }, + dictWord{9, 0, 663}, + dictWord{10, 0, 683}, + dictWord{10, 0, 921}, + dictWord{11, 0, 427}, + dictWord{11, 0, 753}, + dictWord{12, 0, 334}, + dictWord{12, 0, 442}, + dictWord{14, 0, 258}, + dictWord{14, 0, 366}, + dictWord{143, 0, 131}, + dictWord{137, 0, 52}, + dictWord{4, 11, 47}, + dictWord{6, 11, 373}, + dictWord{7, 11, 452}, + dictWord{7, 11, 543}, + dictWord{7, 11, 1714}, + dictWord{7, 11, 1856}, + dictWord{9, 11, 6}, + dictWord{11, 11, 257}, + dictWord{139, 11, 391}, + dictWord{4, 10, 8}, + dictWord{ + 7, + 10, + 1152, + }, + dictWord{7, 10, 1153}, + dictWord{7, 10, 1715}, + dictWord{9, 10, 374}, + dictWord{10, 10, 478}, + dictWord{139, 10, 648}, + dictWord{4, 11, 785}, + dictWord{133, 11, 368}, + dictWord{135, 10, 
1099}, + dictWord{135, 11, 860}, + dictWord{5, 11, 980}, + dictWord{134, 11, 1754}, + dictWord{134, 0, 1258}, + dictWord{ + 6, + 0, + 1058, + }, + dictWord{6, 0, 1359}, + dictWord{7, 11, 536}, + dictWord{7, 11, 1331}, + dictWord{136, 11, 143}, + dictWord{4, 0, 656}, + dictWord{135, 0, 779}, + dictWord{136, 10, 87}, + dictWord{5, 11, 19}, + dictWord{6, 11, 533}, + dictWord{146, 11, 126}, + dictWord{7, 0, 144}, + dictWord{138, 10, 438}, + dictWord{5, 11, 395}, + dictWord{5, 11, 951}, + dictWord{134, 11, 1776}, + dictWord{135, 0, 1373}, + dictWord{7, 0, 554}, + dictWord{7, 0, 605}, + dictWord{141, 0, 10}, + dictWord{4, 10, 69}, + dictWord{ + 5, + 10, + 122, + }, + dictWord{9, 10, 656}, + dictWord{138, 10, 464}, + dictWord{5, 10, 849}, + dictWord{134, 10, 1633}, + dictWord{5, 0, 838}, + dictWord{5, 0, 841}, + dictWord{134, 0, 1649}, + dictWord{133, 0, 1012}, + dictWord{139, 10, 499}, + dictWord{7, 10, 476}, + dictWord{7, 10, 1592}, + dictWord{138, 10, 87}, + dictWord{ + 6, + 0, + 251, + }, + dictWord{7, 0, 365}, + dictWord{7, 0, 1357}, + dictWord{7, 0, 1497}, + dictWord{8, 0, 154}, + dictWord{141, 0, 281}, + dictWord{132, 11, 441}, + dictWord{ + 132, + 11, + 695, + }, + dictWord{7, 11, 497}, + dictWord{9, 11, 387}, + dictWord{147, 11, 81}, + dictWord{133, 0, 340}, + dictWord{14, 10, 283}, + dictWord{142, 11, 283}, + dictWord{ + 134, + 0, + 810, + }, + dictWord{135, 11, 1894}, + dictWord{139, 0, 495}, + dictWord{5, 11, 284}, + dictWord{6, 11, 49}, + dictWord{6, 11, 350}, + dictWord{7, 11, 1}, + dictWord{ + 7, + 11, + 377, + }, + dictWord{7, 11, 1693}, + dictWord{8, 11, 18}, + dictWord{8, 11, 678}, + dictWord{9, 11, 161}, + dictWord{9, 11, 585}, + dictWord{9, 11, 671}, + dictWord{ + 9, + 11, + 839, + }, + dictWord{11, 11, 912}, + dictWord{141, 11, 427}, + dictWord{5, 10, 859}, + dictWord{7, 10, 1160}, + dictWord{8, 10, 107}, + dictWord{9, 10, 291}, + dictWord{ + 9, + 10, + 439, + }, + dictWord{10, 10, 663}, + dictWord{11, 10, 609}, + dictWord{140, 10, 197}, + dictWord{8, 
0, 261}, + dictWord{9, 0, 144}, + dictWord{9, 0, 466}, + dictWord{ + 10, + 0, + 370, + }, + dictWord{12, 0, 470}, + dictWord{13, 0, 144}, + dictWord{142, 0, 348}, + dictWord{137, 0, 897}, + dictWord{6, 0, 248}, + dictWord{9, 0, 546}, + dictWord{10, 0, 535}, + dictWord{11, 0, 681}, + dictWord{141, 0, 135}, + dictWord{4, 0, 358}, + dictWord{135, 0, 1496}, + dictWord{134, 0, 567}, + dictWord{136, 0, 445}, + dictWord{ + 4, + 10, + 117, + }, + dictWord{6, 10, 372}, + dictWord{7, 10, 1905}, + dictWord{142, 10, 323}, + dictWord{4, 10, 722}, + dictWord{139, 10, 471}, + dictWord{6, 0, 697}, + dictWord{ + 134, + 0, + 996, + }, + dictWord{7, 11, 2007}, + dictWord{9, 11, 101}, + dictWord{9, 11, 450}, + dictWord{10, 11, 66}, + dictWord{10, 11, 842}, + dictWord{11, 11, 536}, + dictWord{ + 140, + 11, + 587, + }, + dictWord{132, 0, 577}, + dictWord{134, 0, 1336}, + dictWord{9, 10, 5}, + dictWord{12, 10, 216}, + dictWord{12, 10, 294}, + dictWord{12, 10, 298}, + dictWord{12, 10, 400}, + dictWord{12, 10, 518}, + dictWord{13, 10, 229}, + dictWord{143, 10, 139}, + dictWord{6, 0, 174}, + dictWord{138, 0, 917}, + dictWord{ + 134, + 10, + 1774, + }, + dictWord{5, 10, 12}, + dictWord{7, 10, 375}, + dictWord{9, 10, 88}, + dictWord{9, 10, 438}, + dictWord{11, 11, 62}, + dictWord{139, 10, 270}, + dictWord{ + 134, + 11, + 1766, + }, + dictWord{6, 11, 0}, + dictWord{7, 11, 84}, + dictWord{7, 10, 816}, + dictWord{7, 10, 1241}, + dictWord{9, 10, 283}, + dictWord{9, 10, 520}, + dictWord{10, 10, 213}, + dictWord{10, 10, 307}, + dictWord{10, 10, 463}, + dictWord{10, 10, 671}, + dictWord{10, 10, 746}, + dictWord{11, 10, 401}, + dictWord{11, 10, 794}, + dictWord{ + 11, + 11, + 895, + }, + dictWord{12, 10, 517}, + dictWord{17, 11, 11}, + dictWord{18, 10, 107}, + dictWord{147, 10, 115}, + dictWord{5, 0, 878}, + dictWord{133, 0, 972}, + dictWord{ + 6, + 11, + 1665, + }, + dictWord{7, 11, 256}, + dictWord{7, 11, 1388}, + dictWord{138, 11, 499}, + dictWord{4, 10, 258}, + dictWord{136, 10, 639}, + 
dictWord{4, 11, 22}, + dictWord{5, 11, 10}, + dictWord{6, 10, 22}, + dictWord{7, 11, 848}, + dictWord{7, 10, 903}, + dictWord{7, 10, 1963}, + dictWord{8, 11, 97}, + dictWord{138, 10, 577}, + dictWord{ + 5, + 10, + 681, + }, + dictWord{136, 10, 782}, + dictWord{133, 11, 481}, + dictWord{132, 0, 351}, + dictWord{4, 10, 664}, + dictWord{5, 10, 804}, + dictWord{139, 10, 1013}, + dictWord{6, 11, 134}, + dictWord{7, 11, 437}, + dictWord{7, 11, 959}, + dictWord{9, 11, 37}, + dictWord{14, 11, 285}, + dictWord{14, 11, 371}, + dictWord{144, 11, 60}, + dictWord{7, 11, 486}, + dictWord{8, 11, 155}, + dictWord{11, 11, 93}, + dictWord{140, 11, 164}, + dictWord{132, 0, 286}, + dictWord{7, 0, 438}, + dictWord{7, 0, 627}, + dictWord{7, 0, 1516}, + dictWord{8, 0, 40}, + dictWord{9, 0, 56}, + dictWord{9, 0, 294}, + dictWord{10, 0, 30}, + dictWord{11, 0, 969}, + dictWord{11, 0, 995}, + dictWord{146, 0, 148}, + dictWord{5, 11, 591}, + dictWord{135, 11, 337}, + dictWord{134, 0, 1950}, + dictWord{133, 10, 32}, + dictWord{138, 11, 500}, + dictWord{5, 11, 380}, + dictWord{ + 5, + 11, + 650, + }, + dictWord{136, 11, 310}, + dictWord{4, 11, 364}, + dictWord{7, 11, 1156}, + dictWord{7, 11, 1187}, + dictWord{137, 11, 409}, + dictWord{4, 0, 738}, + dictWord{134, 11, 482}, + dictWord{4, 11, 781}, + dictWord{6, 11, 487}, + dictWord{7, 11, 926}, + dictWord{8, 11, 263}, + dictWord{139, 11, 500}, + dictWord{135, 11, 418}, + dictWord{6, 0, 2047}, + dictWord{10, 0, 969}, + dictWord{4, 10, 289}, + dictWord{7, 10, 629}, + dictWord{7, 10, 1698}, + dictWord{7, 10, 1711}, + dictWord{ + 140, + 10, + 215, + }, + dictWord{6, 10, 450}, + dictWord{136, 10, 109}, + dictWord{134, 0, 818}, + dictWord{136, 10, 705}, + dictWord{133, 0, 866}, + dictWord{4, 11, 94}, + dictWord{ + 135, + 11, + 1265, + }, + dictWord{132, 11, 417}, + dictWord{134, 0, 1467}, + dictWord{135, 10, 1238}, + dictWord{4, 0, 972}, + dictWord{6, 0, 1851}, + dictWord{ + 134, + 0, + 1857, + }, + dictWord{134, 0, 355}, + dictWord{133, 0, 116}, + 
dictWord{132, 0, 457}, + dictWord{135, 11, 1411}, + dictWord{4, 11, 408}, + dictWord{4, 11, 741}, + dictWord{135, 11, 500}, + dictWord{134, 10, 26}, + dictWord{142, 11, 137}, + dictWord{5, 0, 527}, + dictWord{6, 0, 189}, + dictWord{7, 0, 859}, + dictWord{136, 0, 267}, + dictWord{11, 0, 104}, + dictWord{11, 0, 554}, + dictWord{15, 0, 60}, + dictWord{143, 0, 125}, + dictWord{134, 0, 1613}, + dictWord{4, 10, 414}, + dictWord{5, 10, 467}, + dictWord{ + 9, + 10, + 654, + }, + dictWord{10, 10, 451}, + dictWord{12, 10, 59}, + dictWord{141, 10, 375}, + dictWord{135, 10, 17}, + dictWord{134, 0, 116}, + dictWord{135, 11, 541}, + dictWord{135, 10, 955}, + dictWord{6, 11, 73}, + dictWord{135, 11, 177}, + dictWord{133, 11, 576}, + dictWord{134, 0, 886}, + dictWord{133, 0, 487}, + dictWord{ + 4, + 0, + 86, + }, + dictWord{5, 0, 667}, + dictWord{5, 0, 753}, + dictWord{6, 0, 316}, + dictWord{6, 0, 455}, + dictWord{135, 0, 946}, + dictWord{142, 11, 231}, + dictWord{150, 0, 45}, + dictWord{134, 0, 863}, + dictWord{134, 0, 1953}, + dictWord{6, 10, 280}, + dictWord{10, 10, 502}, + dictWord{11, 10, 344}, + dictWord{140, 10, 38}, + dictWord{4, 0, 79}, + dictWord{7, 0, 1773}, + dictWord{10, 0, 450}, + dictWord{11, 0, 589}, + dictWord{13, 0, 332}, + dictWord{13, 0, 493}, + dictWord{14, 0, 183}, + dictWord{14, 0, 334}, + dictWord{14, 0, 362}, + dictWord{14, 0, 368}, + dictWord{14, 0, 376}, + dictWord{14, 0, 379}, + dictWord{19, 0, 90}, + dictWord{19, 0, 103}, + dictWord{19, 0, 127}, + dictWord{ + 148, + 0, + 90, + }, + dictWord{5, 10, 45}, + dictWord{7, 10, 1161}, + dictWord{11, 10, 448}, + dictWord{11, 10, 880}, + dictWord{13, 10, 139}, + dictWord{13, 10, 407}, + dictWord{ + 15, + 10, + 16, + }, + dictWord{17, 10, 95}, + dictWord{18, 10, 66}, + dictWord{18, 10, 88}, + dictWord{18, 10, 123}, + dictWord{149, 10, 7}, + dictWord{136, 10, 777}, + dictWord{ + 4, + 10, + 410, + }, + dictWord{135, 10, 521}, + dictWord{135, 10, 1778}, + dictWord{135, 11, 538}, + dictWord{142, 0, 381}, + 
dictWord{133, 11, 413}, + dictWord{ + 134, + 0, + 1142, + }, + dictWord{6, 0, 1189}, + dictWord{136, 11, 495}, + dictWord{5, 0, 663}, + dictWord{6, 0, 1962}, + dictWord{134, 0, 2003}, + dictWord{7, 11, 54}, + dictWord{ + 8, + 11, + 312, + }, + dictWord{10, 11, 191}, + dictWord{10, 11, 614}, + dictWord{140, 11, 567}, + dictWord{132, 10, 436}, + dictWord{133, 0, 846}, + dictWord{10, 0, 528}, + dictWord{11, 0, 504}, + dictWord{7, 10, 1587}, + dictWord{135, 10, 1707}, + dictWord{5, 0, 378}, + dictWord{8, 0, 465}, + dictWord{9, 0, 286}, + dictWord{10, 0, 185}, + dictWord{ + 10, + 0, + 562, + }, + dictWord{10, 0, 635}, + dictWord{11, 0, 31}, + dictWord{11, 0, 393}, + dictWord{13, 0, 312}, + dictWord{18, 0, 65}, + dictWord{18, 0, 96}, + dictWord{147, 0, 89}, + dictWord{7, 0, 899}, + dictWord{14, 0, 325}, + dictWord{6, 11, 468}, + dictWord{7, 11, 567}, + dictWord{7, 11, 1478}, + dictWord{8, 11, 530}, + dictWord{142, 11, 290}, + dictWord{7, 0, 1880}, + dictWord{9, 0, 680}, + dictWord{139, 0, 798}, + dictWord{134, 0, 1770}, + dictWord{132, 0, 648}, + dictWord{150, 11, 35}, + dictWord{5, 0, 945}, + dictWord{6, 0, 1656}, + dictWord{6, 0, 1787}, + dictWord{7, 0, 167}, + dictWord{8, 0, 824}, + dictWord{9, 0, 391}, + dictWord{10, 0, 375}, + dictWord{139, 0, 185}, + dictWord{ + 6, + 11, + 484, + }, + dictWord{135, 11, 822}, + dictWord{134, 0, 2046}, + dictWord{7, 0, 1645}, + dictWord{8, 0, 352}, + dictWord{137, 0, 249}, + dictWord{132, 0, 152}, + dictWord{6, 0, 611}, + dictWord{135, 0, 1733}, + dictWord{6, 11, 1724}, + dictWord{135, 11, 2022}, + dictWord{133, 0, 1006}, + dictWord{141, 11, 96}, + dictWord{ + 5, + 0, + 420, + }, + dictWord{135, 0, 1449}, + dictWord{146, 11, 149}, + dictWord{135, 0, 832}, + dictWord{135, 10, 663}, + dictWord{133, 0, 351}, + dictWord{5, 0, 40}, + dictWord{ + 7, + 0, + 598, + }, + dictWord{7, 0, 1638}, + dictWord{8, 0, 78}, + dictWord{9, 0, 166}, + dictWord{9, 0, 640}, + dictWord{9, 0, 685}, + dictWord{9, 0, 773}, + dictWord{11, 0, 215}, + dictWord{13, 
0, 65}, + dictWord{14, 0, 172}, + dictWord{14, 0, 317}, + dictWord{145, 0, 6}, + dictWord{8, 0, 60}, + dictWord{9, 0, 343}, + dictWord{139, 0, 769}, + dictWord{ + 134, + 0, + 1354, + }, + dictWord{132, 0, 724}, + dictWord{137, 0, 745}, + dictWord{132, 11, 474}, + dictWord{7, 0, 1951}, + dictWord{8, 0, 765}, + dictWord{8, 0, 772}, + dictWord{ + 140, + 0, + 671, + }, + dictWord{7, 0, 108}, + dictWord{8, 0, 219}, + dictWord{8, 0, 388}, + dictWord{9, 0, 775}, + dictWord{11, 0, 275}, + dictWord{140, 0, 464}, + dictWord{137, 0, 639}, + dictWord{135, 10, 503}, + dictWord{133, 11, 366}, + dictWord{5, 0, 15}, + dictWord{6, 0, 56}, + dictWord{7, 0, 1758}, + dictWord{8, 0, 500}, + dictWord{9, 0, 730}, + dictWord{ + 11, + 0, + 331, + }, + dictWord{13, 0, 150}, + dictWord{14, 0, 282}, + dictWord{5, 11, 305}, + dictWord{9, 11, 560}, + dictWord{141, 11, 208}, + dictWord{4, 10, 113}, + dictWord{ + 5, + 10, + 163, + }, + dictWord{5, 10, 735}, + dictWord{7, 10, 1009}, + dictWord{9, 10, 9}, + dictWord{9, 10, 771}, + dictWord{12, 10, 90}, + dictWord{13, 10, 138}, + dictWord{ + 13, + 10, + 410, + }, + dictWord{143, 10, 128}, + dictWord{4, 10, 324}, + dictWord{138, 10, 104}, + dictWord{135, 11, 466}, + dictWord{142, 11, 27}, + dictWord{134, 0, 1886}, + dictWord{5, 0, 205}, + dictWord{6, 0, 438}, + dictWord{9, 0, 711}, + dictWord{4, 11, 480}, + dictWord{6, 11, 167}, + dictWord{6, 11, 302}, + dictWord{6, 11, 1642}, + dictWord{ + 7, + 11, + 130, + }, + dictWord{7, 11, 656}, + dictWord{7, 11, 837}, + dictWord{7, 11, 1547}, + dictWord{7, 11, 1657}, + dictWord{8, 11, 429}, + dictWord{9, 11, 228}, + dictWord{ + 10, + 11, + 643, + }, + dictWord{13, 11, 289}, + dictWord{13, 11, 343}, + dictWord{147, 11, 101}, + dictWord{134, 0, 865}, + dictWord{6, 0, 2025}, + dictWord{136, 0, 965}, + dictWord{ + 7, + 11, + 278, + }, + dictWord{10, 11, 739}, + dictWord{11, 11, 708}, + dictWord{141, 11, 348}, + dictWord{133, 0, 534}, + dictWord{135, 11, 1922}, + dictWord{ + 137, + 0, + 691, + }, + dictWord{4, 10, 
935}, + dictWord{133, 10, 823}, + dictWord{6, 0, 443}, + dictWord{9, 0, 237}, + dictWord{9, 0, 571}, + dictWord{9, 0, 695}, + dictWord{10, 0, 139}, + dictWord{11, 0, 715}, + dictWord{12, 0, 417}, + dictWord{141, 0, 421}, + dictWord{5, 10, 269}, + dictWord{7, 10, 434}, + dictWord{7, 10, 891}, + dictWord{8, 10, 339}, + dictWord{ + 9, + 10, + 702, + }, + dictWord{11, 10, 594}, + dictWord{11, 10, 718}, + dictWord{145, 10, 100}, + dictWord{6, 0, 1555}, + dictWord{7, 0, 878}, + dictWord{9, 10, 485}, + dictWord{141, 10, 264}, + dictWord{134, 10, 1713}, + dictWord{7, 10, 1810}, + dictWord{11, 10, 866}, + dictWord{12, 10, 103}, + dictWord{141, 10, 495}, + dictWord{ + 135, + 10, + 900, + }, + dictWord{6, 0, 1410}, + dictWord{9, 11, 316}, + dictWord{139, 11, 256}, + dictWord{4, 0, 995}, + dictWord{135, 0, 1033}, + dictWord{132, 0, 578}, + dictWord{10, 0, 881}, + dictWord{12, 0, 740}, + dictWord{12, 0, 743}, + dictWord{140, 0, 759}, + dictWord{132, 0, 822}, + dictWord{133, 0, 923}, + dictWord{142, 10, 143}, + dictWord{135, 11, 1696}, + dictWord{6, 11, 363}, + dictWord{7, 11, 1955}, + dictWord{136, 11, 725}, + dictWord{132, 0, 924}, + dictWord{133, 0, 665}, + dictWord{ + 135, + 10, + 2029, + }, + dictWord{135, 0, 1901}, + dictWord{4, 0, 265}, + dictWord{6, 0, 1092}, + dictWord{6, 0, 1417}, + dictWord{7, 0, 807}, + dictWord{135, 0, 950}, + dictWord{ + 5, + 0, + 93, + }, + dictWord{12, 0, 267}, + dictWord{141, 0, 498}, + dictWord{135, 0, 1451}, + dictWord{5, 11, 813}, + dictWord{135, 11, 2046}, + dictWord{5, 10, 625}, + dictWord{135, 10, 1617}, + dictWord{135, 0, 747}, + dictWord{6, 0, 788}, + dictWord{137, 0, 828}, + dictWord{7, 0, 184}, + dictWord{11, 0, 307}, + dictWord{11, 0, 400}, + dictWord{15, 0, 130}, + dictWord{5, 11, 712}, + dictWord{7, 11, 1855}, + dictWord{8, 10, 425}, + dictWord{8, 10, 693}, + dictWord{9, 10, 720}, + dictWord{10, 10, 380}, + dictWord{10, 10, 638}, + dictWord{11, 11, 17}, + dictWord{11, 10, 473}, + dictWord{12, 10, 61}, + dictWord{13, 11, 321}, + 
dictWord{144, 11, 67}, + dictWord{135, 0, 198}, + dictWord{6, 11, 320}, + dictWord{7, 11, 781}, + dictWord{7, 11, 1921}, + dictWord{9, 11, 55}, + dictWord{10, 11, 186}, + dictWord{10, 11, 273}, + dictWord{10, 11, 664}, + dictWord{10, 11, 801}, + dictWord{11, 11, 996}, + dictWord{11, 11, 997}, + dictWord{13, 11, 157}, + dictWord{142, 11, 170}, + dictWord{136, 11, 271}, + dictWord{ + 135, + 0, + 994, + }, + dictWord{7, 11, 103}, + dictWord{7, 11, 863}, + dictWord{11, 11, 184}, + dictWord{14, 11, 299}, + dictWord{145, 11, 62}, + dictWord{11, 10, 551}, + dictWord{142, 10, 159}, + dictWord{5, 0, 233}, + dictWord{5, 0, 320}, + dictWord{6, 0, 140}, + dictWord{8, 0, 295}, + dictWord{8, 0, 615}, + dictWord{136, 11, 615}, + dictWord{ + 133, + 0, + 978, + }, + dictWord{4, 0, 905}, + dictWord{6, 0, 1701}, + dictWord{137, 0, 843}, + dictWord{132, 10, 168}, + dictWord{4, 0, 974}, + dictWord{8, 0, 850}, + dictWord{ + 12, + 0, + 709, + }, + dictWord{12, 0, 768}, + dictWord{140, 0, 786}, + dictWord{135, 10, 91}, + dictWord{152, 0, 6}, + dictWord{138, 10, 532}, + dictWord{135, 10, 1884}, + dictWord{132, 0, 509}, + dictWord{6, 0, 1307}, + dictWord{135, 0, 273}, + dictWord{5, 11, 77}, + dictWord{7, 11, 1455}, + dictWord{10, 11, 843}, + dictWord{19, 11, 73}, + dictWord{150, 11, 5}, + dictWord{132, 11, 458}, + dictWord{135, 11, 1420}, + dictWord{6, 11, 109}, + dictWord{138, 11, 382}, + dictWord{6, 0, 201}, + dictWord{6, 11, 330}, + dictWord{7, 10, 70}, + dictWord{7, 11, 1084}, + dictWord{10, 10, 240}, + dictWord{11, 11, 142}, + dictWord{147, 10, 93}, + dictWord{7, 0, 1041}, + dictWord{ + 140, + 11, + 328, + }, + dictWord{133, 11, 354}, + dictWord{134, 0, 1040}, + dictWord{133, 0, 693}, + dictWord{134, 0, 774}, + dictWord{139, 0, 234}, + dictWord{132, 0, 336}, + dictWord{7, 0, 1399}, + dictWord{139, 10, 392}, + dictWord{20, 0, 22}, + dictWord{148, 11, 22}, + dictWord{5, 0, 802}, + dictWord{7, 0, 2021}, + dictWord{136, 0, 805}, + dictWord{ + 5, + 0, + 167, + }, + dictWord{5, 0, 899}, + 
dictWord{6, 0, 410}, + dictWord{137, 0, 777}, + dictWord{137, 0, 789}, + dictWord{134, 0, 1705}, + dictWord{7, 10, 655}, + dictWord{ + 135, + 10, + 1844, + }, + dictWord{4, 10, 145}, + dictWord{6, 10, 176}, + dictWord{7, 10, 395}, + dictWord{137, 10, 562}, + dictWord{132, 10, 501}, + dictWord{135, 0, 10}, + dictWord{5, 0, 11}, + dictWord{6, 0, 117}, + dictWord{6, 0, 485}, + dictWord{7, 0, 1133}, + dictWord{9, 0, 582}, + dictWord{9, 0, 594}, + dictWord{10, 0, 82}, + dictWord{11, 0, 21}, + dictWord{11, 0, 818}, + dictWord{12, 0, 535}, + dictWord{13, 0, 86}, + dictWord{20, 0, 91}, + dictWord{23, 0, 13}, + dictWord{134, 10, 509}, + dictWord{4, 0, 264}, + dictWord{ + 7, + 0, + 1067, + }, + dictWord{8, 0, 204}, + dictWord{8, 0, 385}, + dictWord{139, 0, 953}, + dictWord{139, 11, 737}, + dictWord{138, 0, 56}, + dictWord{134, 0, 1917}, + dictWord{ + 133, + 0, + 470, + }, + dictWord{10, 11, 657}, + dictWord{14, 11, 297}, + dictWord{142, 11, 361}, + dictWord{135, 11, 412}, + dictWord{7, 0, 1198}, + dictWord{7, 11, 1198}, + dictWord{8, 11, 556}, + dictWord{14, 11, 123}, + dictWord{14, 11, 192}, + dictWord{143, 11, 27}, + dictWord{7, 11, 1985}, + dictWord{14, 11, 146}, + dictWord{15, 11, 42}, + dictWord{16, 11, 23}, + dictWord{17, 11, 86}, + dictWord{146, 11, 17}, + dictWord{11, 0, 1015}, + dictWord{136, 11, 122}, + dictWord{4, 10, 114}, + dictWord{ + 9, + 10, + 492, + }, + dictWord{13, 10, 462}, + dictWord{142, 10, 215}, + dictWord{4, 10, 77}, + dictWord{5, 10, 361}, + dictWord{6, 10, 139}, + dictWord{6, 10, 401}, + dictWord{ + 6, + 10, + 404, + }, + dictWord{7, 10, 413}, + dictWord{7, 10, 715}, + dictWord{7, 10, 1716}, + dictWord{11, 10, 279}, + dictWord{12, 10, 179}, + dictWord{12, 10, 258}, + dictWord{ + 13, + 10, + 244, + }, + dictWord{142, 10, 358}, + dictWord{134, 10, 1717}, + dictWord{7, 10, 1061}, + dictWord{8, 10, 82}, + dictWord{11, 10, 250}, + dictWord{12, 10, 420}, + dictWord{141, 10, 184}, + dictWord{133, 0, 715}, + dictWord{135, 10, 724}, + dictWord{9, 0, 919}, + 
dictWord{9, 0, 922}, + dictWord{9, 0, 927}, + dictWord{9, 0, 933}, + dictWord{9, 0, 962}, + dictWord{9, 0, 1000}, + dictWord{9, 0, 1002}, + dictWord{9, 0, 1021}, + dictWord{12, 0, 890}, + dictWord{12, 0, 907}, + dictWord{12, 0, 930}, + dictWord{ + 15, + 0, + 207, + }, + dictWord{15, 0, 228}, + dictWord{15, 0, 238}, + dictWord{149, 0, 61}, + dictWord{8, 0, 794}, + dictWord{9, 0, 400}, + dictWord{10, 0, 298}, + dictWord{142, 0, 228}, + dictWord{5, 11, 430}, + dictWord{5, 11, 932}, + dictWord{6, 11, 131}, + dictWord{7, 11, 417}, + dictWord{9, 11, 522}, + dictWord{11, 11, 314}, + dictWord{141, 11, 390}, + dictWord{132, 0, 867}, + dictWord{8, 0, 724}, + dictWord{132, 11, 507}, + dictWord{137, 11, 261}, + dictWord{4, 11, 343}, + dictWord{133, 11, 511}, + dictWord{ + 6, + 0, + 190, + }, + dictWord{7, 0, 768}, + dictWord{135, 0, 1170}, + dictWord{6, 10, 513}, + dictWord{135, 10, 1052}, + dictWord{7, 11, 455}, + dictWord{138, 11, 591}, + dictWord{134, 0, 1066}, + dictWord{137, 10, 899}, + dictWord{14, 0, 67}, + dictWord{147, 0, 60}, + dictWord{4, 0, 948}, + dictWord{18, 0, 174}, + dictWord{146, 0, 176}, + dictWord{135, 0, 1023}, + dictWord{7, 10, 1417}, + dictWord{12, 10, 382}, + dictWord{17, 10, 48}, + dictWord{152, 10, 12}, + dictWord{134, 11, 575}, + dictWord{ + 132, + 0, + 764, + }, + dictWord{6, 10, 545}, + dictWord{7, 10, 565}, + dictWord{7, 10, 1669}, + dictWord{10, 10, 114}, + dictWord{11, 10, 642}, + dictWord{140, 10, 618}, + dictWord{ + 6, + 0, + 137, + }, + dictWord{9, 0, 75}, + dictWord{9, 0, 253}, + dictWord{10, 0, 194}, + dictWord{138, 0, 444}, + dictWord{4, 0, 756}, + dictWord{133, 10, 5}, + dictWord{8, 0, 1008}, + dictWord{135, 10, 192}, + dictWord{132, 0, 842}, + dictWord{11, 0, 643}, + dictWord{12, 0, 115}, + dictWord{136, 10, 763}, + dictWord{139, 0, 67}, + dictWord{ + 133, + 10, + 759, + }, + dictWord{4, 0, 821}, + dictWord{5, 0, 760}, + dictWord{7, 0, 542}, + dictWord{8, 0, 135}, + dictWord{8, 0, 496}, + dictWord{135, 11, 580}, + dictWord{7, 10, 370}, + 
dictWord{7, 10, 1007}, + dictWord{7, 10, 1177}, + dictWord{135, 10, 1565}, + dictWord{135, 10, 1237}, + dictWord{140, 0, 736}, + dictWord{7, 0, 319}, + dictWord{ + 7, + 0, + 355, + }, + dictWord{7, 0, 763}, + dictWord{10, 0, 389}, + dictWord{145, 0, 43}, + dictWord{8, 11, 333}, + dictWord{138, 11, 182}, + dictWord{4, 10, 87}, + dictWord{5, 10, 250}, + dictWord{141, 10, 298}, + dictWord{138, 0, 786}, + dictWord{134, 0, 2044}, + dictWord{8, 11, 330}, + dictWord{140, 11, 477}, + dictWord{135, 11, 1338}, + dictWord{132, 11, 125}, + dictWord{134, 0, 1030}, + dictWord{134, 0, 1083}, + dictWord{132, 11, 721}, + dictWord{135, 10, 814}, + dictWord{7, 11, 776}, + dictWord{ + 8, + 11, + 145, + }, + dictWord{147, 11, 56}, + dictWord{134, 0, 1226}, + dictWord{4, 10, 57}, + dictWord{7, 10, 1195}, + dictWord{7, 10, 1438}, + dictWord{7, 10, 1548}, + dictWord{ + 7, + 10, + 1835, + }, + dictWord{7, 10, 1904}, + dictWord{9, 10, 757}, + dictWord{10, 10, 604}, + dictWord{139, 10, 519}, + dictWord{7, 11, 792}, + dictWord{8, 11, 147}, + dictWord{10, 11, 821}, + dictWord{139, 11, 1021}, + dictWord{137, 11, 797}, + dictWord{4, 0, 58}, + dictWord{5, 0, 286}, + dictWord{6, 0, 319}, + dictWord{7, 0, 402}, + dictWord{ + 7, + 0, + 1254, + }, + dictWord{7, 0, 1903}, + dictWord{8, 0, 356}, + dictWord{140, 0, 408}, + dictWord{4, 0, 389}, + dictWord{4, 0, 815}, + dictWord{9, 0, 181}, + dictWord{9, 0, 255}, + dictWord{10, 0, 8}, + dictWord{10, 0, 29}, + dictWord{10, 0, 816}, + dictWord{11, 0, 311}, + dictWord{11, 0, 561}, + dictWord{12, 0, 67}, + dictWord{141, 0, 181}, + dictWord{ + 7, + 11, + 1472, + }, + dictWord{135, 11, 1554}, + dictWord{7, 11, 1071}, + dictWord{7, 11, 1541}, + dictWord{7, 11, 1767}, + dictWord{7, 11, 1806}, + dictWord{7, 11, 1999}, + dictWord{9, 11, 248}, + dictWord{10, 11, 400}, + dictWord{11, 11, 162}, + dictWord{11, 11, 178}, + dictWord{11, 11, 242}, + dictWord{12, 11, 605}, + dictWord{ + 15, + 11, + 26, + }, + dictWord{144, 11, 44}, + dictWord{5, 11, 168}, + dictWord{5, 11, 
930}, + dictWord{8, 11, 74}, + dictWord{9, 11, 623}, + dictWord{12, 11, 500}, + dictWord{ + 12, + 11, + 579, + }, + dictWord{13, 11, 41}, + dictWord{143, 11, 93}, + dictWord{6, 11, 220}, + dictWord{7, 11, 1101}, + dictWord{141, 11, 105}, + dictWord{5, 0, 474}, + dictWord{ + 7, + 0, + 507, + }, + dictWord{4, 10, 209}, + dictWord{7, 11, 507}, + dictWord{135, 10, 902}, + dictWord{132, 0, 427}, + dictWord{6, 0, 413}, + dictWord{7, 10, 335}, + dictWord{ + 7, + 10, + 1437, + }, + dictWord{7, 10, 1668}, + dictWord{8, 10, 553}, + dictWord{8, 10, 652}, + dictWord{8, 10, 656}, + dictWord{9, 10, 558}, + dictWord{11, 10, 743}, + dictWord{ + 149, + 10, + 18, + }, + dictWord{132, 0, 730}, + dictWord{6, 11, 19}, + dictWord{7, 11, 1413}, + dictWord{139, 11, 428}, + dictWord{133, 0, 373}, + dictWord{132, 10, 559}, + dictWord{7, 11, 96}, + dictWord{8, 11, 401}, + dictWord{137, 11, 896}, + dictWord{7, 0, 799}, + dictWord{7, 0, 1972}, + dictWord{5, 10, 1017}, + dictWord{138, 10, 511}, + dictWord{135, 0, 1793}, + dictWord{7, 11, 1961}, + dictWord{7, 11, 1965}, + dictWord{8, 11, 702}, + dictWord{136, 11, 750}, + dictWord{8, 11, 150}, + dictWord{8, 11, 737}, + dictWord{140, 11, 366}, + dictWord{132, 0, 322}, + dictWord{133, 10, 709}, + dictWord{8, 11, 800}, + dictWord{9, 11, 148}, + dictWord{9, 11, 872}, + dictWord{ + 9, + 11, + 890, + }, + dictWord{11, 11, 309}, + dictWord{11, 11, 1001}, + dictWord{13, 11, 267}, + dictWord{141, 11, 323}, + dictWord{134, 10, 1745}, + dictWord{7, 0, 290}, + dictWord{136, 10, 206}, + dictWord{7, 0, 1651}, + dictWord{145, 0, 89}, + dictWord{139, 0, 2}, + dictWord{132, 0, 672}, + dictWord{6, 0, 1860}, + dictWord{8, 0, 905}, + dictWord{ + 10, + 0, + 844, + }, + dictWord{10, 0, 846}, + dictWord{10, 0, 858}, + dictWord{12, 0, 699}, + dictWord{12, 0, 746}, + dictWord{140, 0, 772}, + dictWord{135, 11, 424}, + dictWord{133, 11, 547}, + dictWord{133, 0, 737}, + dictWord{5, 11, 490}, + dictWord{6, 11, 615}, + dictWord{6, 11, 620}, + dictWord{135, 11, 683}, + 
dictWord{6, 0, 746}, + dictWord{134, 0, 1612}, + dictWord{132, 10, 776}, + dictWord{9, 11, 385}, + dictWord{149, 11, 17}, + dictWord{133, 0, 145}, + dictWord{135, 10, 1272}, + dictWord{ + 7, + 0, + 884, + }, + dictWord{140, 0, 124}, + dictWord{4, 0, 387}, + dictWord{135, 0, 1288}, + dictWord{5, 11, 133}, + dictWord{136, 10, 406}, + dictWord{136, 11, 187}, + dictWord{ + 6, + 0, + 679, + }, + dictWord{8, 11, 8}, + dictWord{138, 11, 0}, + dictWord{135, 0, 550}, + dictWord{135, 11, 798}, + dictWord{136, 11, 685}, + dictWord{7, 11, 1086}, + dictWord{145, 11, 46}, + dictWord{8, 10, 175}, + dictWord{10, 10, 168}, + dictWord{138, 10, 573}, + dictWord{135, 0, 1305}, + dictWord{4, 0, 576}, + dictWord{ + 135, + 0, + 1263, + }, + dictWord{6, 0, 686}, + dictWord{134, 0, 1563}, + dictWord{134, 0, 607}, + dictWord{5, 0, 919}, + dictWord{134, 0, 1673}, + dictWord{148, 0, 37}, + dictWord{ + 8, + 11, + 774, + }, + dictWord{10, 11, 670}, + dictWord{140, 11, 51}, + dictWord{133, 10, 784}, + dictWord{139, 10, 882}, + dictWord{4, 0, 82}, + dictWord{5, 0, 333}, + dictWord{ + 5, + 0, + 904, + }, + dictWord{6, 0, 207}, + dictWord{7, 0, 325}, + dictWord{7, 0, 1726}, + dictWord{8, 0, 101}, + dictWord{10, 0, 778}, + dictWord{139, 0, 220}, + dictWord{135, 11, 371}, + dictWord{132, 0, 958}, + dictWord{133, 0, 903}, + dictWord{4, 11, 127}, + dictWord{5, 11, 350}, + dictWord{6, 11, 356}, + dictWord{8, 11, 426}, + dictWord{9, 11, 572}, + dictWord{10, 11, 247}, + dictWord{139, 11, 312}, + dictWord{140, 0, 147}, + dictWord{6, 11, 59}, + dictWord{7, 11, 885}, + dictWord{9, 11, 603}, + dictWord{ + 141, + 11, + 397, + }, + dictWord{10, 0, 367}, + dictWord{9, 10, 14}, + dictWord{9, 10, 441}, + dictWord{139, 10, 9}, + dictWord{11, 10, 966}, + dictWord{12, 10, 287}, + dictWord{ + 13, + 10, + 342, + }, + dictWord{13, 10, 402}, + dictWord{15, 10, 110}, + dictWord{143, 10, 163}, + dictWord{134, 0, 690}, + dictWord{132, 0, 705}, + dictWord{9, 0, 651}, + dictWord{ + 11, + 0, + 971, + }, + dictWord{13, 0, 273}, 
+ dictWord{7, 10, 1428}, + dictWord{7, 10, 1640}, + dictWord{7, 10, 1867}, + dictWord{9, 10, 169}, + dictWord{9, 10, 182}, + dictWord{ + 9, + 10, + 367, + }, + dictWord{9, 10, 478}, + dictWord{9, 10, 506}, + dictWord{9, 10, 551}, + dictWord{9, 10, 557}, + dictWord{9, 10, 648}, + dictWord{9, 10, 697}, + dictWord{ + 9, + 10, + 705, + }, + dictWord{9, 10, 725}, + dictWord{9, 10, 787}, + dictWord{9, 10, 794}, + dictWord{10, 10, 198}, + dictWord{10, 10, 214}, + dictWord{10, 10, 267}, + dictWord{ + 10, + 10, + 275, + }, + dictWord{10, 10, 456}, + dictWord{10, 10, 551}, + dictWord{10, 10, 561}, + dictWord{10, 10, 613}, + dictWord{10, 10, 627}, + dictWord{10, 10, 668}, + dictWord{10, 10, 675}, + dictWord{10, 10, 691}, + dictWord{10, 10, 695}, + dictWord{10, 10, 707}, + dictWord{10, 10, 715}, + dictWord{11, 10, 183}, + dictWord{ + 11, + 10, + 201, + }, + dictWord{11, 10, 262}, + dictWord{11, 10, 352}, + dictWord{11, 10, 439}, + dictWord{11, 10, 493}, + dictWord{11, 10, 572}, + dictWord{11, 10, 591}, + dictWord{ + 11, + 10, + 608, + }, + dictWord{11, 10, 611}, + dictWord{11, 10, 646}, + dictWord{11, 10, 674}, + dictWord{11, 10, 711}, + dictWord{11, 10, 751}, + dictWord{11, 10, 761}, + dictWord{11, 10, 776}, + dictWord{11, 10, 785}, + dictWord{11, 10, 850}, + dictWord{11, 10, 853}, + dictWord{11, 10, 862}, + dictWord{11, 10, 865}, + dictWord{ + 11, + 10, + 868, + }, + dictWord{11, 10, 875}, + dictWord{11, 10, 898}, + dictWord{11, 10, 902}, + dictWord{11, 10, 903}, + dictWord{11, 10, 910}, + dictWord{11, 10, 932}, + dictWord{ + 11, + 10, + 942, + }, + dictWord{11, 10, 957}, + dictWord{11, 10, 967}, + dictWord{11, 10, 972}, + dictWord{12, 10, 148}, + dictWord{12, 10, 195}, + dictWord{12, 10, 220}, + dictWord{12, 10, 237}, + dictWord{12, 10, 318}, + dictWord{12, 10, 339}, + dictWord{12, 10, 393}, + dictWord{12, 10, 445}, + dictWord{12, 10, 450}, + dictWord{ + 12, + 10, + 474, + }, + dictWord{12, 10, 505}, + dictWord{12, 10, 509}, + dictWord{12, 10, 533}, + dictWord{12, 10, 591}, 
+ dictWord{12, 10, 594}, + dictWord{12, 10, 597}, + dictWord{ + 12, + 10, + 621, + }, + dictWord{12, 10, 633}, + dictWord{12, 10, 642}, + dictWord{13, 10, 59}, + dictWord{13, 10, 60}, + dictWord{13, 10, 145}, + dictWord{13, 10, 239}, + dictWord{13, 10, 250}, + dictWord{13, 10, 329}, + dictWord{13, 10, 344}, + dictWord{13, 10, 365}, + dictWord{13, 10, 372}, + dictWord{13, 10, 387}, + dictWord{ + 13, + 10, + 403, + }, + dictWord{13, 10, 414}, + dictWord{13, 10, 456}, + dictWord{13, 10, 470}, + dictWord{13, 10, 478}, + dictWord{13, 10, 483}, + dictWord{13, 10, 489}, + dictWord{ + 14, + 10, + 55, + }, + dictWord{14, 10, 57}, + dictWord{14, 10, 81}, + dictWord{14, 10, 90}, + dictWord{14, 10, 148}, + dictWord{14, 10, 239}, + dictWord{14, 10, 266}, + dictWord{ + 14, + 10, + 321, + }, + dictWord{14, 10, 326}, + dictWord{14, 10, 327}, + dictWord{14, 10, 330}, + dictWord{14, 10, 347}, + dictWord{14, 10, 355}, + dictWord{14, 10, 401}, + dictWord{14, 10, 404}, + dictWord{14, 10, 411}, + dictWord{14, 10, 414}, + dictWord{14, 10, 416}, + dictWord{14, 10, 420}, + dictWord{15, 10, 61}, + dictWord{ + 15, + 10, + 74, + }, + dictWord{15, 10, 87}, + dictWord{15, 10, 88}, + dictWord{15, 10, 94}, + dictWord{15, 10, 96}, + dictWord{15, 10, 116}, + dictWord{15, 10, 149}, + dictWord{ + 15, + 10, + 154, + }, + dictWord{16, 10, 50}, + dictWord{16, 10, 63}, + dictWord{16, 10, 73}, + dictWord{17, 10, 2}, + dictWord{17, 10, 66}, + dictWord{17, 10, 92}, + dictWord{17, 10, 103}, + dictWord{17, 10, 112}, + dictWord{17, 10, 120}, + dictWord{18, 10, 50}, + dictWord{18, 10, 54}, + dictWord{18, 10, 82}, + dictWord{18, 10, 86}, + dictWord{18, 10, 90}, + dictWord{18, 10, 111}, + dictWord{18, 10, 115}, + dictWord{18, 10, 156}, + dictWord{19, 10, 40}, + dictWord{19, 10, 79}, + dictWord{20, 10, 78}, + dictWord{149, 10, 22}, + dictWord{7, 0, 887}, + dictWord{5, 10, 161}, + dictWord{135, 10, 839}, + dictWord{142, 11, 98}, + dictWord{134, 0, 90}, + dictWord{138, 11, 356}, + dictWord{ + 135, + 11, + 441, + }, 
+ dictWord{6, 11, 111}, + dictWord{7, 11, 4}, + dictWord{8, 11, 163}, + dictWord{8, 11, 776}, + dictWord{138, 11, 566}, + dictWord{134, 0, 908}, + dictWord{ + 134, + 0, + 1261, + }, + dictWord{7, 0, 813}, + dictWord{12, 0, 497}, + dictWord{141, 0, 56}, + dictWord{134, 0, 1235}, + dictWord{135, 0, 429}, + dictWord{135, 11, 1994}, + dictWord{138, 0, 904}, + dictWord{6, 0, 125}, + dictWord{7, 0, 1277}, + dictWord{137, 0, 772}, + dictWord{151, 0, 12}, + dictWord{4, 0, 841}, + dictWord{5, 0, 386}, + dictWord{ + 133, + 11, + 386, + }, + dictWord{5, 11, 297}, + dictWord{135, 11, 1038}, + dictWord{6, 0, 860}, + dictWord{6, 0, 1069}, + dictWord{135, 11, 309}, + dictWord{136, 0, 946}, + dictWord{135, 10, 1814}, + dictWord{141, 11, 418}, + dictWord{136, 11, 363}, + dictWord{10, 0, 768}, + dictWord{139, 0, 787}, + dictWord{22, 11, 30}, + dictWord{ + 150, + 11, + 33, + }, + dictWord{6, 0, 160}, + dictWord{7, 0, 1106}, + dictWord{9, 0, 770}, + dictWord{11, 0, 112}, + dictWord{140, 0, 413}, + dictWord{11, 11, 216}, + dictWord{ + 139, + 11, + 340, + }, + dictWord{136, 10, 139}, + dictWord{135, 11, 1390}, + dictWord{135, 11, 808}, + dictWord{132, 11, 280}, + dictWord{12, 0, 271}, + dictWord{17, 0, 109}, + dictWord{7, 10, 643}, + dictWord{136, 10, 236}, + dictWord{140, 11, 54}, + dictWord{4, 11, 421}, + dictWord{133, 11, 548}, + dictWord{11, 0, 719}, + dictWord{12, 0, 36}, + dictWord{141, 0, 337}, + dictWord{7, 0, 581}, + dictWord{9, 0, 644}, + dictWord{137, 0, 699}, + dictWord{11, 11, 511}, + dictWord{13, 11, 394}, + dictWord{14, 11, 298}, + dictWord{14, 11, 318}, + dictWord{146, 11, 103}, + dictWord{7, 0, 304}, + dictWord{9, 0, 646}, + dictWord{9, 0, 862}, + dictWord{11, 0, 696}, + dictWord{12, 0, 208}, + dictWord{15, 0, 79}, + dictWord{147, 0, 108}, + dictWord{4, 0, 631}, + dictWord{7, 0, 1126}, + dictWord{135, 0, 1536}, + dictWord{135, 11, 1527}, + dictWord{8, 0, 880}, + dictWord{10, 0, 869}, + dictWord{138, 0, 913}, + dictWord{7, 0, 1513}, + dictWord{5, 10, 54}, + dictWord{6, 
11, 254}, + dictWord{9, 11, 109}, + dictWord{138, 11, 103}, + dictWord{135, 0, 981}, + dictWord{133, 11, 729}, + dictWord{132, 10, 744}, + dictWord{132, 0, 434}, + dictWord{134, 0, 550}, + dictWord{7, 0, 930}, + dictWord{10, 0, 476}, + dictWord{13, 0, 452}, + dictWord{19, 0, 104}, + dictWord{6, 11, 1630}, + dictWord{10, 10, 402}, + dictWord{146, 10, 55}, + dictWord{5, 0, 553}, + dictWord{138, 0, 824}, + dictWord{136, 0, 452}, + dictWord{8, 0, 151}, + dictWord{137, 10, 624}, + dictWord{132, 10, 572}, + dictWord{132, 0, 772}, + dictWord{133, 11, 671}, + dictWord{ + 133, + 0, + 292, + }, + dictWord{138, 0, 135}, + dictWord{132, 11, 889}, + dictWord{140, 11, 207}, + dictWord{9, 0, 504}, + dictWord{6, 10, 43}, + dictWord{7, 10, 38}, + dictWord{ + 8, + 10, + 248, + }, + dictWord{138, 10, 513}, + dictWord{6, 0, 1089}, + dictWord{135, 11, 1910}, + dictWord{4, 11, 627}, + dictWord{133, 11, 775}, + dictWord{135, 0, 783}, + dictWord{133, 10, 766}, + dictWord{133, 10, 363}, + dictWord{7, 0, 387}, + dictWord{135, 11, 387}, + dictWord{7, 0, 393}, + dictWord{10, 0, 603}, + dictWord{11, 0, 206}, + dictWord{7, 11, 202}, + dictWord{11, 11, 362}, + dictWord{11, 11, 948}, + dictWord{140, 11, 388}, + dictWord{6, 11, 507}, + dictWord{7, 11, 451}, + dictWord{8, 11, 389}, + dictWord{12, 11, 490}, + dictWord{13, 11, 16}, + dictWord{13, 11, 215}, + dictWord{13, 11, 351}, + dictWord{18, 11, 132}, + dictWord{147, 11, 125}, + dictWord{ + 4, + 0, + 912, + }, + dictWord{9, 0, 232}, + dictWord{135, 11, 841}, + dictWord{6, 10, 258}, + dictWord{140, 10, 409}, + dictWord{5, 10, 249}, + dictWord{148, 10, 82}, + dictWord{ + 136, + 11, + 566, + }, + dictWord{6, 0, 977}, + dictWord{135, 11, 1214}, + dictWord{7, 0, 1973}, + dictWord{136, 0, 716}, + dictWord{135, 0, 98}, + dictWord{133, 0, 733}, + dictWord{ + 5, + 11, + 912, + }, + dictWord{134, 11, 1695}, + dictWord{5, 10, 393}, + dictWord{6, 10, 378}, + dictWord{7, 10, 1981}, + dictWord{9, 10, 32}, + dictWord{9, 10, 591}, + dictWord{10, 10, 685}, + 
dictWord{10, 10, 741}, + dictWord{142, 10, 382}, + dictWord{133, 10, 788}, + dictWord{10, 0, 19}, + dictWord{11, 0, 911}, + dictWord{7, 10, 1968}, + dictWord{141, 10, 509}, + dictWord{5, 0, 668}, + dictWord{5, 11, 236}, + dictWord{6, 11, 572}, + dictWord{8, 11, 492}, + dictWord{11, 11, 618}, + dictWord{144, 11, 56}, + dictWord{135, 11, 1789}, + dictWord{4, 0, 360}, + dictWord{5, 0, 635}, + dictWord{5, 0, 700}, + dictWord{5, 10, 58}, + dictWord{5, 10, 171}, + dictWord{5, 10, 683}, + dictWord{ + 6, + 10, + 291, + }, + dictWord{6, 10, 566}, + dictWord{7, 10, 1650}, + dictWord{11, 10, 523}, + dictWord{12, 10, 273}, + dictWord{12, 10, 303}, + dictWord{15, 10, 39}, + dictWord{143, 10, 111}, + dictWord{133, 0, 901}, + dictWord{134, 10, 589}, + dictWord{5, 11, 190}, + dictWord{136, 11, 318}, + dictWord{140, 0, 656}, + dictWord{ + 7, + 0, + 726, + }, + dictWord{152, 0, 9}, + dictWord{4, 10, 917}, + dictWord{133, 10, 1005}, + dictWord{135, 10, 1598}, + dictWord{134, 11, 491}, + dictWord{4, 10, 919}, + dictWord{133, 11, 434}, + dictWord{137, 0, 72}, + dictWord{6, 0, 1269}, + dictWord{6, 0, 1566}, + dictWord{134, 0, 1621}, + dictWord{9, 0, 463}, + dictWord{10, 0, 595}, + dictWord{4, 10, 255}, + dictWord{5, 10, 302}, + dictWord{6, 10, 132}, + dictWord{7, 10, 128}, + dictWord{7, 10, 283}, + dictWord{7, 10, 1299}, + dictWord{10, 10, 52}, + dictWord{ + 10, + 10, + 514, + }, + dictWord{11, 10, 925}, + dictWord{13, 10, 92}, + dictWord{142, 10, 309}, + dictWord{135, 0, 1454}, + dictWord{134, 0, 1287}, + dictWord{11, 0, 600}, + dictWord{13, 0, 245}, + dictWord{137, 10, 173}, + dictWord{136, 0, 989}, + dictWord{7, 0, 164}, + dictWord{7, 0, 1571}, + dictWord{9, 0, 107}, + dictWord{140, 0, 225}, + dictWord{6, 0, 1061}, + dictWord{141, 10, 442}, + dictWord{4, 0, 27}, + dictWord{5, 0, 484}, + dictWord{5, 0, 510}, + dictWord{6, 0, 434}, + dictWord{7, 0, 1000}, + dictWord{ + 7, + 0, + 1098, + }, + dictWord{136, 0, 2}, + dictWord{7, 11, 85}, + dictWord{7, 11, 247}, + dictWord{8, 11, 585}, + 
dictWord{10, 11, 163}, + dictWord{138, 11, 316}, + dictWord{ + 11, + 11, + 103, + }, + dictWord{142, 11, 0}, + dictWord{134, 0, 1127}, + dictWord{4, 0, 460}, + dictWord{134, 0, 852}, + dictWord{134, 10, 210}, + dictWord{4, 0, 932}, + dictWord{ + 133, + 0, + 891, + }, + dictWord{6, 0, 588}, + dictWord{147, 11, 83}, + dictWord{8, 0, 625}, + dictWord{4, 10, 284}, + dictWord{134, 10, 223}, + dictWord{134, 0, 76}, + dictWord{8, 0, 92}, + dictWord{137, 0, 221}, + dictWord{4, 11, 124}, + dictWord{10, 11, 457}, + dictWord{11, 11, 121}, + dictWord{11, 11, 169}, + dictWord{11, 11, 422}, + dictWord{ + 11, + 11, + 870, + }, + dictWord{12, 11, 214}, + dictWord{13, 11, 389}, + dictWord{14, 11, 187}, + dictWord{143, 11, 77}, + dictWord{9, 11, 618}, + dictWord{138, 11, 482}, + dictWord{ + 4, + 10, + 218, + }, + dictWord{7, 10, 526}, + dictWord{143, 10, 137}, + dictWord{13, 0, 9}, + dictWord{14, 0, 104}, + dictWord{14, 0, 311}, + dictWord{4, 10, 270}, + dictWord{ + 5, + 10, + 192, + }, + dictWord{6, 10, 332}, + dictWord{135, 10, 1322}, + dictWord{140, 10, 661}, + dictWord{135, 11, 1193}, + dictWord{6, 11, 107}, + dictWord{7, 11, 638}, + dictWord{7, 11, 1632}, + dictWord{137, 11, 396}, + dictWord{132, 0, 763}, + dictWord{4, 0, 622}, + dictWord{5, 11, 370}, + dictWord{134, 11, 1756}, + dictWord{ + 133, + 0, + 253, + }, + dictWord{135, 0, 546}, + dictWord{9, 0, 73}, + dictWord{10, 0, 110}, + dictWord{14, 0, 185}, + dictWord{17, 0, 119}, + dictWord{133, 11, 204}, + dictWord{7, 0, 624}, + dictWord{7, 0, 916}, + dictWord{10, 0, 256}, + dictWord{139, 0, 87}, + dictWord{7, 10, 379}, + dictWord{8, 10, 481}, + dictWord{137, 10, 377}, + dictWord{5, 0, 212}, + dictWord{12, 0, 35}, + dictWord{13, 0, 382}, + dictWord{5, 11, 970}, + dictWord{134, 11, 1706}, + dictWord{9, 0, 746}, + dictWord{5, 10, 1003}, + dictWord{134, 10, 149}, + dictWord{10, 0, 150}, + dictWord{11, 0, 849}, + dictWord{13, 0, 330}, + dictWord{8, 10, 262}, + dictWord{9, 10, 627}, + dictWord{11, 10, 214}, + dictWord{11, 10, 404}, 
+ dictWord{11, 10, 457}, + dictWord{11, 10, 780}, + dictWord{11, 10, 913}, + dictWord{13, 10, 401}, + dictWord{142, 10, 200}, + dictWord{134, 0, 1466}, + dictWord{ + 135, + 11, + 3, + }, + dictWord{6, 0, 1299}, + dictWord{4, 11, 35}, + dictWord{5, 11, 121}, + dictWord{5, 11, 483}, + dictWord{5, 11, 685}, + dictWord{6, 11, 489}, + dictWord{7, 11, 1204}, + dictWord{136, 11, 394}, + dictWord{135, 10, 742}, + dictWord{4, 10, 142}, + dictWord{136, 10, 304}, + dictWord{4, 11, 921}, + dictWord{133, 11, 1007}, + dictWord{ + 134, + 0, + 1518, + }, + dictWord{6, 0, 1229}, + dictWord{135, 0, 1175}, + dictWord{133, 0, 816}, + dictWord{12, 0, 159}, + dictWord{4, 10, 471}, + dictWord{4, 11, 712}, + dictWord{ + 5, + 10, + 51, + }, + dictWord{6, 10, 602}, + dictWord{7, 10, 925}, + dictWord{8, 10, 484}, + dictWord{138, 10, 195}, + dictWord{134, 11, 1629}, + dictWord{5, 0, 869}, + dictWord{ + 5, + 0, + 968, + }, + dictWord{6, 0, 1626}, + dictWord{8, 0, 734}, + dictWord{136, 0, 784}, + dictWord{4, 0, 542}, + dictWord{6, 0, 1716}, + dictWord{6, 0, 1727}, + dictWord{ + 7, + 0, + 1082, + }, + dictWord{7, 0, 1545}, + dictWord{8, 0, 56}, + dictWord{8, 0, 118}, + dictWord{8, 0, 412}, + dictWord{8, 0, 564}, + dictWord{9, 0, 888}, + dictWord{9, 0, 908}, + dictWord{ + 10, + 0, + 50, + }, + dictWord{10, 0, 423}, + dictWord{11, 0, 685}, + dictWord{11, 0, 697}, + dictWord{11, 0, 933}, + dictWord{12, 0, 299}, + dictWord{13, 0, 126}, + dictWord{ + 13, + 0, + 136, + }, + dictWord{13, 0, 170}, + dictWord{13, 0, 190}, + dictWord{136, 10, 688}, + dictWord{132, 10, 697}, + dictWord{4, 0, 232}, + dictWord{9, 0, 202}, + dictWord{ + 10, + 0, + 474, + }, + dictWord{140, 0, 433}, + dictWord{136, 0, 212}, + dictWord{6, 0, 108}, + dictWord{7, 0, 1003}, + dictWord{7, 0, 1181}, + dictWord{8, 0, 111}, + dictWord{ + 136, + 0, + 343, + }, + dictWord{5, 10, 221}, + dictWord{135, 11, 1255}, + dictWord{133, 11, 485}, + dictWord{134, 0, 1712}, + dictWord{142, 0, 216}, + dictWord{5, 0, 643}, + dictWord{ + 6, + 0, + 
516, + }, + dictWord{4, 11, 285}, + dictWord{5, 11, 317}, + dictWord{6, 11, 301}, + dictWord{7, 11, 7}, + dictWord{8, 11, 153}, + dictWord{10, 11, 766}, + dictWord{ + 11, + 11, + 468, + }, + dictWord{12, 11, 467}, + dictWord{141, 11, 143}, + dictWord{4, 0, 133}, + dictWord{7, 0, 711}, + dictWord{7, 0, 1298}, + dictWord{135, 0, 1585}, + dictWord{ + 134, + 0, + 650, + }, + dictWord{135, 11, 512}, + dictWord{6, 0, 99}, + dictWord{7, 0, 1808}, + dictWord{145, 0, 57}, + dictWord{6, 0, 246}, + dictWord{6, 0, 574}, + dictWord{7, 0, 428}, + dictWord{9, 0, 793}, + dictWord{10, 0, 669}, + dictWord{11, 0, 485}, + dictWord{11, 0, 840}, + dictWord{12, 0, 300}, + dictWord{14, 0, 250}, + dictWord{145, 0, 55}, + dictWord{ + 4, + 10, + 132, + }, + dictWord{5, 10, 69}, + dictWord{135, 10, 1242}, + dictWord{136, 0, 1023}, + dictWord{7, 0, 302}, + dictWord{132, 10, 111}, + dictWord{135, 0, 1871}, + dictWord{132, 0, 728}, + dictWord{9, 0, 252}, + dictWord{132, 10, 767}, + dictWord{6, 0, 461}, + dictWord{7, 0, 1590}, + dictWord{7, 10, 1416}, + dictWord{7, 10, 2005}, + dictWord{8, 10, 131}, + dictWord{8, 10, 466}, + dictWord{9, 10, 672}, + dictWord{13, 10, 252}, + dictWord{148, 10, 103}, + dictWord{6, 0, 323}, + dictWord{135, 0, 1564}, + dictWord{7, 0, 461}, + dictWord{136, 0, 775}, + dictWord{6, 10, 44}, + dictWord{136, 10, 368}, + dictWord{139, 0, 172}, + dictWord{132, 0, 464}, + dictWord{4, 10, 570}, + dictWord{133, 10, 120}, + dictWord{137, 11, 269}, + dictWord{6, 10, 227}, + dictWord{135, 10, 1589}, + dictWord{6, 11, 1719}, + dictWord{6, 11, 1735}, + dictWord{ + 7, + 11, + 2016, + }, + dictWord{7, 11, 2020}, + dictWord{8, 11, 837}, + dictWord{137, 11, 852}, + dictWord{7, 0, 727}, + dictWord{146, 0, 73}, + dictWord{132, 0, 1023}, + dictWord{135, 11, 852}, + dictWord{135, 10, 1529}, + dictWord{136, 0, 577}, + dictWord{138, 11, 568}, + dictWord{134, 0, 1037}, + dictWord{8, 11, 67}, + dictWord{ + 138, + 11, + 419, + }, + dictWord{4, 0, 413}, + dictWord{5, 0, 677}, + dictWord{8, 0, 432}, 
+ dictWord{140, 0, 280}, + dictWord{10, 0, 600}, + dictWord{6, 10, 1667}, + dictWord{ + 7, + 11, + 967, + }, + dictWord{7, 10, 2036}, + dictWord{141, 11, 11}, + dictWord{6, 10, 511}, + dictWord{140, 10, 132}, + dictWord{6, 0, 799}, + dictWord{5, 10, 568}, + dictWord{ + 6, + 10, + 138, + }, + dictWord{135, 10, 1293}, + dictWord{8, 0, 159}, + dictWord{4, 10, 565}, + dictWord{136, 10, 827}, + dictWord{7, 0, 646}, + dictWord{7, 0, 1730}, + dictWord{ + 11, + 0, + 446, + }, + dictWord{141, 0, 178}, + dictWord{4, 10, 922}, + dictWord{133, 10, 1023}, + dictWord{135, 11, 11}, + dictWord{132, 0, 395}, + dictWord{11, 0, 145}, + dictWord{135, 10, 1002}, + dictWord{9, 0, 174}, + dictWord{10, 0, 164}, + dictWord{11, 0, 440}, + dictWord{11, 0, 514}, + dictWord{11, 0, 841}, + dictWord{15, 0, 98}, + dictWord{149, 0, 20}, + dictWord{134, 0, 426}, + dictWord{10, 0, 608}, + dictWord{139, 0, 1002}, + dictWord{7, 11, 320}, + dictWord{8, 11, 51}, + dictWord{12, 11, 481}, + dictWord{12, 11, 570}, + dictWord{148, 11, 106}, + dictWord{9, 0, 977}, + dictWord{9, 0, 983}, + dictWord{132, 11, 445}, + dictWord{138, 0, 250}, + dictWord{139, 0, 100}, + dictWord{6, 0, 1982}, + dictWord{136, 10, 402}, + dictWord{133, 11, 239}, + dictWord{4, 10, 716}, + dictWord{141, 10, 31}, + dictWord{5, 0, 476}, + dictWord{7, 11, 83}, + dictWord{7, 11, 1990}, + dictWord{8, 11, 130}, + dictWord{139, 11, 720}, + dictWord{8, 10, 691}, + dictWord{136, 10, 731}, + dictWord{5, 11, 123}, + dictWord{ + 6, + 11, + 530, + }, + dictWord{7, 11, 348}, + dictWord{135, 11, 1419}, + dictWord{5, 0, 76}, + dictWord{6, 0, 458}, + dictWord{6, 0, 497}, + dictWord{7, 0, 868}, + dictWord{9, 0, 658}, + dictWord{10, 0, 594}, + dictWord{11, 0, 173}, + dictWord{11, 0, 566}, + dictWord{12, 0, 20}, + dictWord{12, 0, 338}, + dictWord{141, 0, 200}, + dictWord{9, 11, 139}, + dictWord{ + 10, + 11, + 399, + }, + dictWord{11, 11, 469}, + dictWord{12, 11, 634}, + dictWord{141, 11, 223}, + dictWord{9, 10, 840}, + dictWord{138, 10, 803}, + 
dictWord{133, 10, 847}, + dictWord{11, 11, 223}, + dictWord{140, 11, 168}, + dictWord{132, 11, 210}, + dictWord{8, 0, 447}, + dictWord{9, 10, 53}, + dictWord{9, 10, 268}, + dictWord{9, 10, 901}, + dictWord{10, 10, 518}, + dictWord{10, 10, 829}, + dictWord{11, 10, 188}, + dictWord{13, 10, 74}, + dictWord{14, 10, 46}, + dictWord{15, 10, 17}, + dictWord{15, 10, 33}, + dictWord{17, 10, 40}, + dictWord{18, 10, 36}, + dictWord{19, 10, 20}, + dictWord{22, 10, 1}, + dictWord{152, 10, 2}, + dictWord{4, 0, 526}, + dictWord{7, 0, 1029}, + dictWord{135, 0, 1054}, + dictWord{19, 11, 59}, + dictWord{150, 11, 2}, + dictWord{4, 0, 636}, + dictWord{6, 0, 1875}, + dictWord{6, 0, 1920}, + dictWord{9, 0, 999}, + dictWord{ + 12, + 0, + 807, + }, + dictWord{12, 0, 825}, + dictWord{15, 0, 179}, + dictWord{15, 0, 190}, + dictWord{18, 0, 182}, + dictWord{136, 10, 532}, + dictWord{6, 0, 1699}, + dictWord{ + 7, + 0, + 660, + }, + dictWord{7, 0, 1124}, + dictWord{17, 0, 31}, + dictWord{19, 0, 22}, + dictWord{151, 0, 14}, + dictWord{135, 10, 681}, + dictWord{132, 11, 430}, + dictWord{ + 140, + 10, + 677, + }, + dictWord{4, 10, 684}, + dictWord{136, 10, 384}, + dictWord{132, 11, 756}, + dictWord{133, 11, 213}, + dictWord{7, 0, 188}, + dictWord{7, 10, 110}, + dictWord{ + 8, + 10, + 290, + }, + dictWord{8, 10, 591}, + dictWord{9, 10, 382}, + dictWord{9, 10, 649}, + dictWord{11, 10, 71}, + dictWord{11, 10, 155}, + dictWord{11, 10, 313}, + dictWord{ + 12, + 10, + 5, + }, + dictWord{13, 10, 325}, + dictWord{142, 10, 287}, + dictWord{7, 10, 360}, + dictWord{7, 10, 425}, + dictWord{9, 10, 66}, + dictWord{9, 10, 278}, + dictWord{ + 138, + 10, + 644, + }, + dictWord{142, 11, 164}, + dictWord{4, 0, 279}, + dictWord{7, 0, 301}, + dictWord{137, 0, 362}, + dictWord{134, 11, 586}, + dictWord{135, 0, 1743}, + dictWord{4, 0, 178}, + dictWord{133, 0, 399}, + dictWord{4, 10, 900}, + dictWord{133, 10, 861}, + dictWord{5, 10, 254}, + dictWord{7, 10, 985}, + dictWord{136, 10, 73}, + dictWord{133, 11, 108}, + 
dictWord{7, 10, 1959}, + dictWord{136, 10, 683}, + dictWord{133, 11, 219}, + dictWord{4, 11, 193}, + dictWord{5, 11, 916}, + dictWord{ + 7, + 11, + 364, + }, + dictWord{10, 11, 398}, + dictWord{10, 11, 726}, + dictWord{11, 11, 317}, + dictWord{11, 11, 626}, + dictWord{12, 11, 142}, + dictWord{12, 11, 288}, + dictWord{ + 12, + 11, + 678, + }, + dictWord{13, 11, 313}, + dictWord{15, 11, 113}, + dictWord{18, 11, 114}, + dictWord{21, 11, 30}, + dictWord{150, 11, 53}, + dictWord{6, 11, 241}, + dictWord{7, 11, 907}, + dictWord{8, 11, 832}, + dictWord{9, 11, 342}, + dictWord{10, 11, 729}, + dictWord{11, 11, 284}, + dictWord{11, 11, 445}, + dictWord{11, 11, 651}, + dictWord{11, 11, 863}, + dictWord{13, 11, 398}, + dictWord{146, 11, 99}, + dictWord{132, 0, 872}, + dictWord{134, 0, 831}, + dictWord{134, 0, 1692}, + dictWord{ + 6, + 0, + 202, + }, + dictWord{6, 0, 1006}, + dictWord{9, 0, 832}, + dictWord{10, 0, 636}, + dictWord{11, 0, 208}, + dictWord{12, 0, 360}, + dictWord{17, 0, 118}, + dictWord{18, 0, 27}, + dictWord{20, 0, 67}, + dictWord{137, 11, 734}, + dictWord{132, 10, 725}, + dictWord{7, 11, 993}, + dictWord{138, 11, 666}, + dictWord{134, 0, 1954}, + dictWord{ + 134, + 10, + 196, + }, + dictWord{7, 0, 872}, + dictWord{10, 0, 516}, + dictWord{139, 0, 167}, + dictWord{133, 10, 831}, + dictWord{4, 11, 562}, + dictWord{9, 11, 254}, + dictWord{ + 139, + 11, + 879, + }, + dictWord{137, 0, 313}, + dictWord{4, 0, 224}, + dictWord{132, 11, 786}, + dictWord{11, 0, 24}, + dictWord{12, 0, 170}, + dictWord{136, 10, 723}, + dictWord{ + 5, + 0, + 546, + }, + dictWord{7, 0, 35}, + dictWord{8, 0, 11}, + dictWord{8, 0, 12}, + dictWord{9, 0, 315}, + dictWord{9, 0, 533}, + dictWord{10, 0, 802}, + dictWord{11, 0, 166}, + dictWord{ + 12, + 0, + 525, + }, + dictWord{142, 0, 243}, + dictWord{7, 0, 1937}, + dictWord{13, 10, 80}, + dictWord{13, 10, 437}, + dictWord{145, 10, 74}, + dictWord{5, 0, 241}, + dictWord{ + 8, + 0, + 242, + }, + dictWord{9, 0, 451}, + dictWord{10, 0, 667}, + 
dictWord{11, 0, 598}, + dictWord{140, 0, 429}, + dictWord{150, 0, 46}, + dictWord{6, 0, 1273}, + dictWord{ + 137, + 0, + 830, + }, + dictWord{5, 10, 848}, + dictWord{6, 10, 66}, + dictWord{136, 10, 764}, + dictWord{6, 0, 825}, + dictWord{134, 0, 993}, + dictWord{4, 0, 1006}, + dictWord{ + 10, + 0, + 327, + }, + dictWord{13, 0, 271}, + dictWord{4, 10, 36}, + dictWord{7, 10, 1387}, + dictWord{139, 10, 755}, + dictWord{134, 0, 1023}, + dictWord{135, 0, 1580}, + dictWord{ + 4, + 0, + 366, + }, + dictWord{137, 0, 516}, + dictWord{132, 10, 887}, + dictWord{6, 0, 1736}, + dictWord{135, 0, 1891}, + dictWord{6, 11, 216}, + dictWord{7, 11, 901}, + dictWord{ + 7, + 11, + 1343, + }, + dictWord{136, 11, 493}, + dictWord{6, 10, 165}, + dictWord{138, 10, 388}, + dictWord{7, 11, 341}, + dictWord{139, 11, 219}, + dictWord{4, 10, 719}, + dictWord{135, 10, 155}, + dictWord{134, 0, 1935}, + dictWord{132, 0, 826}, + dictWord{6, 0, 331}, + dictWord{6, 0, 1605}, + dictWord{8, 0, 623}, + dictWord{11, 0, 139}, + dictWord{139, 0, 171}, + dictWord{135, 11, 1734}, + dictWord{10, 11, 115}, + dictWord{11, 11, 420}, + dictWord{12, 11, 154}, + dictWord{13, 11, 404}, + dictWord{ + 14, + 11, + 346, + }, + dictWord{15, 11, 54}, + dictWord{143, 11, 112}, + dictWord{7, 0, 288}, + dictWord{4, 10, 353}, + dictWord{6, 10, 146}, + dictWord{6, 10, 1789}, + dictWord{ + 7, + 10, + 990, + }, + dictWord{7, 10, 1348}, + dictWord{9, 10, 665}, + dictWord{9, 10, 898}, + dictWord{11, 10, 893}, + dictWord{142, 10, 212}, + dictWord{6, 0, 916}, + dictWord{134, 0, 1592}, + dictWord{7, 0, 1888}, + dictWord{4, 10, 45}, + dictWord{135, 10, 1257}, + dictWord{5, 11, 1011}, + dictWord{136, 11, 701}, + dictWord{ + 139, + 11, + 596, + }, + dictWord{4, 11, 54}, + dictWord{5, 11, 666}, + dictWord{7, 11, 1039}, + dictWord{7, 11, 1130}, + dictWord{9, 11, 195}, + dictWord{138, 11, 302}, + dictWord{ + 134, + 0, + 1471, + }, + dictWord{134, 0, 1570}, + dictWord{132, 0, 394}, + dictWord{140, 10, 65}, + dictWord{136, 10, 816}, + 
dictWord{135, 0, 1931}, + dictWord{7, 0, 574}, + dictWord{135, 0, 1719}, + dictWord{134, 11, 467}, + dictWord{132, 0, 658}, + dictWord{9, 0, 781}, + dictWord{10, 0, 144}, + dictWord{11, 0, 385}, + dictWord{13, 0, 161}, + dictWord{13, 0, 228}, + dictWord{13, 0, 268}, + dictWord{20, 0, 107}, + dictWord{134, 11, 1669}, + dictWord{136, 0, 374}, + dictWord{135, 0, 735}, + dictWord{4, 0, 344}, + dictWord{6, 0, 498}, + dictWord{139, 0, 323}, + dictWord{7, 0, 586}, + dictWord{7, 0, 1063}, + dictWord{6, 10, 559}, + dictWord{134, 10, 1691}, + dictWord{137, 0, 155}, + dictWord{133, 0, 906}, + dictWord{7, 11, 122}, + dictWord{9, 11, 259}, + dictWord{10, 11, 84}, + dictWord{11, 11, 470}, + dictWord{12, 11, 541}, + dictWord{ + 141, + 11, + 379, + }, + dictWord{134, 0, 1139}, + dictWord{10, 0, 108}, + dictWord{139, 0, 116}, + dictWord{134, 10, 456}, + dictWord{133, 10, 925}, + dictWord{5, 11, 82}, + dictWord{ + 5, + 11, + 131, + }, + dictWord{7, 11, 1755}, + dictWord{8, 11, 31}, + dictWord{9, 11, 168}, + dictWord{9, 11, 764}, + dictWord{139, 11, 869}, + dictWord{134, 11, 605}, + dictWord{ + 5, + 11, + 278, + }, + dictWord{137, 11, 68}, + dictWord{4, 11, 163}, + dictWord{5, 11, 201}, + dictWord{5, 11, 307}, + dictWord{5, 11, 310}, + dictWord{6, 11, 335}, + dictWord{ + 7, + 11, + 284, + }, + dictWord{136, 11, 165}, + dictWord{135, 11, 1660}, + dictWord{6, 11, 33}, + dictWord{135, 11, 1244}, + dictWord{4, 0, 616}, + dictWord{136, 11, 483}, + dictWord{8, 0, 857}, + dictWord{8, 0, 902}, + dictWord{8, 0, 910}, + dictWord{10, 0, 879}, + dictWord{12, 0, 726}, + dictWord{4, 11, 199}, + dictWord{139, 11, 34}, + dictWord{136, 0, 692}, + dictWord{6, 10, 193}, + dictWord{7, 10, 240}, + dictWord{7, 10, 1682}, + dictWord{10, 10, 51}, + dictWord{10, 10, 640}, + dictWord{11, 10, 410}, + dictWord{13, 10, 82}, + dictWord{14, 10, 247}, + dictWord{14, 10, 331}, + dictWord{142, 10, 377}, + dictWord{6, 0, 823}, + dictWord{134, 0, 983}, + dictWord{ + 139, + 10, + 411, + }, + dictWord{132, 0, 305}, + 
dictWord{136, 10, 633}, + dictWord{138, 11, 203}, + dictWord{134, 0, 681}, + dictWord{6, 11, 326}, + dictWord{7, 11, 677}, + dictWord{137, 11, 425}, + dictWord{5, 0, 214}, + dictWord{7, 0, 603}, + dictWord{8, 0, 611}, + dictWord{9, 0, 686}, + dictWord{10, 0, 88}, + dictWord{11, 0, 459}, + dictWord{ + 11, + 0, + 496, + }, + dictWord{12, 0, 463}, + dictWord{12, 0, 590}, + dictWord{141, 0, 0}, + dictWord{136, 0, 1004}, + dictWord{142, 0, 23}, + dictWord{134, 0, 1703}, + dictWord{ + 147, + 11, + 8, + }, + dictWord{145, 11, 56}, + dictWord{135, 0, 1443}, + dictWord{4, 10, 237}, + dictWord{135, 10, 514}, + dictWord{6, 0, 714}, + dictWord{145, 0, 19}, + dictWord{ + 5, + 11, + 358, + }, + dictWord{7, 11, 473}, + dictWord{7, 11, 1184}, + dictWord{10, 11, 662}, + dictWord{13, 11, 212}, + dictWord{13, 11, 304}, + dictWord{13, 11, 333}, + dictWord{145, 11, 98}, + dictWord{4, 0, 737}, + dictWord{10, 0, 98}, + dictWord{11, 0, 294}, + dictWord{12, 0, 60}, + dictWord{12, 0, 437}, + dictWord{13, 0, 64}, + dictWord{ + 13, + 0, + 380, + }, + dictWord{142, 0, 430}, + dictWord{6, 10, 392}, + dictWord{7, 10, 65}, + dictWord{135, 10, 2019}, + dictWord{6, 0, 1758}, + dictWord{8, 0, 520}, + dictWord{ + 9, + 0, + 345, + }, + dictWord{9, 0, 403}, + dictWord{142, 0, 350}, + dictWord{5, 0, 47}, + dictWord{10, 0, 242}, + dictWord{138, 0, 579}, + dictWord{5, 0, 139}, + dictWord{7, 0, 1168}, + dictWord{138, 0, 539}, + dictWord{134, 0, 1459}, + dictWord{13, 0, 388}, + dictWord{141, 11, 388}, + dictWord{134, 0, 253}, + dictWord{7, 10, 1260}, + dictWord{ + 135, + 10, + 1790, + }, + dictWord{10, 0, 252}, + dictWord{9, 10, 222}, + dictWord{139, 10, 900}, + dictWord{140, 0, 745}, + dictWord{133, 11, 946}, + dictWord{4, 0, 107}, + dictWord{ + 7, + 0, + 613, + }, + dictWord{8, 0, 439}, + dictWord{8, 0, 504}, + dictWord{9, 0, 501}, + dictWord{10, 0, 383}, + dictWord{139, 0, 477}, + dictWord{135, 11, 1485}, + dictWord{ + 132, + 0, + 871, + }, + dictWord{7, 11, 411}, + dictWord{7, 11, 590}, + dictWord{8, 
11, 631}, + dictWord{9, 11, 323}, + dictWord{10, 11, 355}, + dictWord{11, 11, 491}, + dictWord{ + 12, + 11, + 143, + }, + dictWord{12, 11, 402}, + dictWord{13, 11, 73}, + dictWord{14, 11, 408}, + dictWord{15, 11, 107}, + dictWord{146, 11, 71}, + dictWord{132, 0, 229}, + dictWord{132, 0, 903}, + dictWord{140, 0, 71}, + dictWord{133, 0, 549}, + dictWord{4, 0, 47}, + dictWord{6, 0, 373}, + dictWord{7, 0, 452}, + dictWord{7, 0, 543}, + dictWord{ + 7, + 0, + 1828, + }, + dictWord{7, 0, 1856}, + dictWord{9, 0, 6}, + dictWord{11, 0, 257}, + dictWord{139, 0, 391}, + dictWord{7, 11, 1467}, + dictWord{8, 11, 328}, + dictWord{ + 10, + 11, + 544, + }, + dictWord{11, 11, 955}, + dictWord{13, 11, 320}, + dictWord{145, 11, 83}, + dictWord{5, 0, 980}, + dictWord{134, 0, 1754}, + dictWord{136, 0, 865}, + dictWord{ + 5, + 0, + 705, + }, + dictWord{137, 0, 606}, + dictWord{7, 0, 161}, + dictWord{8, 10, 201}, + dictWord{136, 10, 605}, + dictWord{143, 11, 35}, + dictWord{5, 11, 835}, + dictWord{ + 6, + 11, + 483, + }, + dictWord{140, 10, 224}, + dictWord{7, 0, 536}, + dictWord{7, 0, 1331}, + dictWord{136, 0, 143}, + dictWord{134, 0, 1388}, + dictWord{5, 0, 724}, + dictWord{ + 10, + 0, + 305, + }, + dictWord{11, 0, 151}, + dictWord{12, 0, 33}, + dictWord{12, 0, 121}, + dictWord{12, 0, 381}, + dictWord{17, 0, 3}, + dictWord{17, 0, 27}, + dictWord{17, 0, 78}, + dictWord{18, 0, 18}, + dictWord{19, 0, 54}, + dictWord{149, 0, 5}, + dictWord{4, 10, 523}, + dictWord{133, 10, 638}, + dictWord{5, 0, 19}, + dictWord{134, 0, 533}, + dictWord{ + 5, + 0, + 395, + }, + dictWord{5, 0, 951}, + dictWord{134, 0, 1776}, + dictWord{135, 0, 1908}, + dictWord{132, 0, 846}, + dictWord{10, 0, 74}, + dictWord{11, 0, 663}, + dictWord{ + 12, + 0, + 210, + }, + dictWord{13, 0, 166}, + dictWord{13, 0, 310}, + dictWord{14, 0, 373}, + dictWord{18, 0, 95}, + dictWord{19, 0, 43}, + dictWord{6, 10, 242}, + dictWord{7, 10, 227}, + dictWord{7, 10, 1581}, + dictWord{8, 10, 104}, + dictWord{9, 10, 113}, + dictWord{9, 10, 
220}, + dictWord{9, 10, 427}, + dictWord{10, 10, 239}, + dictWord{11, 10, 579}, + dictWord{11, 10, 1023}, + dictWord{13, 10, 4}, + dictWord{13, 10, 204}, + dictWord{13, 10, 316}, + dictWord{148, 10, 86}, + dictWord{9, 11, 716}, + dictWord{11, 11, 108}, + dictWord{13, 11, 123}, + dictWord{14, 11, 252}, + dictWord{19, 11, 38}, + dictWord{21, 11, 3}, + dictWord{151, 11, 11}, + dictWord{8, 0, 372}, + dictWord{9, 0, 122}, + dictWord{138, 0, 175}, + dictWord{132, 11, 677}, + dictWord{7, 11, 1374}, + dictWord{136, 11, 540}, + dictWord{135, 10, 861}, + dictWord{132, 0, 695}, + dictWord{ + 7, + 0, + 497, + }, + dictWord{9, 0, 387}, + dictWord{147, 0, 81}, + dictWord{136, 0, 937}, + dictWord{134, 0, 718}, + dictWord{7, 0, 1328}, + dictWord{136, 10, 494}, + dictWord{ + 132, + 11, + 331, + }, + dictWord{6, 0, 1581}, + dictWord{133, 11, 747}, + dictWord{5, 0, 284}, + dictWord{6, 0, 49}, + dictWord{6, 0, 350}, + dictWord{7, 0, 1}, + dictWord{7, 0, 377}, + dictWord{7, 0, 1693}, + dictWord{8, 0, 18}, + dictWord{8, 0, 678}, + dictWord{9, 0, 161}, + dictWord{9, 0, 585}, + dictWord{9, 0, 671}, + dictWord{9, 0, 839}, + dictWord{11, 0, 912}, + dictWord{141, 0, 427}, + dictWord{7, 10, 1306}, + dictWord{8, 10, 505}, + dictWord{9, 10, 482}, + dictWord{10, 10, 126}, + dictWord{11, 10, 225}, + dictWord{12, 10, 347}, + dictWord{12, 10, 449}, + dictWord{13, 10, 19}, + dictWord{14, 10, 218}, + dictWord{142, 10, 435}, + dictWord{10, 10, 764}, + dictWord{12, 10, 120}, + dictWord{ + 13, + 10, + 39, + }, + dictWord{145, 10, 127}, + dictWord{4, 0, 597}, + dictWord{133, 10, 268}, + dictWord{134, 0, 1094}, + dictWord{4, 0, 1008}, + dictWord{134, 0, 1973}, + dictWord{132, 0, 811}, + dictWord{139, 0, 908}, + dictWord{135, 0, 1471}, + dictWord{133, 11, 326}, + dictWord{4, 10, 384}, + dictWord{135, 10, 1022}, + dictWord{ + 7, + 0, + 1935, + }, + dictWord{8, 0, 324}, + dictWord{12, 0, 42}, + dictWord{4, 11, 691}, + dictWord{7, 11, 1935}, + dictWord{8, 11, 324}, + dictWord{9, 11, 35}, + dictWord{10, 11, 
680}, + dictWord{11, 11, 364}, + dictWord{12, 11, 42}, + dictWord{13, 11, 357}, + dictWord{146, 11, 16}, + dictWord{135, 0, 2014}, + dictWord{7, 0, 2007}, + dictWord{ + 9, + 0, + 101, + }, + dictWord{9, 0, 450}, + dictWord{10, 0, 66}, + dictWord{10, 0, 842}, + dictWord{11, 0, 536}, + dictWord{12, 0, 587}, + dictWord{6, 11, 32}, + dictWord{7, 11, 385}, + dictWord{7, 11, 757}, + dictWord{7, 11, 1916}, + dictWord{8, 11, 37}, + dictWord{8, 11, 94}, + dictWord{8, 11, 711}, + dictWord{9, 11, 541}, + dictWord{10, 11, 162}, + dictWord{ + 10, + 11, + 795, + }, + dictWord{11, 11, 989}, + dictWord{11, 11, 1010}, + dictWord{12, 11, 14}, + dictWord{142, 11, 308}, + dictWord{139, 0, 586}, + dictWord{ + 135, + 10, + 1703, + }, + dictWord{7, 0, 1077}, + dictWord{11, 0, 28}, + dictWord{9, 10, 159}, + dictWord{140, 10, 603}, + dictWord{6, 0, 1221}, + dictWord{136, 10, 583}, + dictWord{ + 6, + 11, + 152, + }, + dictWord{6, 11, 349}, + dictWord{6, 11, 1682}, + dictWord{7, 11, 1252}, + dictWord{8, 11, 112}, + dictWord{9, 11, 435}, + dictWord{9, 11, 668}, + dictWord{ + 10, + 11, + 290, + }, + dictWord{10, 11, 319}, + dictWord{10, 11, 815}, + dictWord{11, 11, 180}, + dictWord{11, 11, 837}, + dictWord{12, 11, 240}, + dictWord{13, 11, 152}, + dictWord{13, 11, 219}, + dictWord{142, 11, 158}, + dictWord{139, 0, 62}, + dictWord{132, 10, 515}, + dictWord{8, 10, 632}, + dictWord{8, 10, 697}, + dictWord{ + 137, + 10, + 854, + }, + dictWord{134, 0, 1766}, + dictWord{132, 11, 581}, + dictWord{6, 11, 126}, + dictWord{7, 11, 573}, + dictWord{8, 11, 397}, + dictWord{142, 11, 44}, + dictWord{ + 150, + 0, + 28, + }, + dictWord{11, 0, 670}, + dictWord{22, 0, 25}, + dictWord{4, 10, 136}, + dictWord{133, 10, 551}, + dictWord{6, 0, 1665}, + dictWord{7, 0, 256}, + dictWord{ + 7, + 0, + 1388, + }, + dictWord{138, 0, 499}, + dictWord{4, 0, 22}, + dictWord{5, 0, 10}, + dictWord{7, 0, 1576}, + dictWord{136, 0, 97}, + dictWord{134, 10, 1782}, + dictWord{5, 0, 481}, + dictWord{7, 10, 1287}, + dictWord{9, 10, 44}, 
+ dictWord{10, 10, 552}, + dictWord{10, 10, 642}, + dictWord{11, 10, 839}, + dictWord{12, 10, 274}, + dictWord{ + 12, + 10, + 275, + }, + dictWord{12, 10, 372}, + dictWord{13, 10, 91}, + dictWord{142, 10, 125}, + dictWord{133, 11, 926}, + dictWord{7, 11, 1232}, + dictWord{137, 11, 531}, + dictWord{6, 0, 134}, + dictWord{7, 0, 437}, + dictWord{7, 0, 1824}, + dictWord{9, 0, 37}, + dictWord{14, 0, 285}, + dictWord{142, 0, 371}, + dictWord{7, 0, 486}, + dictWord{8, 0, 155}, + dictWord{11, 0, 93}, + dictWord{140, 0, 164}, + dictWord{6, 0, 1391}, + dictWord{134, 0, 1442}, + dictWord{133, 11, 670}, + dictWord{133, 0, 591}, + dictWord{ + 6, + 10, + 147, + }, + dictWord{7, 10, 886}, + dictWord{7, 11, 1957}, + dictWord{9, 10, 753}, + dictWord{138, 10, 268}, + dictWord{5, 0, 380}, + dictWord{5, 0, 650}, + dictWord{ + 7, + 0, + 1173, + }, + dictWord{136, 0, 310}, + dictWord{4, 0, 364}, + dictWord{7, 0, 1156}, + dictWord{7, 0, 1187}, + dictWord{137, 0, 409}, + dictWord{135, 11, 1621}, + dictWord{ + 134, + 0, + 482, + }, + dictWord{133, 11, 506}, + dictWord{4, 0, 781}, + dictWord{6, 0, 487}, + dictWord{7, 0, 926}, + dictWord{8, 0, 263}, + dictWord{139, 0, 500}, + dictWord{ + 138, + 10, + 137, + }, + dictWord{135, 11, 242}, + dictWord{139, 11, 96}, + dictWord{133, 10, 414}, + dictWord{135, 10, 1762}, + dictWord{134, 0, 804}, + dictWord{5, 11, 834}, + dictWord{7, 11, 1202}, + dictWord{8, 11, 14}, + dictWord{9, 11, 481}, + dictWord{137, 11, 880}, + dictWord{134, 10, 599}, + dictWord{4, 0, 94}, + dictWord{135, 0, 1265}, + dictWord{4, 0, 415}, + dictWord{132, 0, 417}, + dictWord{5, 0, 348}, + dictWord{6, 0, 522}, + dictWord{6, 10, 1749}, + dictWord{7, 11, 1526}, + dictWord{138, 11, 465}, + dictWord{134, 10, 1627}, + dictWord{132, 0, 1012}, + dictWord{132, 10, 488}, + dictWord{4, 11, 357}, + dictWord{6, 11, 172}, + dictWord{7, 11, 143}, + dictWord{ + 137, + 11, + 413, + }, + dictWord{4, 10, 83}, + dictWord{4, 11, 590}, + dictWord{146, 11, 76}, + dictWord{140, 10, 676}, + dictWord{7, 
11, 287}, + dictWord{8, 11, 355}, + dictWord{ + 9, + 11, + 293, + }, + dictWord{137, 11, 743}, + dictWord{134, 10, 278}, + dictWord{6, 0, 1803}, + dictWord{18, 0, 165}, + dictWord{24, 0, 21}, + dictWord{5, 11, 169}, + dictWord{ + 7, + 11, + 333, + }, + dictWord{136, 11, 45}, + dictWord{12, 10, 97}, + dictWord{140, 11, 97}, + dictWord{4, 0, 408}, + dictWord{4, 0, 741}, + dictWord{135, 0, 500}, + dictWord{ + 132, + 11, + 198, + }, + dictWord{7, 10, 388}, + dictWord{7, 10, 644}, + dictWord{139, 10, 781}, + dictWord{4, 11, 24}, + dictWord{5, 11, 140}, + dictWord{5, 11, 185}, + dictWord{ + 7, + 11, + 1500, + }, + dictWord{11, 11, 565}, + dictWord{139, 11, 838}, + dictWord{6, 0, 1321}, + dictWord{9, 0, 257}, + dictWord{7, 10, 229}, + dictWord{8, 10, 59}, + dictWord{ + 9, + 10, + 190, + }, + dictWord{10, 10, 378}, + dictWord{140, 10, 191}, + dictWord{4, 11, 334}, + dictWord{133, 11, 593}, + dictWord{135, 11, 1885}, + dictWord{134, 0, 1138}, + dictWord{4, 0, 249}, + dictWord{6, 0, 73}, + dictWord{135, 0, 177}, + dictWord{133, 0, 576}, + dictWord{142, 0, 231}, + dictWord{137, 0, 288}, + dictWord{132, 10, 660}, + dictWord{7, 10, 1035}, + dictWord{138, 10, 737}, + dictWord{135, 0, 1487}, + dictWord{6, 0, 989}, + dictWord{9, 0, 433}, + dictWord{7, 10, 690}, + dictWord{9, 10, 587}, + dictWord{140, 10, 521}, + dictWord{7, 0, 1264}, + dictWord{7, 0, 1678}, + dictWord{11, 0, 945}, + dictWord{12, 0, 341}, + dictWord{12, 0, 471}, + dictWord{140, 0, 569}, + dictWord{132, 11, 709}, + dictWord{133, 11, 897}, + dictWord{5, 11, 224}, + dictWord{13, 11, 174}, + dictWord{146, 11, 52}, + dictWord{135, 11, 1840}, + dictWord{ + 134, + 10, + 1744, + }, + dictWord{12, 0, 87}, + dictWord{16, 0, 74}, + dictWord{4, 10, 733}, + dictWord{9, 10, 194}, + dictWord{10, 10, 92}, + dictWord{11, 10, 198}, + dictWord{ + 12, + 10, + 84, + }, + dictWord{141, 10, 128}, + dictWord{140, 0, 779}, + dictWord{135, 0, 538}, + dictWord{4, 11, 608}, + dictWord{133, 11, 497}, + dictWord{133, 0, 413}, + dictWord{7, 11, 
1375}, + dictWord{7, 11, 1466}, + dictWord{138, 11, 331}, + dictWord{136, 0, 495}, + dictWord{6, 11, 540}, + dictWord{136, 11, 136}, + dictWord{7, 0, 54}, + dictWord{8, 0, 312}, + dictWord{10, 0, 191}, + dictWord{10, 0, 614}, + dictWord{140, 0, 567}, + dictWord{6, 0, 468}, + dictWord{7, 0, 567}, + dictWord{7, 0, 1478}, + dictWord{ + 8, + 0, + 530, + }, + dictWord{14, 0, 290}, + dictWord{133, 11, 999}, + dictWord{4, 11, 299}, + dictWord{7, 10, 306}, + dictWord{135, 11, 1004}, + dictWord{142, 11, 296}, + dictWord{134, 0, 1484}, + dictWord{133, 10, 979}, + dictWord{6, 0, 609}, + dictWord{9, 0, 815}, + dictWord{12, 11, 137}, + dictWord{14, 11, 9}, + dictWord{14, 11, 24}, + dictWord{142, 11, 64}, + dictWord{133, 11, 456}, + dictWord{6, 0, 484}, + dictWord{135, 0, 822}, + dictWord{133, 10, 178}, + dictWord{136, 11, 180}, + dictWord{ + 132, + 11, + 755, + }, + dictWord{137, 0, 900}, + dictWord{135, 0, 1335}, + dictWord{6, 0, 1724}, + dictWord{135, 0, 2022}, + dictWord{135, 11, 1139}, + dictWord{5, 0, 640}, + dictWord{132, 10, 390}, + dictWord{6, 0, 1831}, + dictWord{138, 11, 633}, + dictWord{135, 11, 566}, + dictWord{4, 11, 890}, + dictWord{5, 11, 805}, + dictWord{5, 11, 819}, + dictWord{5, 11, 961}, + dictWord{6, 11, 396}, + dictWord{6, 11, 1631}, + dictWord{6, 11, 1678}, + dictWord{7, 11, 1967}, + dictWord{7, 11, 2041}, + dictWord{ + 9, + 11, + 630, + }, + dictWord{11, 11, 8}, + dictWord{11, 11, 1019}, + dictWord{12, 11, 176}, + dictWord{13, 11, 225}, + dictWord{14, 11, 292}, + dictWord{149, 11, 24}, + dictWord{ + 132, + 0, + 474, + }, + dictWord{134, 0, 1103}, + dictWord{135, 0, 1504}, + dictWord{134, 0, 1576}, + dictWord{6, 0, 961}, + dictWord{6, 0, 1034}, + dictWord{140, 0, 655}, + dictWord{11, 11, 514}, + dictWord{149, 11, 20}, + dictWord{5, 0, 305}, + dictWord{135, 11, 1815}, + dictWord{7, 11, 1505}, + dictWord{10, 11, 190}, + dictWord{ + 10, + 11, + 634, + }, + dictWord{11, 11, 792}, + dictWord{12, 11, 358}, + dictWord{140, 11, 447}, + dictWord{5, 11, 0}, + 
dictWord{6, 11, 536}, + dictWord{7, 11, 604}, + dictWord{ + 13, + 11, + 445, + }, + dictWord{145, 11, 126}, + dictWord{7, 0, 1236}, + dictWord{133, 10, 105}, + dictWord{4, 0, 480}, + dictWord{6, 0, 217}, + dictWord{6, 0, 302}, + dictWord{ + 6, + 0, + 1642, + }, + dictWord{7, 0, 130}, + dictWord{7, 0, 837}, + dictWord{7, 0, 1321}, + dictWord{7, 0, 1547}, + dictWord{7, 0, 1657}, + dictWord{8, 0, 429}, + dictWord{9, 0, 228}, + dictWord{13, 0, 289}, + dictWord{13, 0, 343}, + dictWord{19, 0, 101}, + dictWord{6, 11, 232}, + dictWord{6, 11, 412}, + dictWord{7, 11, 1074}, + dictWord{8, 11, 9}, + dictWord{ + 8, + 11, + 157, + }, + dictWord{8, 11, 786}, + dictWord{9, 11, 196}, + dictWord{9, 11, 352}, + dictWord{9, 11, 457}, + dictWord{10, 11, 337}, + dictWord{11, 11, 232}, + dictWord{ + 11, + 11, + 877, + }, + dictWord{12, 11, 480}, + dictWord{140, 11, 546}, + dictWord{5, 10, 438}, + dictWord{7, 11, 958}, + dictWord{9, 10, 694}, + dictWord{12, 10, 627}, + dictWord{ + 13, + 11, + 38, + }, + dictWord{141, 10, 210}, + dictWord{4, 11, 382}, + dictWord{136, 11, 579}, + dictWord{7, 0, 278}, + dictWord{10, 0, 739}, + dictWord{11, 0, 708}, + dictWord{ + 141, + 0, + 348, + }, + dictWord{4, 11, 212}, + dictWord{135, 11, 1206}, + dictWord{135, 11, 1898}, + dictWord{6, 0, 708}, + dictWord{6, 0, 1344}, + dictWord{152, 10, 11}, + dictWord{137, 11, 768}, + dictWord{134, 0, 1840}, + dictWord{140, 0, 233}, + dictWord{8, 10, 25}, + dictWord{138, 10, 826}, + dictWord{6, 0, 2017}, + dictWord{ + 133, + 11, + 655, + }, + dictWord{6, 0, 1488}, + dictWord{139, 11, 290}, + dictWord{132, 10, 308}, + dictWord{134, 0, 1590}, + dictWord{134, 0, 1800}, + dictWord{134, 0, 1259}, + dictWord{16, 0, 28}, + dictWord{6, 11, 231}, + dictWord{7, 11, 95}, + dictWord{136, 11, 423}, + dictWord{133, 11, 300}, + dictWord{135, 10, 150}, + dictWord{ + 136, + 10, + 649, + }, + dictWord{7, 11, 1874}, + dictWord{137, 11, 641}, + dictWord{6, 11, 237}, + dictWord{7, 11, 611}, + dictWord{8, 11, 100}, + dictWord{9, 11, 416}, 
+ dictWord{ + 11, + 11, + 335, + }, + dictWord{12, 11, 173}, + dictWord{146, 11, 101}, + dictWord{137, 0, 45}, + dictWord{134, 10, 521}, + dictWord{17, 0, 36}, + dictWord{14, 11, 26}, + dictWord{ + 146, + 11, + 150, + }, + dictWord{7, 0, 1442}, + dictWord{14, 0, 22}, + dictWord{5, 10, 339}, + dictWord{15, 10, 41}, + dictWord{15, 10, 166}, + dictWord{147, 10, 66}, + dictWord{ + 8, + 0, + 378, + }, + dictWord{6, 11, 581}, + dictWord{135, 11, 1119}, + dictWord{134, 0, 1507}, + dictWord{147, 11, 117}, + dictWord{139, 0, 39}, + dictWord{134, 0, 1054}, + dictWord{6, 0, 363}, + dictWord{7, 0, 1955}, + dictWord{136, 0, 725}, + dictWord{134, 0, 2036}, + dictWord{133, 11, 199}, + dictWord{6, 0, 1871}, + dictWord{9, 0, 935}, + dictWord{9, 0, 961}, + dictWord{9, 0, 1004}, + dictWord{9, 0, 1016}, + dictWord{12, 0, 805}, + dictWord{12, 0, 852}, + dictWord{12, 0, 853}, + dictWord{12, 0, 869}, + dictWord{ + 12, + 0, + 882, + }, + dictWord{12, 0, 896}, + dictWord{12, 0, 906}, + dictWord{12, 0, 917}, + dictWord{12, 0, 940}, + dictWord{15, 0, 170}, + dictWord{15, 0, 176}, + dictWord{ + 15, + 0, + 188, + }, + dictWord{15, 0, 201}, + dictWord{15, 0, 205}, + dictWord{15, 0, 212}, + dictWord{15, 0, 234}, + dictWord{15, 0, 244}, + dictWord{18, 0, 181}, + dictWord{18, 0, 193}, + dictWord{18, 0, 196}, + dictWord{18, 0, 201}, + dictWord{18, 0, 202}, + dictWord{18, 0, 210}, + dictWord{18, 0, 217}, + dictWord{18, 0, 235}, + dictWord{18, 0, 236}, + dictWord{18, 0, 237}, + dictWord{21, 0, 54}, + dictWord{21, 0, 55}, + dictWord{21, 0, 58}, + dictWord{21, 0, 59}, + dictWord{152, 0, 22}, + dictWord{134, 10, 1628}, + dictWord{ + 137, + 0, + 805, + }, + dictWord{5, 0, 813}, + dictWord{135, 0, 2046}, + dictWord{142, 11, 42}, + dictWord{5, 0, 712}, + dictWord{6, 0, 1240}, + dictWord{11, 0, 17}, + dictWord{ + 13, + 0, + 321, + }, + dictWord{144, 0, 67}, + dictWord{132, 0, 617}, + dictWord{135, 10, 829}, + dictWord{6, 0, 320}, + dictWord{7, 0, 781}, + dictWord{7, 0, 1921}, + dictWord{9, 0, 55}, + 
dictWord{10, 0, 186}, + dictWord{10, 0, 273}, + dictWord{10, 0, 664}, + dictWord{10, 0, 801}, + dictWord{11, 0, 996}, + dictWord{11, 0, 997}, + dictWord{13, 0, 157}, + dictWord{142, 0, 170}, + dictWord{136, 0, 271}, + dictWord{5, 10, 486}, + dictWord{135, 10, 1349}, + dictWord{18, 11, 91}, + dictWord{147, 11, 70}, + dictWord{10, 0, 445}, + dictWord{7, 10, 1635}, + dictWord{8, 10, 17}, + dictWord{138, 10, 295}, + dictWord{136, 11, 404}, + dictWord{7, 0, 103}, + dictWord{7, 0, 863}, + dictWord{11, 0, 184}, + dictWord{145, 0, 62}, + dictWord{138, 10, 558}, + dictWord{137, 0, 659}, + dictWord{6, 11, 312}, + dictWord{6, 11, 1715}, + dictWord{10, 11, 584}, + dictWord{ + 11, + 11, + 546, + }, + dictWord{11, 11, 692}, + dictWord{12, 11, 259}, + dictWord{12, 11, 295}, + dictWord{13, 11, 46}, + dictWord{141, 11, 154}, + dictWord{134, 0, 676}, + dictWord{132, 11, 588}, + dictWord{4, 11, 231}, + dictWord{5, 11, 61}, + dictWord{6, 11, 104}, + dictWord{7, 11, 729}, + dictWord{7, 11, 964}, + dictWord{7, 11, 1658}, + dictWord{140, 11, 414}, + dictWord{6, 11, 263}, + dictWord{138, 11, 757}, + dictWord{11, 0, 337}, + dictWord{142, 0, 303}, + dictWord{135, 11, 1363}, + dictWord{ + 132, + 11, + 320, + }, + dictWord{140, 0, 506}, + dictWord{134, 10, 447}, + dictWord{5, 0, 77}, + dictWord{7, 0, 1455}, + dictWord{10, 0, 843}, + dictWord{147, 0, 73}, + dictWord{ + 7, + 10, + 577, + }, + dictWord{7, 10, 1432}, + dictWord{9, 10, 475}, + dictWord{9, 10, 505}, + dictWord{9, 10, 526}, + dictWord{9, 10, 609}, + dictWord{9, 10, 689}, + dictWord{ + 9, + 10, + 726, + }, + dictWord{9, 10, 735}, + dictWord{9, 10, 738}, + dictWord{10, 10, 556}, + dictWord{10, 10, 674}, + dictWord{10, 10, 684}, + dictWord{11, 10, 89}, + dictWord{ + 11, + 10, + 202, + }, + dictWord{11, 10, 272}, + dictWord{11, 10, 380}, + dictWord{11, 10, 415}, + dictWord{11, 10, 505}, + dictWord{11, 10, 537}, + dictWord{11, 10, 550}, + dictWord{11, 10, 562}, + dictWord{11, 10, 640}, + dictWord{11, 10, 667}, + dictWord{11, 10, 688}, + 
dictWord{11, 10, 847}, + dictWord{11, 10, 927}, + dictWord{ + 11, + 10, + 930, + }, + dictWord{11, 10, 940}, + dictWord{12, 10, 144}, + dictWord{12, 10, 325}, + dictWord{12, 10, 329}, + dictWord{12, 10, 389}, + dictWord{12, 10, 403}, + dictWord{ + 12, + 10, + 451, + }, + dictWord{12, 10, 515}, + dictWord{12, 10, 604}, + dictWord{12, 10, 616}, + dictWord{12, 10, 626}, + dictWord{13, 10, 66}, + dictWord{13, 10, 131}, + dictWord{13, 10, 167}, + dictWord{13, 10, 236}, + dictWord{13, 10, 368}, + dictWord{13, 10, 411}, + dictWord{13, 10, 434}, + dictWord{13, 10, 453}, + dictWord{ + 13, + 10, + 461, + }, + dictWord{13, 10, 474}, + dictWord{14, 10, 59}, + dictWord{14, 10, 60}, + dictWord{14, 10, 139}, + dictWord{14, 10, 152}, + dictWord{14, 10, 276}, + dictWord{ + 14, + 10, + 353, + }, + dictWord{14, 10, 402}, + dictWord{15, 10, 28}, + dictWord{15, 10, 81}, + dictWord{15, 10, 123}, + dictWord{15, 10, 152}, + dictWord{18, 10, 136}, + dictWord{148, 10, 88}, + dictWord{132, 0, 458}, + dictWord{135, 0, 1420}, + dictWord{6, 0, 109}, + dictWord{10, 0, 382}, + dictWord{4, 11, 405}, + dictWord{4, 10, 609}, + dictWord{7, 10, 756}, + dictWord{7, 11, 817}, + dictWord{9, 10, 544}, + dictWord{11, 10, 413}, + dictWord{14, 11, 58}, + dictWord{14, 10, 307}, + dictWord{16, 10, 25}, + dictWord{17, 11, 37}, + dictWord{146, 11, 124}, + dictWord{6, 0, 330}, + dictWord{7, 0, 1084}, + dictWord{11, 0, 142}, + dictWord{133, 11, 974}, + dictWord{4, 10, 930}, + dictWord{133, 10, 947}, + dictWord{5, 10, 939}, + dictWord{142, 11, 394}, + dictWord{16, 0, 91}, + dictWord{145, 0, 87}, + dictWord{5, 11, 235}, + dictWord{5, 10, 962}, + dictWord{7, 11, 1239}, + dictWord{11, 11, 131}, + dictWord{140, 11, 370}, + dictWord{11, 0, 492}, + dictWord{5, 10, 651}, + dictWord{8, 10, 170}, + dictWord{9, 10, 61}, + dictWord{9, 10, 63}, + dictWord{10, 10, 23}, + dictWord{10, 10, 37}, + dictWord{10, 10, 834}, + dictWord{11, 10, 4}, + dictWord{11, 10, 281}, + dictWord{11, 10, 503}, + dictWord{ + 11, + 10, + 677, + }, + 
dictWord{12, 10, 96}, + dictWord{12, 10, 130}, + dictWord{12, 10, 244}, + dictWord{14, 10, 5}, + dictWord{14, 10, 40}, + dictWord{14, 10, 162}, + dictWord{ + 14, + 10, + 202, + }, + dictWord{146, 10, 133}, + dictWord{4, 10, 406}, + dictWord{5, 10, 579}, + dictWord{12, 10, 492}, + dictWord{150, 10, 15}, + dictWord{9, 11, 137}, + dictWord{138, 11, 221}, + dictWord{134, 0, 1239}, + dictWord{11, 0, 211}, + dictWord{140, 0, 145}, + dictWord{7, 11, 390}, + dictWord{138, 11, 140}, + dictWord{ + 135, + 11, + 1418, + }, + dictWord{135, 11, 1144}, + dictWord{134, 0, 1049}, + dictWord{7, 0, 321}, + dictWord{6, 10, 17}, + dictWord{7, 10, 1001}, + dictWord{7, 10, 1982}, + dictWord{ + 9, + 10, + 886, + }, + dictWord{10, 10, 489}, + dictWord{10, 10, 800}, + dictWord{11, 10, 782}, + dictWord{12, 10, 320}, + dictWord{13, 10, 467}, + dictWord{14, 10, 145}, + dictWord{14, 10, 387}, + dictWord{143, 10, 119}, + dictWord{145, 10, 17}, + dictWord{5, 11, 407}, + dictWord{11, 11, 489}, + dictWord{19, 11, 37}, + dictWord{20, 11, 73}, + dictWord{150, 11, 38}, + dictWord{133, 10, 458}, + dictWord{135, 0, 1985}, + dictWord{7, 10, 1983}, + dictWord{8, 10, 0}, + dictWord{8, 10, 171}, + dictWord{ + 9, + 10, + 120, + }, + dictWord{9, 10, 732}, + dictWord{10, 10, 473}, + dictWord{11, 10, 656}, + dictWord{11, 10, 998}, + dictWord{18, 10, 0}, + dictWord{18, 10, 2}, + dictWord{ + 147, + 10, + 21, + }, + dictWord{5, 11, 325}, + dictWord{7, 11, 1483}, + dictWord{8, 11, 5}, + dictWord{8, 11, 227}, + dictWord{9, 11, 105}, + dictWord{10, 11, 585}, + dictWord{ + 140, + 11, + 614, + }, + dictWord{136, 0, 122}, + dictWord{132, 0, 234}, + dictWord{135, 11, 1196}, + dictWord{6, 0, 976}, + dictWord{6, 0, 1098}, + dictWord{134, 0, 1441}, + dictWord{ + 7, + 0, + 253, + }, + dictWord{136, 0, 549}, + dictWord{6, 11, 621}, + dictWord{13, 11, 504}, + dictWord{144, 11, 19}, + dictWord{132, 10, 519}, + dictWord{5, 0, 430}, + dictWord{ + 5, + 0, + 932, + }, + dictWord{6, 0, 131}, + dictWord{7, 0, 417}, + dictWord{9, 0, 
522}, + dictWord{11, 0, 314}, + dictWord{141, 0, 390}, + dictWord{14, 0, 149}, + dictWord{14, 0, 399}, + dictWord{143, 0, 57}, + dictWord{5, 10, 907}, + dictWord{6, 10, 31}, + dictWord{6, 11, 218}, + dictWord{7, 10, 491}, + dictWord{7, 10, 530}, + dictWord{8, 10, 592}, + dictWord{11, 10, 53}, + dictWord{11, 10, 779}, + dictWord{12, 10, 167}, + dictWord{12, 10, 411}, + dictWord{14, 10, 14}, + dictWord{14, 10, 136}, + dictWord{15, 10, 72}, + dictWord{16, 10, 17}, + dictWord{144, 10, 72}, + dictWord{140, 11, 330}, + dictWord{7, 11, 454}, + dictWord{7, 11, 782}, + dictWord{136, 11, 768}, + dictWord{ + 132, + 0, + 507, + }, + dictWord{10, 11, 676}, + dictWord{140, 11, 462}, + dictWord{6, 0, 630}, + dictWord{9, 0, 811}, + dictWord{4, 10, 208}, + dictWord{5, 10, 106}, + dictWord{ + 6, + 10, + 531, + }, + dictWord{8, 10, 408}, + dictWord{9, 10, 188}, + dictWord{138, 10, 572}, + dictWord{4, 0, 343}, + dictWord{5, 0, 511}, + dictWord{134, 10, 1693}, + dictWord{ + 134, + 11, + 164, + }, + dictWord{132, 0, 448}, + dictWord{7, 0, 455}, + dictWord{138, 0, 591}, + dictWord{135, 0, 1381}, + dictWord{12, 10, 441}, + dictWord{150, 11, 50}, + dictWord{9, 10, 449}, + dictWord{10, 10, 192}, + dictWord{138, 10, 740}, + dictWord{6, 0, 575}, + dictWord{132, 10, 241}, + dictWord{134, 0, 1175}, + dictWord{ + 134, + 0, + 653, + }, + dictWord{134, 0, 1761}, + dictWord{134, 0, 1198}, + dictWord{132, 10, 259}, + dictWord{6, 11, 343}, + dictWord{7, 11, 195}, + dictWord{9, 11, 226}, + dictWord{ + 10, + 11, + 197, + }, + dictWord{10, 11, 575}, + dictWord{11, 11, 502}, + dictWord{139, 11, 899}, + dictWord{7, 0, 1127}, + dictWord{7, 0, 1572}, + dictWord{10, 0, 297}, + dictWord{10, 0, 422}, + dictWord{11, 0, 764}, + dictWord{11, 0, 810}, + dictWord{12, 0, 264}, + dictWord{13, 0, 102}, + dictWord{13, 0, 300}, + dictWord{13, 0, 484}, + dictWord{ + 14, + 0, + 147, + }, + dictWord{14, 0, 229}, + dictWord{17, 0, 71}, + dictWord{18, 0, 118}, + dictWord{147, 0, 120}, + dictWord{135, 11, 666}, + 
dictWord{132, 0, 678}, + dictWord{ + 4, + 10, + 173, + }, + dictWord{5, 10, 312}, + dictWord{5, 10, 512}, + dictWord{135, 10, 1285}, + dictWord{7, 10, 1603}, + dictWord{7, 10, 1691}, + dictWord{9, 10, 464}, + dictWord{11, 10, 195}, + dictWord{12, 10, 279}, + dictWord{12, 10, 448}, + dictWord{14, 10, 11}, + dictWord{147, 10, 102}, + dictWord{16, 0, 99}, + dictWord{146, 0, 164}, + dictWord{7, 11, 1125}, + dictWord{9, 11, 143}, + dictWord{11, 11, 61}, + dictWord{14, 11, 405}, + dictWord{150, 11, 21}, + dictWord{137, 11, 260}, + dictWord{ + 4, + 10, + 452, + }, + dictWord{5, 10, 583}, + dictWord{5, 10, 817}, + dictWord{6, 10, 433}, + dictWord{7, 10, 593}, + dictWord{7, 10, 720}, + dictWord{7, 10, 1378}, + dictWord{ + 8, + 10, + 161, + }, + dictWord{9, 10, 284}, + dictWord{10, 10, 313}, + dictWord{139, 10, 886}, + dictWord{132, 10, 547}, + dictWord{136, 10, 722}, + dictWord{14, 0, 35}, + dictWord{142, 0, 191}, + dictWord{141, 0, 45}, + dictWord{138, 0, 121}, + dictWord{132, 0, 125}, + dictWord{134, 0, 1622}, + dictWord{133, 11, 959}, + dictWord{ + 8, + 10, + 420, + }, + dictWord{139, 10, 193}, + dictWord{132, 0, 721}, + dictWord{135, 10, 409}, + dictWord{136, 0, 145}, + dictWord{7, 0, 792}, + dictWord{8, 0, 147}, + dictWord{ + 10, + 0, + 821, + }, + dictWord{11, 0, 970}, + dictWord{11, 0, 1021}, + dictWord{136, 11, 173}, + dictWord{134, 11, 266}, + dictWord{132, 0, 715}, + dictWord{7, 0, 1999}, + dictWord{138, 10, 308}, + dictWord{133, 0, 531}, + dictWord{5, 0, 168}, + dictWord{5, 0, 930}, + dictWord{8, 0, 74}, + dictWord{9, 0, 623}, + dictWord{12, 0, 500}, + dictWord{ + 140, + 0, + 579, + }, + dictWord{144, 0, 65}, + dictWord{138, 11, 246}, + dictWord{6, 0, 220}, + dictWord{7, 0, 1101}, + dictWord{13, 0, 105}, + dictWord{142, 11, 314}, + dictWord{ + 5, + 10, + 1002, + }, + dictWord{136, 10, 745}, + dictWord{134, 0, 960}, + dictWord{20, 0, 0}, + dictWord{148, 11, 0}, + dictWord{4, 0, 1005}, + dictWord{4, 10, 239}, + dictWord{ + 6, + 10, + 477, + }, + dictWord{7, 10, 
1607}, + dictWord{11, 10, 68}, + dictWord{139, 10, 617}, + dictWord{6, 0, 19}, + dictWord{7, 0, 1413}, + dictWord{139, 0, 428}, + dictWord{ + 149, + 10, + 13, + }, + dictWord{7, 0, 96}, + dictWord{8, 0, 401}, + dictWord{8, 0, 703}, + dictWord{9, 0, 896}, + dictWord{136, 11, 300}, + dictWord{134, 0, 1595}, + dictWord{145, 0, 116}, + dictWord{136, 0, 1021}, + dictWord{7, 0, 1961}, + dictWord{7, 0, 1965}, + dictWord{7, 0, 2030}, + dictWord{8, 0, 150}, + dictWord{8, 0, 702}, + dictWord{8, 0, 737}, + dictWord{ + 8, + 0, + 750, + }, + dictWord{140, 0, 366}, + dictWord{11, 11, 75}, + dictWord{142, 11, 267}, + dictWord{132, 10, 367}, + dictWord{8, 0, 800}, + dictWord{9, 0, 148}, + dictWord{ + 9, + 0, + 872, + }, + dictWord{9, 0, 890}, + dictWord{11, 0, 309}, + dictWord{11, 0, 1001}, + dictWord{13, 0, 267}, + dictWord{13, 0, 323}, + dictWord{5, 11, 427}, + dictWord{ + 5, + 11, + 734, + }, + dictWord{7, 11, 478}, + dictWord{136, 11, 52}, + dictWord{7, 11, 239}, + dictWord{11, 11, 217}, + dictWord{142, 11, 165}, + dictWord{132, 11, 323}, + dictWord{140, 11, 419}, + dictWord{13, 0, 299}, + dictWord{142, 0, 75}, + dictWord{6, 11, 87}, + dictWord{6, 11, 1734}, + dictWord{7, 11, 20}, + dictWord{7, 11, 1056}, + dictWord{ + 8, + 11, + 732, + }, + dictWord{9, 11, 406}, + dictWord{9, 11, 911}, + dictWord{138, 11, 694}, + dictWord{134, 0, 1383}, + dictWord{132, 10, 694}, + dictWord{ + 133, + 11, + 613, + }, + dictWord{137, 0, 779}, + dictWord{4, 0, 598}, + dictWord{140, 10, 687}, + dictWord{6, 0, 970}, + dictWord{135, 0, 424}, + dictWord{133, 0, 547}, + dictWord{ + 7, + 11, + 32, + }, + dictWord{7, 11, 984}, + dictWord{8, 11, 85}, + dictWord{8, 11, 709}, + dictWord{9, 11, 579}, + dictWord{9, 11, 847}, + dictWord{9, 11, 856}, + dictWord{10, 11, 799}, + dictWord{11, 11, 258}, + dictWord{11, 11, 1007}, + dictWord{12, 11, 331}, + dictWord{12, 11, 615}, + dictWord{13, 11, 188}, + dictWord{13, 11, 435}, + dictWord{ + 14, + 11, + 8, + }, + dictWord{15, 11, 165}, + dictWord{16, 11, 27}, + 
dictWord{148, 11, 40}, + dictWord{6, 0, 1222}, + dictWord{134, 0, 1385}, + dictWord{132, 0, 876}, + dictWord{ + 138, + 11, + 151, + }, + dictWord{135, 10, 213}, + dictWord{4, 11, 167}, + dictWord{135, 11, 82}, + dictWord{133, 0, 133}, + dictWord{6, 11, 24}, + dictWord{7, 11, 74}, + dictWord{ + 7, + 11, + 678, + }, + dictWord{137, 11, 258}, + dictWord{5, 11, 62}, + dictWord{6, 11, 534}, + dictWord{7, 11, 684}, + dictWord{7, 11, 1043}, + dictWord{7, 11, 1072}, + dictWord{ + 8, + 11, + 280, + }, + dictWord{8, 11, 541}, + dictWord{8, 11, 686}, + dictWord{10, 11, 519}, + dictWord{11, 11, 252}, + dictWord{140, 11, 282}, + dictWord{136, 0, 187}, + dictWord{8, 0, 8}, + dictWord{10, 0, 0}, + dictWord{10, 0, 818}, + dictWord{139, 0, 988}, + dictWord{132, 11, 359}, + dictWord{11, 0, 429}, + dictWord{15, 0, 51}, + dictWord{ + 135, + 10, + 1672, + }, + dictWord{136, 0, 685}, + dictWord{5, 11, 211}, + dictWord{7, 11, 88}, + dictWord{136, 11, 627}, + dictWord{134, 0, 472}, + dictWord{136, 0, 132}, + dictWord{ + 6, + 11, + 145, + }, + dictWord{141, 11, 336}, + dictWord{4, 10, 751}, + dictWord{11, 10, 390}, + dictWord{140, 10, 32}, + dictWord{6, 0, 938}, + dictWord{6, 0, 1060}, + dictWord{ + 4, + 11, + 263, + }, + dictWord{4, 10, 409}, + dictWord{133, 10, 78}, + dictWord{137, 0, 874}, + dictWord{8, 0, 774}, + dictWord{10, 0, 670}, + dictWord{12, 0, 51}, + dictWord{ + 4, + 11, + 916, + }, + dictWord{6, 10, 473}, + dictWord{7, 10, 1602}, + dictWord{10, 10, 698}, + dictWord{12, 10, 212}, + dictWord{13, 10, 307}, + dictWord{145, 10, 105}, + dictWord{146, 0, 92}, + dictWord{143, 10, 156}, + dictWord{132, 0, 830}, + dictWord{137, 0, 701}, + dictWord{4, 11, 599}, + dictWord{6, 11, 1634}, + dictWord{7, 11, 5}, + dictWord{7, 11, 55}, + dictWord{7, 11, 67}, + dictWord{7, 11, 97}, + dictWord{7, 11, 691}, + dictWord{7, 11, 979}, + dictWord{7, 11, 1697}, + dictWord{8, 11, 207}, + dictWord{ + 8, + 11, + 214, + }, + dictWord{8, 11, 231}, + dictWord{8, 11, 294}, + dictWord{8, 11, 336}, + 
dictWord{8, 11, 428}, + dictWord{8, 11, 451}, + dictWord{8, 11, 460}, + dictWord{8, 11, 471}, + dictWord{8, 11, 622}, + dictWord{8, 11, 626}, + dictWord{8, 11, 679}, + dictWord{8, 11, 759}, + dictWord{8, 11, 829}, + dictWord{9, 11, 11}, + dictWord{9, 11, 246}, + dictWord{ + 9, + 11, + 484, + }, + dictWord{9, 11, 573}, + dictWord{9, 11, 706}, + dictWord{9, 11, 762}, + dictWord{9, 11, 798}, + dictWord{9, 11, 855}, + dictWord{9, 11, 870}, + dictWord{ + 9, + 11, + 912, + }, + dictWord{10, 11, 303}, + dictWord{10, 11, 335}, + dictWord{10, 11, 424}, + dictWord{10, 11, 461}, + dictWord{10, 11, 543}, + dictWord{10, 11, 759}, + dictWord{10, 11, 814}, + dictWord{11, 11, 59}, + dictWord{11, 11, 199}, + dictWord{11, 11, 235}, + dictWord{11, 11, 475}, + dictWord{11, 11, 590}, + dictWord{11, 11, 929}, + dictWord{11, 11, 963}, + dictWord{12, 11, 114}, + dictWord{12, 11, 182}, + dictWord{12, 11, 226}, + dictWord{12, 11, 332}, + dictWord{12, 11, 439}, + dictWord{ + 12, + 11, + 575, + }, + dictWord{12, 11, 598}, + dictWord{13, 11, 8}, + dictWord{13, 11, 125}, + dictWord{13, 11, 194}, + dictWord{13, 11, 287}, + dictWord{14, 11, 197}, + dictWord{ + 14, + 11, + 383, + }, + dictWord{15, 11, 53}, + dictWord{17, 11, 63}, + dictWord{19, 11, 46}, + dictWord{19, 11, 98}, + dictWord{19, 11, 106}, + dictWord{148, 11, 85}, + dictWord{ + 4, + 0, + 127, + }, + dictWord{5, 0, 350}, + dictWord{6, 0, 356}, + dictWord{8, 0, 426}, + dictWord{9, 0, 572}, + dictWord{10, 0, 247}, + dictWord{139, 0, 312}, + dictWord{134, 0, 1215}, + dictWord{6, 0, 59}, + dictWord{9, 0, 603}, + dictWord{13, 0, 397}, + dictWord{7, 11, 1853}, + dictWord{138, 11, 437}, + dictWord{134, 0, 1762}, + dictWord{ + 147, + 11, + 126, + }, + dictWord{135, 10, 883}, + dictWord{13, 0, 293}, + dictWord{142, 0, 56}, + dictWord{133, 10, 617}, + dictWord{139, 10, 50}, + dictWord{5, 11, 187}, + dictWord{ + 7, + 10, + 1518, + }, + dictWord{139, 10, 694}, + dictWord{135, 0, 441}, + dictWord{6, 0, 111}, + dictWord{7, 0, 4}, + dictWord{8, 0, 
163}, + dictWord{8, 0, 776}, + dictWord{ + 138, + 0, + 566, + }, + dictWord{132, 0, 806}, + dictWord{4, 11, 215}, + dictWord{9, 11, 38}, + dictWord{10, 11, 3}, + dictWord{11, 11, 23}, + dictWord{11, 11, 127}, + dictWord{ + 139, + 11, + 796, + }, + dictWord{14, 0, 233}, + dictWord{4, 10, 546}, + dictWord{135, 10, 2042}, + dictWord{135, 0, 1994}, + dictWord{134, 0, 1739}, + dictWord{135, 11, 1530}, + dictWord{136, 0, 393}, + dictWord{5, 0, 297}, + dictWord{7, 0, 1038}, + dictWord{14, 0, 359}, + dictWord{19, 0, 52}, + dictWord{148, 0, 47}, + dictWord{135, 0, 309}, + dictWord{ + 4, + 10, + 313, + }, + dictWord{133, 10, 577}, + dictWord{8, 10, 184}, + dictWord{141, 10, 433}, + dictWord{135, 10, 935}, + dictWord{12, 10, 186}, + dictWord{ + 12, + 10, + 292, + }, + dictWord{14, 10, 100}, + dictWord{146, 10, 70}, + dictWord{136, 0, 363}, + dictWord{14, 0, 175}, + dictWord{11, 10, 402}, + dictWord{12, 10, 109}, + dictWord{ + 12, + 10, + 431, + }, + dictWord{13, 10, 179}, + dictWord{13, 10, 206}, + dictWord{14, 10, 217}, + dictWord{16, 10, 3}, + dictWord{148, 10, 53}, + dictWord{5, 10, 886}, + dictWord{ + 6, + 10, + 46, + }, + dictWord{6, 10, 1790}, + dictWord{7, 10, 14}, + dictWord{7, 10, 732}, + dictWord{7, 10, 1654}, + dictWord{8, 10, 95}, + dictWord{8, 10, 327}, + dictWord{ + 8, + 10, + 616, + }, + dictWord{9, 10, 892}, + dictWord{10, 10, 598}, + dictWord{10, 10, 769}, + dictWord{11, 10, 134}, + dictWord{11, 10, 747}, + dictWord{12, 10, 378}, + dictWord{ + 142, + 10, + 97, + }, + dictWord{136, 0, 666}, + dictWord{135, 0, 1675}, + dictWord{6, 0, 655}, + dictWord{134, 0, 1600}, + dictWord{135, 0, 808}, + dictWord{133, 10, 1021}, + dictWord{4, 11, 28}, + dictWord{5, 11, 440}, + dictWord{7, 11, 248}, + dictWord{11, 11, 833}, + dictWord{140, 11, 344}, + dictWord{134, 11, 1654}, + dictWord{ + 132, + 0, + 280, + }, + dictWord{140, 0, 54}, + dictWord{4, 0, 421}, + dictWord{133, 0, 548}, + dictWord{132, 10, 153}, + dictWord{6, 11, 339}, + dictWord{135, 11, 923}, + dictWord{ + 133, 
+ 11, + 853, + }, + dictWord{133, 10, 798}, + dictWord{132, 10, 587}, + dictWord{6, 11, 249}, + dictWord{7, 11, 1234}, + dictWord{139, 11, 573}, + dictWord{6, 10, 598}, + dictWord{7, 10, 42}, + dictWord{8, 10, 695}, + dictWord{10, 10, 212}, + dictWord{11, 10, 158}, + dictWord{14, 10, 196}, + dictWord{145, 10, 85}, + dictWord{7, 0, 249}, + dictWord{5, 10, 957}, + dictWord{133, 10, 1008}, + dictWord{4, 10, 129}, + dictWord{135, 10, 465}, + dictWord{6, 0, 254}, + dictWord{7, 0, 842}, + dictWord{7, 0, 1659}, + dictWord{9, 0, 109}, + dictWord{10, 0, 103}, + dictWord{7, 10, 908}, + dictWord{7, 10, 1201}, + dictWord{9, 10, 755}, + dictWord{11, 10, 906}, + dictWord{12, 10, 527}, + dictWord{146, 10, 7}, + dictWord{5, 0, 262}, + dictWord{136, 10, 450}, + dictWord{144, 0, 1}, + dictWord{10, 11, 201}, + dictWord{142, 11, 319}, + dictWord{7, 11, 49}, + dictWord{ + 7, + 11, + 392, + }, + dictWord{8, 11, 20}, + dictWord{8, 11, 172}, + dictWord{8, 11, 690}, + dictWord{9, 11, 383}, + dictWord{9, 11, 845}, + dictWord{10, 11, 48}, + dictWord{ + 11, + 11, + 293, + }, + dictWord{11, 11, 832}, + dictWord{11, 11, 920}, + dictWord{141, 11, 221}, + dictWord{5, 11, 858}, + dictWord{133, 11, 992}, + dictWord{134, 0, 805}, + dictWord{139, 10, 1003}, + dictWord{6, 0, 1630}, + dictWord{134, 11, 307}, + dictWord{7, 11, 1512}, + dictWord{135, 11, 1794}, + dictWord{6, 11, 268}, + dictWord{ + 137, + 11, + 62, + }, + dictWord{135, 10, 1868}, + dictWord{133, 0, 671}, + dictWord{4, 0, 989}, + dictWord{8, 0, 972}, + dictWord{136, 0, 998}, + dictWord{132, 11, 423}, + dictWord{132, 0, 889}, + dictWord{135, 0, 1382}, + dictWord{135, 0, 1910}, + dictWord{7, 10, 965}, + dictWord{7, 10, 1460}, + dictWord{135, 10, 1604}, + dictWord{ + 4, + 0, + 627, + }, + dictWord{5, 0, 775}, + dictWord{138, 11, 106}, + dictWord{134, 11, 348}, + dictWord{7, 0, 202}, + dictWord{11, 0, 362}, + dictWord{11, 0, 948}, + dictWord{ + 140, + 0, + 388, + }, + dictWord{138, 11, 771}, + dictWord{6, 11, 613}, + dictWord{136, 11, 223}, + 
dictWord{6, 0, 560}, + dictWord{7, 0, 451}, + dictWord{8, 0, 389}, + dictWord{ + 12, + 0, + 490, + }, + dictWord{13, 0, 16}, + dictWord{13, 0, 215}, + dictWord{13, 0, 351}, + dictWord{18, 0, 132}, + dictWord{147, 0, 125}, + dictWord{135, 0, 841}, + dictWord{ + 136, + 0, + 566, + }, + dictWord{136, 0, 938}, + dictWord{132, 11, 670}, + dictWord{5, 0, 912}, + dictWord{6, 0, 1695}, + dictWord{140, 11, 55}, + dictWord{9, 11, 40}, + dictWord{ + 139, + 11, + 136, + }, + dictWord{7, 0, 1361}, + dictWord{7, 10, 982}, + dictWord{10, 10, 32}, + dictWord{143, 10, 56}, + dictWord{11, 11, 259}, + dictWord{140, 11, 270}, + dictWord{ + 5, + 0, + 236, + }, + dictWord{6, 0, 572}, + dictWord{8, 0, 492}, + dictWord{11, 0, 618}, + dictWord{144, 0, 56}, + dictWord{8, 11, 572}, + dictWord{9, 11, 310}, + dictWord{9, 11, 682}, + dictWord{137, 11, 698}, + dictWord{134, 0, 1854}, + dictWord{5, 0, 190}, + dictWord{136, 0, 318}, + dictWord{133, 10, 435}, + dictWord{135, 0, 1376}, + dictWord{ + 4, + 11, + 296, + }, + dictWord{6, 11, 352}, + dictWord{7, 11, 401}, + dictWord{7, 11, 1410}, + dictWord{7, 11, 1594}, + dictWord{7, 11, 1674}, + dictWord{8, 11, 63}, + dictWord{ + 8, + 11, + 660, + }, + dictWord{137, 11, 74}, + dictWord{7, 0, 349}, + dictWord{5, 10, 85}, + dictWord{6, 10, 419}, + dictWord{7, 10, 305}, + dictWord{7, 10, 361}, + dictWord{7, 10, 1337}, + dictWord{8, 10, 71}, + dictWord{140, 10, 519}, + dictWord{4, 11, 139}, + dictWord{4, 11, 388}, + dictWord{140, 11, 188}, + dictWord{6, 0, 1972}, + dictWord{6, 0, 2013}, + dictWord{8, 0, 951}, + dictWord{10, 0, 947}, + dictWord{10, 0, 974}, + dictWord{10, 0, 1018}, + dictWord{142, 0, 476}, + dictWord{140, 10, 688}, + dictWord{ + 135, + 10, + 740, + }, + dictWord{5, 10, 691}, + dictWord{7, 10, 345}, + dictWord{9, 10, 94}, + dictWord{140, 10, 169}, + dictWord{9, 0, 344}, + dictWord{5, 10, 183}, + dictWord{6, 10, 582}, + dictWord{10, 10, 679}, + dictWord{140, 10, 435}, + dictWord{135, 10, 511}, + dictWord{132, 0, 850}, + dictWord{8, 11, 441}, 
+ dictWord{10, 11, 314}, + dictWord{ + 143, + 11, + 3, + }, + dictWord{7, 10, 1993}, + dictWord{136, 10, 684}, + dictWord{4, 11, 747}, + dictWord{6, 11, 290}, + dictWord{6, 10, 583}, + dictWord{7, 11, 649}, + dictWord{ + 7, + 11, + 1479, + }, + dictWord{135, 11, 1583}, + dictWord{133, 11, 232}, + dictWord{133, 10, 704}, + dictWord{134, 0, 910}, + dictWord{4, 10, 179}, + dictWord{5, 10, 198}, + dictWord{133, 10, 697}, + dictWord{7, 10, 347}, + dictWord{7, 10, 971}, + dictWord{8, 10, 181}, + dictWord{138, 10, 711}, + dictWord{136, 11, 525}, + dictWord{ + 14, + 0, + 19, + }, + dictWord{14, 0, 28}, + dictWord{144, 0, 29}, + dictWord{7, 0, 85}, + dictWord{7, 0, 247}, + dictWord{8, 0, 585}, + dictWord{138, 0, 163}, + dictWord{4, 0, 487}, + dictWord{ + 7, + 11, + 472, + }, + dictWord{7, 11, 1801}, + dictWord{10, 11, 748}, + dictWord{141, 11, 458}, + dictWord{4, 10, 243}, + dictWord{5, 10, 203}, + dictWord{7, 10, 19}, + dictWord{ + 7, + 10, + 71, + }, + dictWord{7, 10, 113}, + dictWord{10, 10, 405}, + dictWord{11, 10, 357}, + dictWord{142, 10, 240}, + dictWord{7, 10, 1450}, + dictWord{139, 10, 99}, + dictWord{132, 11, 425}, + dictWord{138, 0, 145}, + dictWord{147, 0, 83}, + dictWord{6, 10, 492}, + dictWord{137, 11, 247}, + dictWord{4, 0, 1013}, + dictWord{ + 134, + 0, + 2033, + }, + dictWord{5, 10, 134}, + dictWord{6, 10, 408}, + dictWord{6, 10, 495}, + dictWord{135, 10, 1593}, + dictWord{135, 0, 1922}, + dictWord{134, 11, 1768}, + dictWord{4, 0, 124}, + dictWord{10, 0, 457}, + dictWord{11, 0, 121}, + dictWord{11, 0, 169}, + dictWord{11, 0, 870}, + dictWord{11, 0, 874}, + dictWord{12, 0, 214}, + dictWord{ + 14, + 0, + 187, + }, + dictWord{143, 0, 77}, + dictWord{5, 0, 557}, + dictWord{135, 0, 1457}, + dictWord{139, 0, 66}, + dictWord{5, 11, 943}, + dictWord{6, 11, 1779}, + dictWord{ + 142, + 10, + 4, + }, + dictWord{4, 10, 248}, + dictWord{4, 10, 665}, + dictWord{7, 10, 137}, + dictWord{137, 10, 349}, + dictWord{7, 0, 1193}, + dictWord{5, 11, 245}, + dictWord{ + 6, + 11, + 
576, + }, + dictWord{7, 11, 582}, + dictWord{136, 11, 225}, + dictWord{144, 0, 82}, + dictWord{7, 10, 1270}, + dictWord{139, 10, 612}, + dictWord{5, 0, 454}, + dictWord{ + 10, + 0, + 352, + }, + dictWord{138, 11, 352}, + dictWord{18, 0, 57}, + dictWord{5, 10, 371}, + dictWord{135, 10, 563}, + dictWord{135, 0, 1333}, + dictWord{6, 0, 107}, + dictWord{ + 7, + 0, + 638, + }, + dictWord{7, 0, 1632}, + dictWord{9, 0, 396}, + dictWord{134, 11, 610}, + dictWord{5, 0, 370}, + dictWord{134, 0, 1756}, + dictWord{4, 10, 374}, + dictWord{ + 7, + 10, + 547, + }, + dictWord{7, 10, 1700}, + dictWord{7, 10, 1833}, + dictWord{139, 10, 858}, + dictWord{133, 0, 204}, + dictWord{6, 0, 1305}, + dictWord{9, 10, 311}, + dictWord{ + 141, + 10, + 42, + }, + dictWord{5, 0, 970}, + dictWord{134, 0, 1706}, + dictWord{6, 10, 1647}, + dictWord{7, 10, 1552}, + dictWord{7, 10, 2010}, + dictWord{9, 10, 494}, + dictWord{137, 10, 509}, + dictWord{13, 11, 455}, + dictWord{15, 11, 99}, + dictWord{15, 11, 129}, + dictWord{144, 11, 68}, + dictWord{135, 0, 3}, + dictWord{4, 0, 35}, + dictWord{ + 5, + 0, + 121, + }, + dictWord{5, 0, 483}, + dictWord{5, 0, 685}, + dictWord{6, 0, 489}, + dictWord{6, 0, 782}, + dictWord{6, 0, 1032}, + dictWord{7, 0, 1204}, + dictWord{136, 0, 394}, + dictWord{4, 0, 921}, + dictWord{133, 0, 1007}, + dictWord{8, 11, 360}, + dictWord{138, 11, 63}, + dictWord{135, 0, 1696}, + dictWord{134, 0, 1519}, + dictWord{ + 132, + 11, + 443, + }, + dictWord{135, 11, 944}, + dictWord{6, 10, 123}, + dictWord{7, 10, 214}, + dictWord{9, 10, 728}, + dictWord{10, 10, 157}, + dictWord{11, 10, 346}, + dictWord{11, 10, 662}, + dictWord{143, 10, 106}, + dictWord{137, 0, 981}, + dictWord{135, 10, 1435}, + dictWord{134, 0, 1072}, + dictWord{132, 0, 712}, + dictWord{ + 134, + 0, + 1629, + }, + dictWord{134, 0, 728}, + dictWord{4, 11, 298}, + dictWord{137, 11, 483}, + dictWord{6, 0, 1177}, + dictWord{6, 0, 1271}, + dictWord{5, 11, 164}, + dictWord{ + 7, + 11, + 121, + }, + dictWord{142, 11, 189}, + 
dictWord{7, 0, 1608}, + dictWord{4, 10, 707}, + dictWord{5, 10, 588}, + dictWord{6, 10, 393}, + dictWord{13, 10, 106}, + dictWord{ + 18, + 10, + 49, + }, + dictWord{147, 10, 41}, + dictWord{23, 0, 16}, + dictWord{151, 11, 16}, + dictWord{6, 10, 211}, + dictWord{7, 10, 1690}, + dictWord{11, 10, 486}, + dictWord{140, 10, 369}, + dictWord{133, 0, 485}, + dictWord{19, 11, 15}, + dictWord{149, 11, 27}, + dictWord{4, 11, 172}, + dictWord{9, 11, 611}, + dictWord{10, 11, 436}, + dictWord{12, 11, 673}, + dictWord{141, 11, 255}, + dictWord{5, 11, 844}, + dictWord{10, 11, 484}, + dictWord{11, 11, 754}, + dictWord{12, 11, 457}, + dictWord{ + 14, + 11, + 171, + }, + dictWord{14, 11, 389}, + dictWord{146, 11, 153}, + dictWord{4, 0, 285}, + dictWord{5, 0, 27}, + dictWord{5, 0, 317}, + dictWord{6, 0, 301}, + dictWord{7, 0, 7}, + dictWord{ + 8, + 0, + 153, + }, + dictWord{10, 0, 766}, + dictWord{11, 0, 468}, + dictWord{12, 0, 467}, + dictWord{141, 0, 143}, + dictWord{134, 0, 1462}, + dictWord{9, 11, 263}, + dictWord{ + 10, + 11, + 147, + }, + dictWord{138, 11, 492}, + dictWord{133, 11, 537}, + dictWord{6, 0, 1945}, + dictWord{6, 0, 1986}, + dictWord{6, 0, 1991}, + dictWord{134, 0, 2038}, + dictWord{134, 10, 219}, + dictWord{137, 11, 842}, + dictWord{14, 0, 52}, + dictWord{17, 0, 50}, + dictWord{5, 10, 582}, + dictWord{6, 10, 1646}, + dictWord{7, 10, 99}, + dictWord{7, 10, 1962}, + dictWord{7, 10, 1986}, + dictWord{8, 10, 515}, + dictWord{8, 10, 773}, + dictWord{9, 10, 23}, + dictWord{9, 10, 491}, + dictWord{12, 10, 620}, + dictWord{142, 10, 93}, + dictWord{138, 11, 97}, + dictWord{20, 0, 21}, + dictWord{20, 0, 44}, + dictWord{133, 10, 851}, + dictWord{136, 0, 819}, + dictWord{139, 0, 917}, + dictWord{5, 11, 230}, + dictWord{5, 11, 392}, + dictWord{6, 11, 420}, + dictWord{8, 10, 762}, + dictWord{8, 10, 812}, + dictWord{9, 11, 568}, + dictWord{9, 10, 910}, + dictWord{140, 11, 612}, + dictWord{135, 0, 784}, + dictWord{15, 0, 135}, + dictWord{143, 11, 135}, + dictWord{10, 0, 454}, + 
dictWord{140, 0, 324}, + dictWord{4, 11, 0}, + dictWord{5, 11, 41}, + dictWord{7, 11, 1459}, + dictWord{7, 11, 1469}, + dictWord{7, 11, 1618}, + dictWord{7, 11, 1859}, + dictWord{9, 11, 549}, + dictWord{139, 11, 905}, + dictWord{4, 10, 98}, + dictWord{7, 10, 1365}, + dictWord{9, 10, 422}, + dictWord{9, 10, 670}, + dictWord{10, 10, 775}, + dictWord{11, 10, 210}, + dictWord{13, 10, 26}, + dictWord{13, 10, 457}, + dictWord{141, 10, 476}, + dictWord{6, 0, 1719}, + dictWord{6, 0, 1735}, + dictWord{7, 0, 2016}, + dictWord{7, 0, 2020}, + dictWord{8, 0, 837}, + dictWord{137, 0, 852}, + dictWord{133, 11, 696}, + dictWord{135, 0, 852}, + dictWord{132, 0, 952}, + dictWord{134, 10, 1730}, + dictWord{132, 11, 771}, + dictWord{ + 138, + 0, + 568, + }, + dictWord{137, 0, 448}, + dictWord{139, 0, 146}, + dictWord{8, 0, 67}, + dictWord{138, 0, 419}, + dictWord{133, 11, 921}, + dictWord{137, 10, 147}, + dictWord{134, 0, 1826}, + dictWord{10, 0, 657}, + dictWord{14, 0, 297}, + dictWord{142, 0, 361}, + dictWord{6, 0, 666}, + dictWord{6, 0, 767}, + dictWord{134, 0, 1542}, + dictWord{139, 0, 729}, + dictWord{6, 11, 180}, + dictWord{7, 11, 1137}, + dictWord{8, 11, 751}, + dictWord{139, 11, 805}, + dictWord{4, 11, 183}, + dictWord{7, 11, 271}, + dictWord{11, 11, 824}, + dictWord{11, 11, 952}, + dictWord{13, 11, 278}, + dictWord{13, 11, 339}, + dictWord{13, 11, 482}, + dictWord{14, 11, 424}, + dictWord{ + 148, + 11, + 99, + }, + dictWord{4, 0, 669}, + dictWord{5, 11, 477}, + dictWord{5, 11, 596}, + dictWord{6, 11, 505}, + dictWord{7, 11, 1221}, + dictWord{11, 11, 907}, + dictWord{ + 12, + 11, + 209, + }, + dictWord{141, 11, 214}, + dictWord{135, 11, 1215}, + dictWord{5, 0, 402}, + dictWord{6, 10, 30}, + dictWord{11, 10, 56}, + dictWord{139, 10, 305}, + dictWord{ + 7, + 11, + 564, + }, + dictWord{142, 11, 168}, + dictWord{139, 0, 152}, + dictWord{7, 0, 912}, + dictWord{135, 10, 1614}, + dictWord{4, 10, 150}, + dictWord{5, 10, 303}, + dictWord{134, 10, 327}, + dictWord{7, 0, 320}, + 
dictWord{8, 0, 51}, + dictWord{9, 0, 868}, + dictWord{10, 0, 833}, + dictWord{12, 0, 481}, + dictWord{12, 0, 570}, + dictWord{ + 148, + 0, + 106, + }, + dictWord{132, 0, 445}, + dictWord{7, 11, 274}, + dictWord{11, 11, 263}, + dictWord{11, 11, 479}, + dictWord{11, 11, 507}, + dictWord{140, 11, 277}, + dictWord{10, 0, 555}, + dictWord{11, 0, 308}, + dictWord{19, 0, 95}, + dictWord{6, 11, 1645}, + dictWord{8, 10, 192}, + dictWord{10, 10, 78}, + dictWord{141, 10, 359}, + dictWord{135, 10, 786}, + dictWord{6, 11, 92}, + dictWord{6, 11, 188}, + dictWord{7, 11, 1269}, + dictWord{7, 11, 1524}, + dictWord{7, 11, 1876}, + dictWord{10, 11, 228}, + dictWord{139, 11, 1020}, + dictWord{4, 11, 459}, + dictWord{133, 11, 966}, + dictWord{11, 0, 386}, + dictWord{6, 10, 1638}, + dictWord{7, 10, 79}, + dictWord{ + 7, + 10, + 496, + }, + dictWord{9, 10, 138}, + dictWord{10, 10, 336}, + dictWord{12, 10, 412}, + dictWord{12, 10, 440}, + dictWord{142, 10, 305}, + dictWord{133, 0, 239}, + dictWord{ + 7, + 0, + 83, + }, + dictWord{7, 0, 1990}, + dictWord{8, 0, 130}, + dictWord{139, 0, 720}, + dictWord{138, 11, 709}, + dictWord{4, 0, 143}, + dictWord{5, 0, 550}, + dictWord{ + 133, + 0, + 752, + }, + dictWord{5, 0, 123}, + dictWord{6, 0, 530}, + dictWord{7, 0, 348}, + dictWord{135, 0, 1419}, + dictWord{135, 0, 2024}, + dictWord{6, 11, 18}, + dictWord{7, 11, 179}, + dictWord{7, 11, 721}, + dictWord{7, 11, 932}, + dictWord{8, 11, 548}, + dictWord{8, 11, 757}, + dictWord{9, 11, 54}, + dictWord{9, 11, 65}, + dictWord{9, 11, 532}, + dictWord{ + 9, + 11, + 844, + }, + dictWord{10, 11, 113}, + dictWord{10, 11, 117}, + dictWord{10, 11, 236}, + dictWord{10, 11, 315}, + dictWord{10, 11, 430}, + dictWord{10, 11, 798}, + dictWord{11, 11, 153}, + dictWord{11, 11, 351}, + dictWord{11, 11, 375}, + dictWord{12, 11, 78}, + dictWord{12, 11, 151}, + dictWord{12, 11, 392}, + dictWord{ + 14, + 11, + 248, + }, + dictWord{143, 11, 23}, + dictWord{7, 10, 204}, + dictWord{7, 10, 415}, + dictWord{8, 10, 42}, + 
dictWord{10, 10, 85}, + dictWord{139, 10, 564}, + dictWord{ + 134, + 0, + 958, + }, + dictWord{133, 11, 965}, + dictWord{132, 0, 210}, + dictWord{135, 11, 1429}, + dictWord{138, 11, 480}, + dictWord{134, 11, 182}, + dictWord{ + 139, + 11, + 345, + }, + dictWord{10, 11, 65}, + dictWord{10, 11, 488}, + dictWord{138, 11, 497}, + dictWord{4, 10, 3}, + dictWord{5, 10, 247}, + dictWord{5, 10, 644}, + dictWord{ + 7, + 10, + 744, + }, + dictWord{7, 10, 1207}, + dictWord{7, 10, 1225}, + dictWord{7, 10, 1909}, + dictWord{146, 10, 147}, + dictWord{132, 0, 430}, + dictWord{5, 10, 285}, + dictWord{ + 9, + 10, + 67, + }, + dictWord{13, 10, 473}, + dictWord{143, 10, 82}, + dictWord{144, 11, 16}, + dictWord{7, 11, 1162}, + dictWord{9, 11, 588}, + dictWord{10, 11, 260}, + dictWord{151, 10, 8}, + dictWord{133, 0, 213}, + dictWord{138, 0, 7}, + dictWord{135, 0, 801}, + dictWord{134, 11, 1786}, + dictWord{135, 11, 308}, + dictWord{6, 0, 936}, + dictWord{134, 0, 1289}, + dictWord{133, 0, 108}, + dictWord{132, 0, 885}, + dictWord{133, 0, 219}, + dictWord{139, 0, 587}, + dictWord{4, 0, 193}, + dictWord{5, 0, 916}, + dictWord{6, 0, 1041}, + dictWord{7, 0, 364}, + dictWord{10, 0, 398}, + dictWord{10, 0, 726}, + dictWord{11, 0, 317}, + dictWord{11, 0, 626}, + dictWord{12, 0, 142}, + dictWord{12, 0, 288}, + dictWord{12, 0, 678}, + dictWord{13, 0, 313}, + dictWord{15, 0, 113}, + dictWord{146, 0, 114}, + dictWord{135, 0, 1165}, + dictWord{6, 0, 241}, + dictWord{ + 9, + 0, + 342, + }, + dictWord{10, 0, 729}, + dictWord{11, 0, 284}, + dictWord{11, 0, 445}, + dictWord{11, 0, 651}, + dictWord{11, 0, 863}, + dictWord{13, 0, 398}, + dictWord{ + 146, + 0, + 99, + }, + dictWord{7, 0, 907}, + dictWord{136, 0, 832}, + dictWord{9, 0, 303}, + dictWord{4, 10, 29}, + dictWord{6, 10, 532}, + dictWord{7, 10, 1628}, + dictWord{7, 10, 1648}, + dictWord{9, 10, 350}, + dictWord{10, 10, 433}, + dictWord{11, 10, 97}, + dictWord{11, 10, 557}, + dictWord{11, 10, 745}, + dictWord{12, 10, 289}, + dictWord{ + 12, + 10, 
+ 335, + }, + dictWord{12, 10, 348}, + dictWord{12, 10, 606}, + dictWord{13, 10, 116}, + dictWord{13, 10, 233}, + dictWord{13, 10, 466}, + dictWord{14, 10, 181}, + dictWord{ + 14, + 10, + 209, + }, + dictWord{14, 10, 232}, + dictWord{14, 10, 236}, + dictWord{14, 10, 300}, + dictWord{16, 10, 41}, + dictWord{148, 10, 97}, + dictWord{7, 11, 423}, + dictWord{7, 10, 1692}, + dictWord{136, 11, 588}, + dictWord{6, 0, 931}, + dictWord{134, 0, 1454}, + dictWord{5, 10, 501}, + dictWord{7, 10, 1704}, + dictWord{9, 10, 553}, + dictWord{11, 10, 520}, + dictWord{12, 10, 557}, + dictWord{141, 10, 249}, + dictWord{136, 11, 287}, + dictWord{4, 0, 562}, + dictWord{9, 0, 254}, + dictWord{ + 139, + 0, + 879, + }, + dictWord{132, 0, 786}, + dictWord{14, 11, 32}, + dictWord{18, 11, 85}, + dictWord{20, 11, 2}, + dictWord{152, 11, 16}, + dictWord{135, 0, 1294}, + dictWord{ + 7, + 11, + 723, + }, + dictWord{135, 11, 1135}, + dictWord{6, 0, 216}, + dictWord{7, 0, 901}, + dictWord{7, 0, 1343}, + dictWord{8, 0, 493}, + dictWord{134, 11, 403}, + dictWord{ + 7, + 11, + 719, + }, + dictWord{8, 11, 809}, + dictWord{136, 11, 834}, + dictWord{5, 11, 210}, + dictWord{6, 11, 213}, + dictWord{7, 11, 60}, + dictWord{10, 11, 364}, + dictWord{ + 139, + 11, + 135, + }, + dictWord{7, 0, 341}, + dictWord{11, 0, 219}, + dictWord{5, 11, 607}, + dictWord{8, 11, 326}, + dictWord{136, 11, 490}, + dictWord{4, 11, 701}, + dictWord{ + 5, + 11, + 472, + }, + dictWord{5, 11, 639}, + dictWord{7, 11, 1249}, + dictWord{9, 11, 758}, + dictWord{139, 11, 896}, + dictWord{135, 11, 380}, + dictWord{135, 11, 1947}, + dictWord{139, 0, 130}, + dictWord{135, 0, 1734}, + dictWord{10, 0, 115}, + dictWord{11, 0, 420}, + dictWord{12, 0, 154}, + dictWord{13, 0, 404}, + dictWord{14, 0, 346}, + dictWord{143, 0, 54}, + dictWord{134, 10, 129}, + dictWord{4, 11, 386}, + dictWord{7, 11, 41}, + dictWord{8, 11, 405}, + dictWord{9, 11, 497}, + dictWord{11, 11, 110}, + dictWord{11, 11, 360}, + dictWord{15, 11, 37}, + dictWord{144, 11, 84}, + 
dictWord{141, 11, 282}, + dictWord{5, 11, 46}, + dictWord{7, 11, 1452}, + dictWord{7, 11, 1480}, + dictWord{8, 11, 634}, + dictWord{140, 11, 472}, + dictWord{4, 11, 524}, + dictWord{136, 11, 810}, + dictWord{10, 11, 238}, + dictWord{141, 11, 33}, + dictWord{ + 133, + 0, + 604, + }, + dictWord{5, 0, 1011}, + dictWord{136, 0, 701}, + dictWord{8, 0, 856}, + dictWord{8, 0, 858}, + dictWord{8, 0, 879}, + dictWord{12, 0, 702}, + dictWord{142, 0, 447}, + dictWord{4, 0, 54}, + dictWord{5, 0, 666}, + dictWord{7, 0, 1039}, + dictWord{7, 0, 1130}, + dictWord{9, 0, 195}, + dictWord{138, 0, 302}, + dictWord{4, 10, 25}, + dictWord{ + 5, + 10, + 60, + }, + dictWord{6, 10, 504}, + dictWord{7, 10, 614}, + dictWord{7, 10, 1155}, + dictWord{140, 10, 0}, + dictWord{7, 10, 1248}, + dictWord{11, 10, 621}, + dictWord{ + 139, + 10, + 702, + }, + dictWord{133, 11, 997}, + dictWord{137, 10, 321}, + dictWord{134, 0, 1669}, + dictWord{134, 0, 1791}, + dictWord{4, 10, 379}, + dictWord{ + 135, + 10, + 1397, + }, + dictWord{138, 11, 372}, + dictWord{5, 11, 782}, + dictWord{5, 11, 829}, + dictWord{134, 11, 1738}, + dictWord{135, 0, 1228}, + dictWord{4, 10, 118}, + dictWord{6, 10, 274}, + dictWord{6, 10, 361}, + dictWord{7, 10, 75}, + dictWord{141, 10, 441}, + dictWord{132, 0, 623}, + dictWord{9, 11, 279}, + dictWord{10, 11, 407}, + dictWord{14, 11, 84}, + dictWord{150, 11, 18}, + dictWord{137, 10, 841}, + dictWord{135, 0, 798}, + dictWord{140, 10, 693}, + dictWord{5, 10, 314}, + dictWord{6, 10, 221}, + dictWord{7, 10, 419}, + dictWord{10, 10, 650}, + dictWord{11, 10, 396}, + dictWord{12, 10, 156}, + dictWord{13, 10, 369}, + dictWord{14, 10, 333}, + dictWord{ + 145, + 10, + 47, + }, + dictWord{135, 11, 1372}, + dictWord{7, 0, 122}, + dictWord{9, 0, 259}, + dictWord{10, 0, 84}, + dictWord{11, 0, 470}, + dictWord{12, 0, 541}, + dictWord{ + 141, + 0, + 379, + }, + dictWord{134, 0, 837}, + dictWord{8, 0, 1013}, + dictWord{4, 11, 78}, + dictWord{5, 11, 96}, + dictWord{5, 11, 182}, + dictWord{7, 11, 
1724}, + dictWord{ + 7, + 11, + 1825, + }, + dictWord{10, 11, 394}, + dictWord{10, 11, 471}, + dictWord{11, 11, 532}, + dictWord{14, 11, 340}, + dictWord{145, 11, 88}, + dictWord{134, 0, 577}, + dictWord{135, 11, 1964}, + dictWord{132, 10, 913}, + dictWord{134, 0, 460}, + dictWord{8, 0, 891}, + dictWord{10, 0, 901}, + dictWord{10, 0, 919}, + dictWord{10, 0, 932}, + dictWord{12, 0, 715}, + dictWord{12, 0, 728}, + dictWord{12, 0, 777}, + dictWord{14, 0, 457}, + dictWord{144, 0, 103}, + dictWord{5, 0, 82}, + dictWord{5, 0, 131}, + dictWord{ + 7, + 0, + 1755, + }, + dictWord{8, 0, 31}, + dictWord{9, 0, 168}, + dictWord{9, 0, 764}, + dictWord{139, 0, 869}, + dictWord{136, 10, 475}, + dictWord{6, 0, 605}, + dictWord{ + 5, + 10, + 1016, + }, + dictWord{9, 11, 601}, + dictWord{9, 11, 619}, + dictWord{10, 11, 505}, + dictWord{10, 11, 732}, + dictWord{11, 11, 355}, + dictWord{140, 11, 139}, + dictWord{ + 7, + 10, + 602, + }, + dictWord{8, 10, 179}, + dictWord{10, 10, 781}, + dictWord{140, 10, 126}, + dictWord{134, 0, 1246}, + dictWord{6, 10, 329}, + dictWord{138, 10, 111}, + dictWord{6, 11, 215}, + dictWord{7, 11, 1028}, + dictWord{7, 11, 1473}, + dictWord{7, 11, 1721}, + dictWord{9, 11, 424}, + dictWord{138, 11, 779}, + dictWord{5, 0, 278}, + dictWord{137, 0, 68}, + dictWord{6, 0, 932}, + dictWord{6, 0, 1084}, + dictWord{144, 0, 86}, + dictWord{4, 0, 163}, + dictWord{5, 0, 201}, + dictWord{5, 0, 307}, + dictWord{ + 5, + 0, + 310, + }, + dictWord{6, 0, 335}, + dictWord{7, 0, 284}, + dictWord{7, 0, 1660}, + dictWord{136, 0, 165}, + dictWord{136, 0, 781}, + dictWord{134, 0, 707}, + dictWord{6, 0, 33}, + dictWord{135, 0, 1244}, + dictWord{5, 10, 821}, + dictWord{6, 11, 67}, + dictWord{6, 10, 1687}, + dictWord{7, 11, 258}, + dictWord{7, 11, 1630}, + dictWord{9, 11, 354}, + dictWord{9, 11, 675}, + dictWord{10, 11, 830}, + dictWord{14, 11, 80}, + dictWord{145, 11, 80}, + dictWord{6, 11, 141}, + dictWord{7, 11, 225}, + dictWord{9, 11, 59}, + dictWord{9, 11, 607}, + dictWord{10, 11, 
312}, + dictWord{11, 11, 687}, + dictWord{12, 11, 555}, + dictWord{13, 11, 373}, + dictWord{13, 11, 494}, + dictWord{148, 11, 58}, + dictWord{134, 0, 1113}, + dictWord{9, 0, 388}, + dictWord{5, 10, 71}, + dictWord{7, 10, 1407}, + dictWord{9, 10, 704}, + dictWord{10, 10, 261}, + dictWord{10, 10, 619}, + dictWord{11, 10, 547}, + dictWord{11, 10, 619}, + dictWord{143, 10, 157}, + dictWord{7, 0, 1953}, + dictWord{136, 0, 720}, + dictWord{138, 0, 203}, + dictWord{ + 7, + 10, + 2008, + }, + dictWord{9, 10, 337}, + dictWord{138, 10, 517}, + dictWord{6, 0, 326}, + dictWord{7, 0, 677}, + dictWord{137, 0, 425}, + dictWord{139, 11, 81}, + dictWord{ + 7, + 0, + 1316, + }, + dictWord{7, 0, 1412}, + dictWord{7, 0, 1839}, + dictWord{9, 0, 589}, + dictWord{11, 0, 241}, + dictWord{11, 0, 676}, + dictWord{11, 0, 811}, + dictWord{11, 0, 891}, + dictWord{12, 0, 140}, + dictWord{12, 0, 346}, + dictWord{12, 0, 479}, + dictWord{13, 0, 140}, + dictWord{13, 0, 381}, + dictWord{14, 0, 188}, + dictWord{18, 0, 30}, + dictWord{148, 0, 108}, + dictWord{5, 0, 416}, + dictWord{6, 10, 86}, + dictWord{6, 10, 603}, + dictWord{7, 10, 292}, + dictWord{7, 10, 561}, + dictWord{8, 10, 257}, + dictWord{ + 8, + 10, + 382, + }, + dictWord{9, 10, 721}, + dictWord{9, 10, 778}, + dictWord{11, 10, 581}, + dictWord{140, 10, 466}, + dictWord{4, 10, 486}, + dictWord{133, 10, 491}, + dictWord{134, 0, 1300}, + dictWord{132, 10, 72}, + dictWord{7, 0, 847}, + dictWord{6, 10, 265}, + dictWord{7, 11, 430}, + dictWord{139, 11, 46}, + dictWord{5, 11, 602}, + dictWord{6, 11, 106}, + dictWord{7, 11, 1786}, + dictWord{7, 11, 1821}, + dictWord{7, 11, 2018}, + dictWord{9, 11, 418}, + dictWord{137, 11, 763}, + dictWord{5, 0, 358}, + dictWord{7, 0, 535}, + dictWord{7, 0, 1184}, + dictWord{10, 0, 662}, + dictWord{13, 0, 212}, + dictWord{13, 0, 304}, + dictWord{13, 0, 333}, + dictWord{145, 0, 98}, + dictWord{ + 5, + 11, + 65, + }, + dictWord{6, 11, 416}, + dictWord{7, 11, 1720}, + dictWord{7, 11, 1924}, + dictWord{8, 11, 677}, + 
dictWord{10, 11, 109}, + dictWord{11, 11, 14}, + dictWord{ + 11, + 11, + 70, + }, + dictWord{11, 11, 569}, + dictWord{11, 11, 735}, + dictWord{15, 11, 153}, + dictWord{148, 11, 80}, + dictWord{6, 0, 1823}, + dictWord{8, 0, 839}, + dictWord{ + 8, + 0, + 852, + }, + dictWord{8, 0, 903}, + dictWord{10, 0, 940}, + dictWord{12, 0, 707}, + dictWord{140, 0, 775}, + dictWord{135, 11, 1229}, + dictWord{6, 0, 1522}, + dictWord{ + 140, + 0, + 654, + }, + dictWord{136, 11, 595}, + dictWord{139, 0, 163}, + dictWord{141, 0, 314}, + dictWord{132, 0, 978}, + dictWord{4, 0, 601}, + dictWord{6, 0, 2035}, + dictWord{137, 10, 234}, + dictWord{5, 10, 815}, + dictWord{6, 10, 1688}, + dictWord{134, 10, 1755}, + dictWord{133, 0, 946}, + dictWord{136, 0, 434}, + dictWord{ + 6, + 10, + 197, + }, + dictWord{136, 10, 205}, + dictWord{7, 0, 411}, + dictWord{7, 0, 590}, + dictWord{8, 0, 631}, + dictWord{9, 0, 323}, + dictWord{10, 0, 355}, + dictWord{11, 0, 491}, + dictWord{12, 0, 143}, + dictWord{12, 0, 402}, + dictWord{13, 0, 73}, + dictWord{14, 0, 408}, + dictWord{15, 0, 107}, + dictWord{146, 0, 71}, + dictWord{7, 0, 1467}, + dictWord{ + 8, + 0, + 328, + }, + dictWord{10, 0, 544}, + dictWord{11, 0, 955}, + dictWord{12, 0, 13}, + dictWord{13, 0, 320}, + dictWord{145, 0, 83}, + dictWord{142, 0, 410}, + dictWord{ + 11, + 0, + 511, + }, + dictWord{13, 0, 394}, + dictWord{14, 0, 298}, + dictWord{14, 0, 318}, + dictWord{146, 0, 103}, + dictWord{6, 10, 452}, + dictWord{7, 10, 312}, + dictWord{ + 138, + 10, + 219, + }, + dictWord{138, 10, 589}, + dictWord{4, 10, 333}, + dictWord{9, 10, 176}, + dictWord{12, 10, 353}, + dictWord{141, 10, 187}, + dictWord{135, 11, 329}, + dictWord{132, 11, 469}, + dictWord{5, 0, 835}, + dictWord{134, 0, 483}, + dictWord{134, 11, 1743}, + dictWord{5, 11, 929}, + dictWord{6, 11, 340}, + dictWord{8, 11, 376}, + dictWord{136, 11, 807}, + dictWord{134, 10, 1685}, + dictWord{132, 0, 677}, + dictWord{5, 11, 218}, + dictWord{7, 11, 1610}, + dictWord{138, 11, 83}, + dictWord{ + 
5, + 11, + 571, + }, + dictWord{135, 11, 1842}, + dictWord{132, 11, 455}, + dictWord{137, 0, 70}, + dictWord{135, 0, 1405}, + dictWord{7, 10, 135}, + dictWord{8, 10, 7}, + dictWord{ + 8, + 10, + 62, + }, + dictWord{9, 10, 243}, + dictWord{10, 10, 658}, + dictWord{10, 10, 697}, + dictWord{11, 10, 456}, + dictWord{139, 10, 756}, + dictWord{9, 10, 395}, + dictWord{138, 10, 79}, + dictWord{137, 0, 108}, + dictWord{6, 11, 161}, + dictWord{7, 11, 372}, + dictWord{137, 11, 597}, + dictWord{132, 11, 349}, + dictWord{ + 132, + 0, + 777, + }, + dictWord{132, 0, 331}, + dictWord{135, 10, 631}, + dictWord{133, 0, 747}, + dictWord{6, 11, 432}, + dictWord{6, 11, 608}, + dictWord{139, 11, 322}, + dictWord{138, 10, 835}, + dictWord{5, 11, 468}, + dictWord{7, 11, 1809}, + dictWord{10, 11, 325}, + dictWord{11, 11, 856}, + dictWord{12, 11, 345}, + dictWord{ + 143, + 11, + 104, + }, + dictWord{133, 11, 223}, + dictWord{7, 10, 406}, + dictWord{7, 10, 459}, + dictWord{8, 10, 606}, + dictWord{139, 10, 726}, + dictWord{132, 11, 566}, + dictWord{142, 0, 68}, + dictWord{4, 11, 59}, + dictWord{135, 11, 1394}, + dictWord{6, 11, 436}, + dictWord{139, 11, 481}, + dictWord{4, 11, 48}, + dictWord{5, 11, 271}, + dictWord{135, 11, 953}, + dictWord{139, 11, 170}, + dictWord{5, 11, 610}, + dictWord{136, 11, 457}, + dictWord{133, 11, 755}, + dictWord{135, 11, 1217}, + dictWord{ + 133, + 10, + 612, + }, + dictWord{132, 11, 197}, + dictWord{132, 0, 505}, + dictWord{4, 10, 372}, + dictWord{7, 10, 482}, + dictWord{8, 10, 158}, + dictWord{9, 10, 602}, + dictWord{ + 9, + 10, + 615, + }, + dictWord{10, 10, 245}, + dictWord{10, 10, 678}, + dictWord{10, 10, 744}, + dictWord{11, 10, 248}, + dictWord{139, 10, 806}, + dictWord{133, 0, 326}, + dictWord{5, 10, 854}, + dictWord{135, 10, 1991}, + dictWord{4, 0, 691}, + dictWord{146, 0, 16}, + dictWord{6, 0, 628}, + dictWord{9, 0, 35}, + dictWord{10, 0, 680}, + dictWord{10, 0, 793}, + dictWord{11, 0, 364}, + dictWord{13, 0, 357}, + dictWord{143, 0, 164}, + 
dictWord{138, 0, 654}, + dictWord{6, 0, 32}, + dictWord{7, 0, 385}, + dictWord{ + 7, + 0, + 757, + }, + dictWord{7, 0, 1916}, + dictWord{8, 0, 37}, + dictWord{8, 0, 94}, + dictWord{8, 0, 711}, + dictWord{9, 0, 541}, + dictWord{10, 0, 162}, + dictWord{10, 0, 795}, + dictWord{ + 11, + 0, + 989, + }, + dictWord{11, 0, 1010}, + dictWord{12, 0, 14}, + dictWord{142, 0, 308}, + dictWord{133, 11, 217}, + dictWord{6, 0, 152}, + dictWord{6, 0, 349}, + dictWord{ + 6, + 0, + 1682, + }, + dictWord{7, 0, 1252}, + dictWord{8, 0, 112}, + dictWord{9, 0, 435}, + dictWord{9, 0, 668}, + dictWord{10, 0, 290}, + dictWord{10, 0, 319}, + dictWord{10, 0, 815}, + dictWord{11, 0, 180}, + dictWord{11, 0, 837}, + dictWord{12, 0, 240}, + dictWord{13, 0, 152}, + dictWord{13, 0, 219}, + dictWord{142, 0, 158}, + dictWord{4, 0, 581}, + dictWord{134, 0, 726}, + dictWord{5, 10, 195}, + dictWord{135, 10, 1685}, + dictWord{6, 0, 126}, + dictWord{7, 0, 573}, + dictWord{8, 0, 397}, + dictWord{142, 0, 44}, + dictWord{138, 0, 89}, + dictWord{7, 10, 1997}, + dictWord{8, 10, 730}, + dictWord{139, 10, 1006}, + dictWord{134, 0, 1531}, + dictWord{134, 0, 1167}, + dictWord{ + 5, + 0, + 926, + }, + dictWord{12, 0, 203}, + dictWord{133, 10, 751}, + dictWord{4, 11, 165}, + dictWord{7, 11, 1398}, + dictWord{135, 11, 1829}, + dictWord{7, 0, 1232}, + dictWord{137, 0, 531}, + dictWord{135, 10, 821}, + dictWord{134, 0, 943}, + dictWord{133, 0, 670}, + dictWord{4, 0, 880}, + dictWord{139, 0, 231}, + dictWord{ + 134, + 0, + 1617, + }, + dictWord{135, 0, 1957}, + dictWord{5, 11, 9}, + dictWord{7, 11, 297}, + dictWord{7, 11, 966}, + dictWord{140, 11, 306}, + dictWord{6, 0, 975}, + dictWord{ + 134, + 0, + 985, + }, + dictWord{5, 10, 950}, + dictWord{5, 10, 994}, + dictWord{134, 10, 351}, + dictWord{12, 11, 21}, + dictWord{151, 11, 7}, + dictWord{5, 11, 146}, + dictWord{ + 6, + 11, + 411, + }, + dictWord{138, 11, 721}, + dictWord{7, 0, 242}, + dictWord{135, 0, 1942}, + dictWord{6, 11, 177}, + dictWord{135, 11, 467}, + 
dictWord{5, 0, 421}, + dictWord{ + 7, + 10, + 47, + }, + dictWord{137, 10, 684}, + dictWord{5, 0, 834}, + dictWord{7, 0, 1202}, + dictWord{8, 0, 14}, + dictWord{9, 0, 481}, + dictWord{137, 0, 880}, + dictWord{138, 0, 465}, + dictWord{6, 0, 688}, + dictWord{9, 0, 834}, + dictWord{132, 10, 350}, + dictWord{132, 0, 855}, + dictWord{4, 0, 357}, + dictWord{6, 0, 172}, + dictWord{7, 0, 143}, + dictWord{137, 0, 413}, + dictWord{133, 11, 200}, + dictWord{132, 0, 590}, + dictWord{7, 10, 1812}, + dictWord{13, 10, 259}, + dictWord{13, 10, 356}, + dictWord{ + 14, + 10, + 242, + }, + dictWord{147, 10, 114}, + dictWord{133, 10, 967}, + dictWord{11, 0, 114}, + dictWord{4, 10, 473}, + dictWord{7, 10, 623}, + dictWord{8, 10, 808}, + dictWord{ + 9, + 10, + 871, + }, + dictWord{9, 10, 893}, + dictWord{11, 10, 431}, + dictWord{12, 10, 112}, + dictWord{12, 10, 217}, + dictWord{12, 10, 243}, + dictWord{12, 10, 562}, + dictWord{ + 12, + 10, + 663, + }, + dictWord{12, 10, 683}, + dictWord{13, 10, 141}, + dictWord{13, 10, 197}, + dictWord{13, 10, 227}, + dictWord{13, 10, 406}, + dictWord{13, 10, 487}, + dictWord{14, 10, 156}, + dictWord{14, 10, 203}, + dictWord{14, 10, 224}, + dictWord{14, 10, 256}, + dictWord{18, 10, 58}, + dictWord{150, 10, 0}, + dictWord{ + 138, + 10, + 286, + }, + dictWord{4, 10, 222}, + dictWord{7, 10, 286}, + dictWord{136, 10, 629}, + dictWord{5, 0, 169}, + dictWord{7, 0, 333}, + dictWord{136, 0, 45}, + dictWord{ + 134, + 11, + 481, + }, + dictWord{132, 0, 198}, + dictWord{4, 0, 24}, + dictWord{5, 0, 140}, + dictWord{5, 0, 185}, + dictWord{7, 0, 1500}, + dictWord{11, 0, 565}, + dictWord{11, 0, 838}, + dictWord{4, 11, 84}, + dictWord{7, 11, 1482}, + dictWord{10, 11, 76}, + dictWord{138, 11, 142}, + dictWord{133, 0, 585}, + dictWord{141, 10, 306}, + dictWord{ + 133, + 11, + 1015, + }, + dictWord{4, 11, 315}, + dictWord{5, 11, 507}, + dictWord{135, 11, 1370}, + dictWord{136, 10, 146}, + dictWord{6, 0, 691}, + dictWord{134, 0, 1503}, + dictWord{ + 4, + 0, + 334, + }, + 
dictWord{133, 0, 593}, + dictWord{4, 10, 465}, + dictWord{135, 10, 1663}, + dictWord{142, 11, 173}, + dictWord{135, 0, 913}, + dictWord{12, 0, 116}, + dictWord{134, 11, 1722}, + dictWord{134, 0, 1360}, + dictWord{132, 0, 802}, + dictWord{8, 11, 222}, + dictWord{8, 11, 476}, + dictWord{9, 11, 238}, + dictWord{ + 11, + 11, + 516, + }, + dictWord{11, 11, 575}, + dictWord{15, 11, 109}, + dictWord{146, 11, 100}, + dictWord{6, 0, 308}, + dictWord{9, 0, 673}, + dictWord{7, 10, 138}, + dictWord{ + 7, + 10, + 517, + }, + dictWord{139, 10, 238}, + dictWord{132, 0, 709}, + dictWord{6, 0, 1876}, + dictWord{6, 0, 1895}, + dictWord{9, 0, 994}, + dictWord{9, 0, 1006}, + dictWord{ + 12, + 0, + 829, + }, + dictWord{12, 0, 888}, + dictWord{12, 0, 891}, + dictWord{146, 0, 185}, + dictWord{148, 10, 94}, + dictWord{4, 0, 228}, + dictWord{133, 0, 897}, + dictWord{ + 7, + 0, + 1840, + }, + dictWord{5, 10, 495}, + dictWord{7, 10, 834}, + dictWord{9, 10, 733}, + dictWord{139, 10, 378}, + dictWord{133, 10, 559}, + dictWord{6, 10, 21}, + dictWord{ + 6, + 10, + 1737, + }, + dictWord{7, 10, 1444}, + dictWord{136, 10, 224}, + dictWord{4, 0, 608}, + dictWord{133, 0, 497}, + dictWord{6, 11, 40}, + dictWord{135, 11, 1781}, + dictWord{134, 0, 1573}, + dictWord{135, 0, 2039}, + dictWord{6, 0, 540}, + dictWord{136, 0, 136}, + dictWord{4, 0, 897}, + dictWord{5, 0, 786}, + dictWord{133, 10, 519}, + dictWord{6, 0, 1878}, + dictWord{6, 0, 1884}, + dictWord{9, 0, 938}, + dictWord{9, 0, 948}, + dictWord{9, 0, 955}, + dictWord{9, 0, 973}, + dictWord{9, 0, 1012}, + dictWord{ + 12, + 0, + 895, + }, + dictWord{12, 0, 927}, + dictWord{143, 0, 254}, + dictWord{134, 0, 1469}, + dictWord{133, 0, 999}, + dictWord{4, 0, 299}, + dictWord{135, 0, 1004}, + dictWord{ + 4, + 0, + 745, + }, + dictWord{133, 0, 578}, + dictWord{136, 11, 574}, + dictWord{133, 0, 456}, + dictWord{134, 0, 1457}, + dictWord{7, 0, 1679}, + dictWord{132, 10, 402}, + dictWord{7, 0, 693}, + dictWord{8, 0, 180}, + dictWord{12, 0, 163}, + dictWord{8, 
10, 323}, + dictWord{136, 10, 479}, + dictWord{11, 10, 580}, + dictWord{142, 10, 201}, + dictWord{5, 10, 59}, + dictWord{135, 10, 672}, + dictWord{132, 11, 354}, + dictWord{146, 10, 34}, + dictWord{4, 0, 755}, + dictWord{135, 11, 1558}, + dictWord{ + 7, + 0, + 1740, + }, + dictWord{146, 0, 48}, + dictWord{4, 10, 85}, + dictWord{135, 10, 549}, + dictWord{139, 0, 338}, + dictWord{133, 10, 94}, + dictWord{134, 0, 1091}, + dictWord{135, 11, 469}, + dictWord{12, 0, 695}, + dictWord{12, 0, 704}, + dictWord{20, 0, 113}, + dictWord{5, 11, 830}, + dictWord{14, 11, 338}, + dictWord{148, 11, 81}, + dictWord{135, 0, 1464}, + dictWord{6, 10, 11}, + dictWord{135, 10, 187}, + dictWord{135, 0, 975}, + dictWord{13, 0, 335}, + dictWord{132, 10, 522}, + dictWord{ + 134, + 0, + 1979, + }, + dictWord{5, 11, 496}, + dictWord{135, 11, 203}, + dictWord{4, 10, 52}, + dictWord{135, 10, 661}, + dictWord{7, 0, 1566}, + dictWord{8, 0, 269}, + dictWord{ + 9, + 0, + 212, + }, + dictWord{9, 0, 718}, + dictWord{14, 0, 15}, + dictWord{14, 0, 132}, + dictWord{142, 0, 227}, + dictWord{4, 0, 890}, + dictWord{5, 0, 805}, + dictWord{5, 0, 819}, + dictWord{ + 5, + 0, + 961, + }, + dictWord{6, 0, 396}, + dictWord{6, 0, 1631}, + dictWord{6, 0, 1678}, + dictWord{7, 0, 1967}, + dictWord{7, 0, 2041}, + dictWord{9, 0, 630}, + dictWord{11, 0, 8}, + dictWord{11, 0, 1019}, + dictWord{12, 0, 176}, + dictWord{13, 0, 225}, + dictWord{14, 0, 292}, + dictWord{21, 0, 24}, + dictWord{4, 10, 383}, + dictWord{133, 10, 520}, + dictWord{134, 11, 547}, + dictWord{135, 11, 1748}, + dictWord{5, 11, 88}, + dictWord{137, 11, 239}, + dictWord{146, 11, 128}, + dictWord{7, 11, 650}, + dictWord{ + 135, + 11, + 1310, + }, + dictWord{4, 10, 281}, + dictWord{5, 10, 38}, + dictWord{7, 10, 194}, + dictWord{7, 10, 668}, + dictWord{7, 10, 1893}, + dictWord{137, 10, 397}, + dictWord{135, 0, 1815}, + dictWord{9, 10, 635}, + dictWord{139, 10, 559}, + dictWord{7, 0, 1505}, + dictWord{10, 0, 190}, + dictWord{10, 0, 634}, + dictWord{11, 0, 792}, 
+ dictWord{12, 0, 358}, + dictWord{140, 0, 447}, + dictWord{5, 0, 0}, + dictWord{6, 0, 536}, + dictWord{7, 0, 604}, + dictWord{13, 0, 445}, + dictWord{145, 0, 126}, + dictWord{ + 7, + 11, + 1076, + }, + dictWord{9, 11, 80}, + dictWord{11, 11, 78}, + dictWord{11, 11, 421}, + dictWord{11, 11, 534}, + dictWord{140, 11, 545}, + dictWord{8, 0, 966}, + dictWord{ + 10, + 0, + 1023, + }, + dictWord{14, 11, 369}, + dictWord{146, 11, 72}, + dictWord{135, 11, 1641}, + dictWord{6, 0, 232}, + dictWord{6, 0, 412}, + dictWord{7, 0, 1074}, + dictWord{ + 8, + 0, + 9, + }, + dictWord{8, 0, 157}, + dictWord{8, 0, 786}, + dictWord{9, 0, 196}, + dictWord{9, 0, 352}, + dictWord{9, 0, 457}, + dictWord{10, 0, 337}, + dictWord{11, 0, 232}, + dictWord{ + 11, + 0, + 877, + }, + dictWord{12, 0, 480}, + dictWord{140, 0, 546}, + dictWord{135, 0, 958}, + dictWord{4, 0, 382}, + dictWord{136, 0, 579}, + dictWord{4, 0, 212}, + dictWord{ + 135, + 0, + 1206, + }, + dictWord{4, 11, 497}, + dictWord{5, 11, 657}, + dictWord{135, 11, 1584}, + dictWord{132, 0, 681}, + dictWord{8, 0, 971}, + dictWord{138, 0, 965}, + dictWord{ + 5, + 10, + 448, + }, + dictWord{136, 10, 535}, + dictWord{14, 0, 16}, + dictWord{146, 0, 44}, + dictWord{11, 0, 584}, + dictWord{11, 0, 616}, + dictWord{14, 0, 275}, + dictWord{ + 11, + 11, + 584, + }, + dictWord{11, 11, 616}, + dictWord{142, 11, 275}, + dictWord{136, 11, 13}, + dictWord{7, 10, 610}, + dictWord{135, 10, 1501}, + dictWord{7, 11, 642}, + dictWord{8, 11, 250}, + dictWord{11, 11, 123}, + dictWord{11, 11, 137}, + dictWord{13, 11, 48}, + dictWord{142, 11, 95}, + dictWord{133, 0, 655}, + dictWord{17, 0, 67}, + dictWord{147, 0, 74}, + dictWord{134, 0, 751}, + dictWord{134, 0, 1967}, + dictWord{6, 0, 231}, + dictWord{136, 0, 423}, + dictWord{5, 0, 300}, + dictWord{138, 0, 1016}, + dictWord{4, 10, 319}, + dictWord{5, 10, 699}, + dictWord{138, 10, 673}, + dictWord{6, 0, 237}, + dictWord{7, 0, 611}, + dictWord{8, 0, 100}, + dictWord{9, 0, 416}, + dictWord{ + 11, + 0, + 335, + 
}, + dictWord{12, 0, 173}, + dictWord{18, 0, 101}, + dictWord{6, 10, 336}, + dictWord{8, 10, 552}, + dictWord{9, 10, 285}, + dictWord{10, 10, 99}, + dictWord{ + 139, + 10, + 568, + }, + dictWord{134, 0, 1370}, + dictWord{7, 10, 1406}, + dictWord{9, 10, 218}, + dictWord{141, 10, 222}, + dictWord{133, 10, 256}, + dictWord{ + 135, + 0, + 1208, + }, + dictWord{14, 11, 213}, + dictWord{148, 11, 38}, + dictWord{6, 0, 1219}, + dictWord{135, 11, 1642}, + dictWord{13, 0, 417}, + dictWord{14, 0, 129}, + dictWord{143, 0, 15}, + dictWord{10, 11, 545}, + dictWord{140, 11, 301}, + dictWord{17, 10, 39}, + dictWord{148, 10, 36}, + dictWord{133, 0, 199}, + dictWord{4, 11, 904}, + dictWord{133, 11, 794}, + dictWord{12, 0, 427}, + dictWord{146, 0, 38}, + dictWord{134, 0, 949}, + dictWord{8, 0, 665}, + dictWord{135, 10, 634}, + dictWord{ + 132, + 10, + 618, + }, + dictWord{135, 10, 259}, + dictWord{132, 10, 339}, + dictWord{133, 11, 761}, + dictWord{141, 10, 169}, + dictWord{132, 10, 759}, + dictWord{5, 0, 688}, + dictWord{7, 0, 539}, + dictWord{135, 0, 712}, + dictWord{7, 11, 386}, + dictWord{138, 11, 713}, + dictWord{134, 0, 1186}, + dictWord{6, 11, 7}, + dictWord{6, 11, 35}, + dictWord{ + 7, + 11, + 147, + }, + dictWord{7, 11, 1069}, + dictWord{7, 11, 1568}, + dictWord{7, 11, 1575}, + dictWord{7, 11, 1917}, + dictWord{8, 11, 43}, + dictWord{8, 11, 208}, + dictWord{ + 9, + 11, + 128, + }, + dictWord{9, 11, 866}, + dictWord{10, 11, 20}, + dictWord{11, 11, 981}, + dictWord{147, 11, 33}, + dictWord{7, 11, 893}, + dictWord{8, 10, 482}, + dictWord{141, 11, 424}, + dictWord{6, 0, 312}, + dictWord{6, 0, 1715}, + dictWord{10, 0, 584}, + dictWord{11, 0, 546}, + dictWord{11, 0, 692}, + dictWord{12, 0, 259}, + dictWord{ + 12, + 0, + 295, + }, + dictWord{13, 0, 46}, + dictWord{141, 0, 154}, + dictWord{5, 10, 336}, + dictWord{6, 10, 341}, + dictWord{6, 10, 478}, + dictWord{6, 10, 1763}, + dictWord{ + 136, + 10, + 386, + }, + dictWord{137, 0, 151}, + dictWord{132, 0, 588}, + dictWord{152, 0, 4}, 
+ dictWord{6, 11, 322}, + dictWord{9, 11, 552}, + dictWord{11, 11, 274}, + dictWord{ + 13, + 11, + 209, + }, + dictWord{13, 11, 499}, + dictWord{14, 11, 85}, + dictWord{15, 11, 126}, + dictWord{145, 11, 70}, + dictWord{135, 10, 73}, + dictWord{4, 0, 231}, + dictWord{ + 5, + 0, + 61, + }, + dictWord{6, 0, 104}, + dictWord{7, 0, 729}, + dictWord{7, 0, 964}, + dictWord{7, 0, 1658}, + dictWord{140, 0, 414}, + dictWord{6, 0, 263}, + dictWord{138, 0, 757}, + dictWord{135, 10, 1971}, + dictWord{4, 0, 612}, + dictWord{133, 0, 561}, + dictWord{132, 0, 320}, + dictWord{135, 10, 1344}, + dictWord{8, 11, 83}, + dictWord{ + 8, + 11, + 817, + }, + dictWord{9, 11, 28}, + dictWord{9, 11, 29}, + dictWord{9, 11, 885}, + dictWord{10, 11, 387}, + dictWord{11, 11, 633}, + dictWord{11, 11, 740}, + dictWord{ + 13, + 11, + 235, + }, + dictWord{13, 11, 254}, + dictWord{15, 11, 143}, + dictWord{143, 11, 146}, + dictWord{5, 10, 396}, + dictWord{134, 10, 501}, + dictWord{140, 11, 49}, + dictWord{132, 0, 225}, + dictWord{4, 10, 929}, + dictWord{5, 10, 799}, + dictWord{8, 10, 46}, + dictWord{136, 10, 740}, + dictWord{4, 0, 405}, + dictWord{7, 0, 817}, + dictWord{ + 14, + 0, + 58, + }, + dictWord{17, 0, 37}, + dictWord{146, 0, 124}, + dictWord{133, 0, 974}, + dictWord{4, 11, 412}, + dictWord{133, 11, 581}, + dictWord{4, 10, 892}, + dictWord{ + 133, + 10, + 770, + }, + dictWord{4, 0, 996}, + dictWord{134, 0, 2026}, + dictWord{4, 0, 527}, + dictWord{5, 0, 235}, + dictWord{7, 0, 1239}, + dictWord{11, 0, 131}, + dictWord{ + 140, + 0, + 370, + }, + dictWord{9, 0, 16}, + dictWord{13, 0, 386}, + dictWord{135, 11, 421}, + dictWord{7, 0, 956}, + dictWord{7, 0, 1157}, + dictWord{7, 0, 1506}, + dictWord{7, 0, 1606}, + dictWord{7, 0, 1615}, + dictWord{7, 0, 1619}, + dictWord{7, 0, 1736}, + dictWord{7, 0, 1775}, + dictWord{8, 0, 590}, + dictWord{9, 0, 324}, + dictWord{9, 0, 736}, + dictWord{ + 9, + 0, + 774, + }, + dictWord{9, 0, 776}, + dictWord{9, 0, 784}, + dictWord{10, 0, 567}, + dictWord{10, 0, 708}, + 
dictWord{11, 0, 518}, + dictWord{11, 0, 613}, + dictWord{11, 0, 695}, + dictWord{11, 0, 716}, + dictWord{11, 0, 739}, + dictWord{11, 0, 770}, + dictWord{11, 0, 771}, + dictWord{11, 0, 848}, + dictWord{11, 0, 857}, + dictWord{11, 0, 931}, + dictWord{ + 11, + 0, + 947, + }, + dictWord{12, 0, 326}, + dictWord{12, 0, 387}, + dictWord{12, 0, 484}, + dictWord{12, 0, 528}, + dictWord{12, 0, 552}, + dictWord{12, 0, 613}, + dictWord{ + 13, + 0, + 189, + }, + dictWord{13, 0, 256}, + dictWord{13, 0, 340}, + dictWord{13, 0, 432}, + dictWord{13, 0, 436}, + dictWord{13, 0, 440}, + dictWord{13, 0, 454}, + dictWord{14, 0, 174}, + dictWord{14, 0, 220}, + dictWord{14, 0, 284}, + dictWord{14, 0, 390}, + dictWord{145, 0, 121}, + dictWord{135, 10, 158}, + dictWord{9, 0, 137}, + dictWord{138, 0, 221}, + dictWord{4, 11, 110}, + dictWord{10, 11, 415}, + dictWord{10, 11, 597}, + dictWord{142, 11, 206}, + dictWord{141, 11, 496}, + dictWord{135, 11, 205}, + dictWord{ + 151, + 10, + 25, + }, + dictWord{135, 11, 778}, + dictWord{7, 11, 1656}, + dictWord{7, 10, 2001}, + dictWord{9, 11, 369}, + dictWord{10, 11, 338}, + dictWord{10, 11, 490}, + dictWord{11, 11, 154}, + dictWord{11, 11, 545}, + dictWord{11, 11, 775}, + dictWord{13, 11, 77}, + dictWord{141, 11, 274}, + dictWord{4, 11, 444}, + dictWord{ + 10, + 11, + 146, + }, + dictWord{140, 11, 9}, + dictWord{7, 0, 390}, + dictWord{138, 0, 140}, + dictWord{135, 0, 1144}, + dictWord{134, 0, 464}, + dictWord{7, 10, 1461}, + dictWord{ + 140, + 10, + 91, + }, + dictWord{132, 10, 602}, + dictWord{4, 11, 283}, + dictWord{135, 11, 1194}, + dictWord{5, 0, 407}, + dictWord{11, 0, 204}, + dictWord{11, 0, 243}, + dictWord{ + 11, + 0, + 489, + }, + dictWord{12, 0, 293}, + dictWord{19, 0, 37}, + dictWord{20, 0, 73}, + dictWord{150, 0, 38}, + dictWord{7, 0, 1218}, + dictWord{136, 0, 303}, + dictWord{ + 5, + 0, + 325, + }, + dictWord{8, 0, 5}, + dictWord{8, 0, 227}, + dictWord{9, 0, 105}, + dictWord{10, 0, 585}, + dictWord{12, 0, 614}, + dictWord{4, 10, 13}, + 
dictWord{5, 10, 567}, + dictWord{ + 7, + 10, + 1498, + }, + dictWord{9, 10, 124}, + dictWord{11, 10, 521}, + dictWord{140, 10, 405}, + dictWord{135, 10, 1006}, + dictWord{7, 0, 800}, + dictWord{10, 0, 12}, + dictWord{134, 11, 1720}, + dictWord{135, 0, 1783}, + dictWord{132, 10, 735}, + dictWord{138, 10, 812}, + dictWord{4, 10, 170}, + dictWord{135, 10, 323}, + dictWord{ + 6, + 0, + 621, + }, + dictWord{13, 0, 504}, + dictWord{144, 0, 89}, + dictWord{5, 10, 304}, + dictWord{135, 10, 1403}, + dictWord{137, 11, 216}, + dictWord{6, 0, 920}, + dictWord{ + 6, + 0, + 1104, + }, + dictWord{9, 11, 183}, + dictWord{139, 11, 286}, + dictWord{4, 0, 376}, + dictWord{133, 10, 742}, + dictWord{134, 0, 218}, + dictWord{8, 0, 641}, + dictWord{ + 11, + 0, + 388, + }, + dictWord{140, 0, 580}, + dictWord{7, 0, 454}, + dictWord{7, 0, 782}, + dictWord{8, 0, 768}, + dictWord{140, 0, 686}, + dictWord{137, 11, 33}, + dictWord{ + 133, + 10, + 111, + }, + dictWord{144, 0, 0}, + dictWord{10, 0, 676}, + dictWord{140, 0, 462}, + dictWord{6, 0, 164}, + dictWord{136, 11, 735}, + dictWord{133, 10, 444}, + dictWord{ + 150, + 0, + 50, + }, + dictWord{7, 11, 1862}, + dictWord{12, 11, 491}, + dictWord{12, 11, 520}, + dictWord{13, 11, 383}, + dictWord{14, 11, 244}, + dictWord{146, 11, 12}, + dictWord{ + 5, + 11, + 132, + }, + dictWord{9, 11, 486}, + dictWord{9, 11, 715}, + dictWord{10, 11, 458}, + dictWord{11, 11, 373}, + dictWord{11, 11, 668}, + dictWord{11, 11, 795}, + dictWord{11, 11, 897}, + dictWord{12, 11, 272}, + dictWord{12, 11, 424}, + dictWord{12, 11, 539}, + dictWord{12, 11, 558}, + dictWord{14, 11, 245}, + dictWord{ + 14, + 11, + 263, + }, + dictWord{14, 11, 264}, + dictWord{14, 11, 393}, + dictWord{142, 11, 403}, + dictWord{8, 10, 123}, + dictWord{15, 10, 6}, + dictWord{144, 10, 7}, + dictWord{ + 6, + 0, + 285, + }, + dictWord{8, 0, 654}, + dictWord{11, 0, 749}, + dictWord{12, 0, 190}, + dictWord{12, 0, 327}, + dictWord{13, 0, 120}, + dictWord{13, 0, 121}, + dictWord{13, 0, 327}, + 
dictWord{15, 0, 47}, + dictWord{146, 0, 40}, + dictWord{5, 11, 8}, + dictWord{6, 11, 89}, + dictWord{6, 11, 400}, + dictWord{7, 11, 1569}, + dictWord{7, 11, 1623}, + dictWord{ + 7, + 11, + 1850, + }, + dictWord{8, 11, 218}, + dictWord{8, 11, 422}, + dictWord{9, 11, 570}, + dictWord{138, 11, 626}, + dictWord{6, 11, 387}, + dictWord{7, 11, 882}, + dictWord{141, 11, 111}, + dictWord{6, 0, 343}, + dictWord{7, 0, 195}, + dictWord{9, 0, 226}, + dictWord{10, 0, 197}, + dictWord{10, 0, 575}, + dictWord{11, 0, 502}, + dictWord{ + 11, + 0, + 899, + }, + dictWord{6, 11, 224}, + dictWord{7, 11, 877}, + dictWord{137, 11, 647}, + dictWord{5, 10, 937}, + dictWord{135, 10, 100}, + dictWord{135, 11, 790}, + dictWord{150, 0, 29}, + dictWord{147, 0, 8}, + dictWord{134, 0, 1812}, + dictWord{149, 0, 8}, + dictWord{135, 11, 394}, + dictWord{7, 0, 1125}, + dictWord{9, 0, 143}, + dictWord{ + 11, + 0, + 61, + }, + dictWord{14, 0, 405}, + dictWord{150, 0, 21}, + dictWord{10, 11, 755}, + dictWord{147, 11, 29}, + dictWord{9, 11, 378}, + dictWord{141, 11, 162}, + dictWord{135, 10, 922}, + dictWord{5, 10, 619}, + dictWord{133, 10, 698}, + dictWord{134, 0, 1327}, + dictWord{6, 0, 1598}, + dictWord{137, 0, 575}, + dictWord{ + 9, + 11, + 569, + }, + dictWord{12, 11, 12}, + dictWord{12, 11, 81}, + dictWord{12, 11, 319}, + dictWord{13, 11, 69}, + dictWord{14, 11, 259}, + dictWord{16, 11, 87}, + dictWord{ + 17, + 11, + 1, + }, + dictWord{17, 11, 21}, + dictWord{17, 11, 24}, + dictWord{18, 11, 15}, + dictWord{18, 11, 56}, + dictWord{18, 11, 59}, + dictWord{18, 11, 127}, + dictWord{18, 11, 154}, + dictWord{19, 11, 19}, + dictWord{148, 11, 31}, + dictWord{6, 0, 895}, + dictWord{135, 11, 1231}, + dictWord{5, 0, 959}, + dictWord{7, 11, 124}, + dictWord{136, 11, 38}, + dictWord{5, 11, 261}, + dictWord{7, 11, 78}, + dictWord{7, 11, 199}, + dictWord{8, 11, 815}, + dictWord{9, 11, 126}, + dictWord{138, 11, 342}, + dictWord{5, 10, 917}, + dictWord{134, 10, 1659}, + dictWord{7, 0, 1759}, + dictWord{5, 11, 595}, 
+ dictWord{135, 11, 1863}, + dictWord{136, 0, 173}, + dictWord{134, 0, 266}, + dictWord{ + 142, + 0, + 261, + }, + dictWord{132, 11, 628}, + dictWord{5, 10, 251}, + dictWord{5, 10, 956}, + dictWord{8, 10, 268}, + dictWord{9, 10, 214}, + dictWord{146, 10, 142}, + dictWord{ + 7, + 11, + 266, + }, + dictWord{136, 11, 804}, + dictWord{135, 11, 208}, + dictWord{6, 11, 79}, + dictWord{7, 11, 1021}, + dictWord{135, 11, 1519}, + dictWord{11, 11, 704}, + dictWord{141, 11, 396}, + dictWord{5, 10, 346}, + dictWord{5, 10, 711}, + dictWord{136, 10, 390}, + dictWord{136, 11, 741}, + dictWord{134, 11, 376}, + dictWord{ + 134, + 0, + 1427, + }, + dictWord{6, 0, 1033}, + dictWord{6, 0, 1217}, + dictWord{136, 0, 300}, + dictWord{133, 10, 624}, + dictWord{6, 11, 100}, + dictWord{7, 11, 244}, + dictWord{ + 7, + 11, + 632, + }, + dictWord{7, 11, 1609}, + dictWord{8, 11, 178}, + dictWord{8, 11, 638}, + dictWord{141, 11, 58}, + dictWord{6, 0, 584}, + dictWord{5, 10, 783}, + dictWord{ + 7, + 10, + 1998, + }, + dictWord{135, 10, 2047}, + dictWord{5, 0, 427}, + dictWord{5, 0, 734}, + dictWord{7, 0, 478}, + dictWord{136, 0, 52}, + dictWord{7, 0, 239}, + dictWord{ + 11, + 0, + 217, + }, + dictWord{142, 0, 165}, + dictWord{134, 0, 1129}, + dictWord{6, 0, 168}, + dictWord{6, 0, 1734}, + dictWord{7, 0, 20}, + dictWord{7, 0, 1056}, + dictWord{8, 0, 732}, + dictWord{9, 0, 406}, + dictWord{9, 0, 911}, + dictWord{138, 0, 694}, + dictWord{132, 10, 594}, + dictWord{133, 11, 791}, + dictWord{7, 11, 686}, + dictWord{8, 11, 33}, + dictWord{8, 11, 238}, + dictWord{10, 11, 616}, + dictWord{11, 11, 467}, + dictWord{11, 11, 881}, + dictWord{13, 11, 217}, + dictWord{13, 11, 253}, + dictWord{ + 142, + 11, + 268, + }, + dictWord{137, 11, 476}, + dictWord{134, 0, 418}, + dictWord{133, 0, 613}, + dictWord{132, 0, 632}, + dictWord{132, 11, 447}, + dictWord{7, 0, 32}, + dictWord{ + 7, + 0, + 984, + }, + dictWord{8, 0, 85}, + dictWord{8, 0, 709}, + dictWord{9, 0, 579}, + dictWord{9, 0, 847}, + dictWord{9, 0, 856}, + 
dictWord{10, 0, 799}, + dictWord{11, 0, 258}, + dictWord{ + 11, + 0, + 1007, + }, + dictWord{12, 0, 331}, + dictWord{12, 0, 615}, + dictWord{13, 0, 188}, + dictWord{13, 0, 435}, + dictWord{14, 0, 8}, + dictWord{15, 0, 165}, + dictWord{ + 16, + 0, + 27, + }, + dictWord{20, 0, 40}, + dictWord{144, 11, 35}, + dictWord{4, 11, 128}, + dictWord{5, 11, 415}, + dictWord{6, 11, 462}, + dictWord{7, 11, 294}, + dictWord{7, 11, 578}, + dictWord{10, 11, 710}, + dictWord{139, 11, 86}, + dictWord{5, 0, 694}, + dictWord{136, 0, 909}, + dictWord{7, 0, 1109}, + dictWord{11, 0, 7}, + dictWord{5, 10, 37}, + dictWord{ + 6, + 10, + 39, + }, + dictWord{6, 10, 451}, + dictWord{7, 10, 218}, + dictWord{7, 10, 1166}, + dictWord{7, 10, 1687}, + dictWord{8, 10, 662}, + dictWord{144, 10, 2}, + dictWord{ + 136, + 11, + 587, + }, + dictWord{6, 11, 427}, + dictWord{7, 11, 1018}, + dictWord{138, 11, 692}, + dictWord{4, 11, 195}, + dictWord{6, 10, 508}, + dictWord{135, 11, 802}, + dictWord{4, 0, 167}, + dictWord{135, 0, 82}, + dictWord{5, 0, 62}, + dictWord{6, 0, 24}, + dictWord{6, 0, 534}, + dictWord{7, 0, 74}, + dictWord{7, 0, 678}, + dictWord{7, 0, 684}, + dictWord{ + 7, + 0, + 1043, + }, + dictWord{7, 0, 1072}, + dictWord{8, 0, 280}, + dictWord{8, 0, 541}, + dictWord{8, 0, 686}, + dictWord{9, 0, 258}, + dictWord{10, 0, 519}, + dictWord{11, 0, 252}, + dictWord{140, 0, 282}, + dictWord{138, 0, 33}, + dictWord{4, 0, 359}, + dictWord{133, 11, 738}, + dictWord{7, 0, 980}, + dictWord{9, 0, 328}, + dictWord{13, 0, 186}, + dictWord{13, 0, 364}, + dictWord{7, 10, 635}, + dictWord{7, 10, 796}, + dictWord{8, 10, 331}, + dictWord{9, 10, 330}, + dictWord{9, 10, 865}, + dictWord{10, 10, 119}, + dictWord{ + 10, + 10, + 235, + }, + dictWord{11, 10, 111}, + dictWord{11, 10, 129}, + dictWord{11, 10, 240}, + dictWord{12, 10, 31}, + dictWord{12, 10, 66}, + dictWord{12, 10, 222}, + dictWord{12, 10, 269}, + dictWord{12, 10, 599}, + dictWord{12, 10, 684}, + dictWord{12, 10, 689}, + dictWord{12, 10, 691}, + 
dictWord{142, 10, 345}, + dictWord{ + 137, + 10, + 527, + }, + dictWord{6, 0, 596}, + dictWord{7, 0, 585}, + dictWord{135, 10, 702}, + dictWord{134, 11, 1683}, + dictWord{133, 0, 211}, + dictWord{6, 0, 145}, + dictWord{ + 141, + 0, + 336, + }, + dictWord{134, 0, 1130}, + dictWord{7, 0, 873}, + dictWord{6, 10, 37}, + dictWord{7, 10, 1666}, + dictWord{8, 10, 195}, + dictWord{8, 10, 316}, + dictWord{ + 9, + 10, + 178, + }, + dictWord{9, 10, 276}, + dictWord{9, 10, 339}, + dictWord{9, 10, 536}, + dictWord{10, 10, 102}, + dictWord{10, 10, 362}, + dictWord{10, 10, 785}, + dictWord{ + 11, + 10, + 55, + }, + dictWord{11, 10, 149}, + dictWord{11, 10, 773}, + dictWord{13, 10, 416}, + dictWord{13, 10, 419}, + dictWord{14, 10, 38}, + dictWord{14, 10, 41}, + dictWord{ + 142, + 10, + 210, + }, + dictWord{8, 0, 840}, + dictWord{136, 0, 841}, + dictWord{132, 0, 263}, + dictWord{5, 11, 3}, + dictWord{8, 11, 578}, + dictWord{9, 11, 118}, + dictWord{ + 10, + 11, + 705, + }, + dictWord{12, 11, 383}, + dictWord{141, 11, 279}, + dictWord{132, 0, 916}, + dictWord{133, 11, 229}, + dictWord{133, 10, 645}, + dictWord{15, 0, 155}, + dictWord{16, 0, 79}, + dictWord{8, 11, 102}, + dictWord{10, 11, 578}, + dictWord{10, 11, 672}, + dictWord{12, 11, 496}, + dictWord{13, 11, 408}, + dictWord{14, 11, 121}, + dictWord{145, 11, 106}, + dictWord{4, 0, 599}, + dictWord{5, 0, 592}, + dictWord{6, 0, 1634}, + dictWord{7, 0, 5}, + dictWord{7, 0, 55}, + dictWord{7, 0, 67}, + dictWord{7, 0, 97}, + dictWord{7, 0, 691}, + dictWord{7, 0, 979}, + dictWord{7, 0, 1600}, + dictWord{7, 0, 1697}, + dictWord{8, 0, 207}, + dictWord{8, 0, 214}, + dictWord{8, 0, 231}, + dictWord{8, 0, 294}, + dictWord{8, 0, 336}, + dictWord{8, 0, 428}, + dictWord{8, 0, 471}, + dictWord{8, 0, 622}, + dictWord{8, 0, 626}, + dictWord{8, 0, 679}, + dictWord{8, 0, 759}, + dictWord{8, 0, 829}, + dictWord{9, 0, 11}, + dictWord{9, 0, 246}, + dictWord{9, 0, 484}, + dictWord{9, 0, 573}, + dictWord{9, 0, 706}, + dictWord{9, 0, 762}, + dictWord{9, 
0, 798}, + dictWord{9, 0, 855}, + dictWord{9, 0, 870}, + dictWord{9, 0, 912}, + dictWord{10, 0, 303}, + dictWord{10, 0, 335}, + dictWord{10, 0, 424}, + dictWord{10, 0, 461}, + dictWord{10, 0, 543}, + dictWord{ + 10, + 0, + 759, + }, + dictWord{10, 0, 814}, + dictWord{11, 0, 59}, + dictWord{11, 0, 199}, + dictWord{11, 0, 235}, + dictWord{11, 0, 590}, + dictWord{11, 0, 631}, + dictWord{11, 0, 929}, + dictWord{11, 0, 963}, + dictWord{11, 0, 987}, + dictWord{12, 0, 114}, + dictWord{12, 0, 182}, + dictWord{12, 0, 226}, + dictWord{12, 0, 332}, + dictWord{12, 0, 439}, + dictWord{12, 0, 575}, + dictWord{12, 0, 598}, + dictWord{12, 0, 675}, + dictWord{13, 0, 8}, + dictWord{13, 0, 125}, + dictWord{13, 0, 194}, + dictWord{13, 0, 287}, + dictWord{ + 14, + 0, + 197, + }, + dictWord{14, 0, 383}, + dictWord{15, 0, 53}, + dictWord{17, 0, 63}, + dictWord{19, 0, 46}, + dictWord{19, 0, 98}, + dictWord{19, 0, 106}, + dictWord{148, 0, 85}, + dictWord{ + 7, + 0, + 1356, + }, + dictWord{132, 10, 290}, + dictWord{6, 10, 70}, + dictWord{7, 10, 1292}, + dictWord{10, 10, 762}, + dictWord{139, 10, 288}, + dictWord{150, 11, 55}, + dictWord{4, 0, 593}, + dictWord{8, 11, 115}, + dictWord{8, 11, 350}, + dictWord{9, 11, 489}, + dictWord{10, 11, 128}, + dictWord{11, 11, 306}, + dictWord{12, 11, 373}, + dictWord{14, 11, 30}, + dictWord{17, 11, 79}, + dictWord{147, 11, 80}, + dictWord{135, 11, 1235}, + dictWord{134, 0, 1392}, + dictWord{4, 11, 230}, + dictWord{ + 133, + 11, + 702, + }, + dictWord{147, 0, 126}, + dictWord{7, 10, 131}, + dictWord{7, 10, 422}, + dictWord{8, 10, 210}, + dictWord{140, 10, 573}, + dictWord{134, 0, 1179}, + dictWord{ + 139, + 11, + 435, + }, + dictWord{139, 10, 797}, + dictWord{134, 11, 1728}, + dictWord{4, 0, 162}, + dictWord{18, 11, 26}, + dictWord{19, 11, 42}, + dictWord{20, 11, 43}, + dictWord{21, 11, 0}, + dictWord{23, 11, 27}, + dictWord{152, 11, 14}, + dictWord{132, 10, 936}, + dictWord{6, 0, 765}, + dictWord{5, 10, 453}, + dictWord{134, 10, 441}, + dictWord{133, 0, 
187}, + dictWord{135, 0, 1286}, + dictWord{6, 0, 635}, + dictWord{6, 0, 904}, + dictWord{6, 0, 1210}, + dictWord{134, 0, 1489}, + dictWord{4, 0, 215}, + dictWord{ + 8, + 0, + 890, + }, + dictWord{9, 0, 38}, + dictWord{10, 0, 923}, + dictWord{11, 0, 23}, + dictWord{11, 0, 127}, + dictWord{139, 0, 796}, + dictWord{6, 0, 1165}, + dictWord{ + 134, + 0, + 1306, + }, + dictWord{7, 0, 716}, + dictWord{13, 0, 97}, + dictWord{141, 0, 251}, + dictWord{132, 10, 653}, + dictWord{136, 0, 657}, + dictWord{146, 10, 80}, + dictWord{ + 5, + 11, + 622, + }, + dictWord{7, 11, 1032}, + dictWord{11, 11, 26}, + dictWord{11, 11, 213}, + dictWord{11, 11, 707}, + dictWord{12, 11, 380}, + dictWord{13, 11, 226}, + dictWord{141, 11, 355}, + dictWord{6, 0, 299}, + dictWord{5, 11, 70}, + dictWord{6, 11, 334}, + dictWord{9, 11, 171}, + dictWord{11, 11, 637}, + dictWord{12, 11, 202}, + dictWord{14, 11, 222}, + dictWord{145, 11, 42}, + dictWord{142, 0, 134}, + dictWord{4, 11, 23}, + dictWord{5, 11, 313}, + dictWord{5, 11, 1014}, + dictWord{6, 11, 50}, + dictWord{ + 6, + 11, + 51, + }, + dictWord{7, 11, 142}, + dictWord{7, 11, 384}, + dictWord{9, 11, 783}, + dictWord{139, 11, 741}, + dictWord{4, 11, 141}, + dictWord{7, 11, 559}, + dictWord{ + 8, + 11, + 640, + }, + dictWord{9, 11, 460}, + dictWord{12, 11, 183}, + dictWord{141, 11, 488}, + dictWord{136, 11, 614}, + dictWord{7, 10, 1368}, + dictWord{8, 10, 232}, + dictWord{8, 10, 361}, + dictWord{10, 10, 682}, + dictWord{138, 10, 742}, + dictWord{137, 10, 534}, + dictWord{6, 0, 1082}, + dictWord{140, 0, 658}, + dictWord{ + 137, + 10, + 27, + }, + dictWord{135, 0, 2002}, + dictWord{142, 10, 12}, + dictWord{4, 0, 28}, + dictWord{5, 0, 440}, + dictWord{7, 0, 248}, + dictWord{11, 0, 833}, + dictWord{140, 0, 344}, + dictWord{7, 10, 736}, + dictWord{139, 10, 264}, + dictWord{134, 10, 1657}, + dictWord{134, 0, 1654}, + dictWord{138, 0, 531}, + dictWord{5, 11, 222}, + dictWord{ + 9, + 11, + 140, + }, + dictWord{138, 11, 534}, + dictWord{6, 0, 634}, + 
dictWord{6, 0, 798}, + dictWord{134, 0, 840}, + dictWord{138, 11, 503}, + dictWord{135, 10, 127}, + dictWord{133, 0, 853}, + dictWord{5, 11, 154}, + dictWord{7, 11, 1491}, + dictWord{10, 11, 379}, + dictWord{138, 11, 485}, + dictWord{6, 0, 249}, + dictWord{7, 0, 1234}, + dictWord{139, 0, 573}, + dictWord{133, 11, 716}, + dictWord{7, 11, 1570}, + dictWord{140, 11, 542}, + dictWord{136, 10, 364}, + dictWord{138, 0, 527}, + dictWord{ + 4, + 11, + 91, + }, + dictWord{5, 11, 388}, + dictWord{5, 11, 845}, + dictWord{6, 11, 206}, + dictWord{6, 11, 252}, + dictWord{6, 11, 365}, + dictWord{7, 11, 136}, + dictWord{7, 11, 531}, + dictWord{8, 11, 264}, + dictWord{136, 11, 621}, + dictWord{134, 0, 1419}, + dictWord{135, 11, 1441}, + dictWord{7, 0, 49}, + dictWord{7, 0, 392}, + dictWord{8, 0, 20}, + dictWord{8, 0, 172}, + dictWord{8, 0, 690}, + dictWord{9, 0, 383}, + dictWord{9, 0, 845}, + dictWord{10, 0, 48}, + dictWord{11, 0, 293}, + dictWord{11, 0, 832}, + dictWord{ + 11, + 0, + 920, + }, + dictWord{11, 0, 984}, + dictWord{141, 0, 221}, + dictWord{5, 0, 858}, + dictWord{133, 0, 992}, + dictWord{5, 0, 728}, + dictWord{137, 10, 792}, + dictWord{ + 5, + 10, + 909, + }, + dictWord{9, 10, 849}, + dictWord{138, 10, 805}, + dictWord{7, 0, 525}, + dictWord{7, 0, 1579}, + dictWord{8, 0, 497}, + dictWord{136, 0, 573}, + dictWord{6, 0, 268}, + dictWord{137, 0, 62}, + dictWord{135, 11, 576}, + dictWord{134, 0, 1201}, + dictWord{5, 11, 771}, + dictWord{5, 11, 863}, + dictWord{5, 11, 898}, + dictWord{ + 6, + 11, + 1632, + }, + dictWord{6, 11, 1644}, + dictWord{134, 11, 1780}, + dictWord{133, 11, 331}, + dictWord{7, 0, 193}, + dictWord{7, 0, 1105}, + dictWord{10, 0, 495}, + dictWord{ + 7, + 10, + 397, + }, + dictWord{8, 10, 124}, + dictWord{8, 10, 619}, + dictWord{9, 10, 305}, + dictWord{11, 10, 40}, + dictWord{12, 10, 349}, + dictWord{13, 10, 134}, + dictWord{ + 13, + 10, + 295, + }, + dictWord{14, 10, 155}, + dictWord{15, 10, 120}, + dictWord{146, 10, 105}, + dictWord{138, 0, 106}, + 
dictWord{6, 0, 859}, + dictWord{5, 11, 107}, + dictWord{ + 7, + 11, + 201, + }, + dictWord{136, 11, 518}, + dictWord{6, 11, 446}, + dictWord{135, 11, 1817}, + dictWord{13, 0, 23}, + dictWord{4, 10, 262}, + dictWord{135, 10, 342}, + dictWord{133, 10, 641}, + dictWord{137, 11, 851}, + dictWord{6, 0, 925}, + dictWord{137, 0, 813}, + dictWord{132, 11, 504}, + dictWord{6, 0, 613}, + dictWord{ + 136, + 0, + 223, + }, + dictWord{4, 10, 99}, + dictWord{6, 10, 250}, + dictWord{6, 10, 346}, + dictWord{8, 10, 127}, + dictWord{138, 10, 81}, + dictWord{136, 0, 953}, + dictWord{ + 132, + 10, + 915, + }, + dictWord{139, 11, 892}, + dictWord{5, 10, 75}, + dictWord{9, 10, 517}, + dictWord{10, 10, 470}, + dictWord{12, 10, 155}, + dictWord{141, 10, 224}, + dictWord{ + 4, + 0, + 666, + }, + dictWord{7, 0, 1017}, + dictWord{7, 11, 996}, + dictWord{138, 11, 390}, + dictWord{5, 11, 883}, + dictWord{133, 11, 975}, + dictWord{14, 10, 83}, + dictWord{ + 142, + 11, + 83, + }, + dictWord{4, 0, 670}, + dictWord{5, 11, 922}, + dictWord{134, 11, 1707}, + dictWord{135, 0, 216}, + dictWord{9, 0, 40}, + dictWord{11, 0, 136}, + dictWord{ + 135, + 11, + 787, + }, + dictWord{5, 10, 954}, + dictWord{5, 11, 993}, + dictWord{7, 11, 515}, + dictWord{137, 11, 91}, + dictWord{139, 0, 259}, + dictWord{7, 0, 1114}, + dictWord{ + 9, + 0, + 310, + }, + dictWord{9, 0, 682}, + dictWord{10, 0, 440}, + dictWord{13, 0, 40}, + dictWord{6, 10, 304}, + dictWord{8, 10, 418}, + dictWord{11, 10, 341}, + dictWord{ + 139, + 10, + 675, + }, + dictWord{14, 0, 296}, + dictWord{9, 10, 410}, + dictWord{139, 10, 425}, + dictWord{10, 11, 377}, + dictWord{12, 11, 363}, + dictWord{13, 11, 68}, + dictWord{ + 13, + 11, + 94, + }, + dictWord{14, 11, 108}, + dictWord{142, 11, 306}, + dictWord{7, 0, 1401}, + dictWord{135, 0, 1476}, + dictWord{4, 0, 296}, + dictWord{6, 0, 475}, + dictWord{ + 7, + 0, + 401, + }, + dictWord{7, 0, 1410}, + dictWord{7, 0, 1594}, + dictWord{7, 0, 1674}, + dictWord{8, 0, 63}, + dictWord{8, 0, 660}, + 
dictWord{137, 0, 74}, + dictWord{4, 0, 139}, + dictWord{4, 0, 388}, + dictWord{140, 0, 188}, + dictWord{132, 0, 797}, + dictWord{132, 11, 766}, + dictWord{5, 11, 103}, + dictWord{7, 11, 921}, + dictWord{8, 11, 580}, + dictWord{8, 11, 593}, + dictWord{8, 11, 630}, + dictWord{138, 11, 28}, + dictWord{4, 11, 911}, + dictWord{5, 11, 867}, + dictWord{133, 11, 1013}, + dictWord{134, 10, 14}, + dictWord{134, 0, 1572}, + dictWord{134, 10, 1708}, + dictWord{21, 0, 39}, + dictWord{5, 10, 113}, + dictWord{6, 10, 243}, + dictWord{7, 10, 1865}, + dictWord{ + 11, + 10, + 161, + }, + dictWord{16, 10, 37}, + dictWord{145, 10, 99}, + dictWord{7, 11, 1563}, + dictWord{141, 11, 182}, + dictWord{5, 11, 135}, + dictWord{6, 11, 519}, + dictWord{ + 7, + 11, + 1722, + }, + dictWord{10, 11, 271}, + dictWord{11, 11, 261}, + dictWord{145, 11, 54}, + dictWord{132, 10, 274}, + dictWord{134, 0, 1594}, + dictWord{4, 11, 300}, + dictWord{5, 11, 436}, + dictWord{135, 11, 484}, + dictWord{4, 0, 747}, + dictWord{6, 0, 290}, + dictWord{7, 0, 649}, + dictWord{7, 0, 1479}, + dictWord{135, 0, 1583}, + dictWord{133, 11, 535}, + dictWord{147, 11, 82}, + dictWord{133, 0, 232}, + dictWord{137, 0, 887}, + dictWord{135, 10, 166}, + dictWord{136, 0, 521}, + dictWord{4, 0, 14}, + dictWord{7, 0, 472}, + dictWord{7, 0, 1801}, + dictWord{10, 0, 748}, + dictWord{141, 0, 458}, + dictWord{134, 0, 741}, + dictWord{134, 0, 992}, + dictWord{16, 0, 111}, + dictWord{137, 10, 304}, + dictWord{4, 0, 425}, + dictWord{5, 11, 387}, + dictWord{7, 11, 557}, + dictWord{12, 11, 547}, + dictWord{142, 11, 86}, + dictWord{ + 135, + 11, + 1747, + }, + dictWord{5, 10, 654}, + dictWord{135, 11, 1489}, + dictWord{7, 0, 789}, + dictWord{4, 11, 6}, + dictWord{5, 11, 708}, + dictWord{136, 11, 75}, + dictWord{ + 6, + 10, + 273, + }, + dictWord{10, 10, 188}, + dictWord{13, 10, 377}, + dictWord{146, 10, 77}, + dictWord{6, 0, 1593}, + dictWord{4, 11, 303}, + dictWord{7, 11, 619}, + dictWord{ + 10, + 11, + 547, + }, + dictWord{10, 11, 687}, + 
dictWord{11, 11, 122}, + dictWord{140, 11, 601}, + dictWord{134, 0, 1768}, + dictWord{135, 10, 410}, + dictWord{138, 11, 772}, + dictWord{11, 0, 233}, + dictWord{139, 10, 524}, + dictWord{5, 0, 943}, + dictWord{134, 0, 1779}, + dictWord{134, 10, 1785}, + dictWord{136, 11, 529}, + dictWord{ + 132, + 0, + 955, + }, + dictWord{5, 0, 245}, + dictWord{6, 0, 576}, + dictWord{7, 0, 582}, + dictWord{136, 0, 225}, + dictWord{132, 10, 780}, + dictWord{142, 0, 241}, + dictWord{ + 134, + 0, + 1943, + }, + dictWord{4, 11, 106}, + dictWord{7, 11, 310}, + dictWord{7, 11, 1785}, + dictWord{10, 11, 690}, + dictWord{139, 11, 717}, + dictWord{134, 0, 1284}, + dictWord{5, 11, 890}, + dictWord{133, 11, 988}, + dictWord{6, 11, 626}, + dictWord{142, 11, 431}, + dictWord{10, 11, 706}, + dictWord{145, 11, 32}, + dictWord{ + 137, + 11, + 332, + }, + dictWord{132, 11, 698}, + dictWord{135, 0, 709}, + dictWord{5, 10, 948}, + dictWord{138, 11, 17}, + dictWord{136, 0, 554}, + dictWord{134, 0, 1564}, + dictWord{139, 10, 941}, + dictWord{132, 0, 443}, + dictWord{134, 0, 909}, + dictWord{134, 11, 84}, + dictWord{142, 0, 280}, + dictWord{4, 10, 532}, + dictWord{5, 10, 706}, + dictWord{135, 10, 662}, + dictWord{132, 0, 729}, + dictWord{5, 10, 837}, + dictWord{6, 10, 1651}, + dictWord{139, 10, 985}, + dictWord{135, 10, 1861}, + dictWord{ + 4, + 0, + 348, + }, + dictWord{152, 11, 3}, + dictWord{5, 11, 986}, + dictWord{6, 11, 130}, + dictWord{7, 11, 1582}, + dictWord{8, 11, 458}, + dictWord{10, 11, 101}, + dictWord{ + 10, + 11, + 318, + }, + dictWord{138, 11, 823}, + dictWord{134, 0, 758}, + dictWord{4, 0, 298}, + dictWord{137, 0, 848}, + dictWord{4, 10, 330}, + dictWord{7, 10, 933}, + dictWord{ + 7, + 10, + 2012, + }, + dictWord{136, 10, 292}, + dictWord{7, 11, 1644}, + dictWord{137, 11, 129}, + dictWord{6, 0, 1422}, + dictWord{9, 0, 829}, + dictWord{135, 10, 767}, + dictWord{5, 0, 164}, + dictWord{7, 0, 121}, + dictWord{142, 0, 189}, + dictWord{7, 0, 812}, + dictWord{7, 0, 1261}, + dictWord{7, 0, 
1360}, + dictWord{9, 0, 632}, + dictWord{ + 140, + 0, + 352, + }, + dictWord{135, 11, 1788}, + dictWord{139, 0, 556}, + dictWord{135, 11, 997}, + dictWord{145, 10, 114}, + dictWord{4, 0, 172}, + dictWord{9, 0, 611}, + dictWord{10, 0, 436}, + dictWord{12, 0, 673}, + dictWord{13, 0, 255}, + dictWord{137, 10, 883}, + dictWord{11, 0, 530}, + dictWord{138, 10, 274}, + dictWord{133, 0, 844}, + dictWord{134, 0, 984}, + dictWord{13, 0, 232}, + dictWord{18, 0, 35}, + dictWord{4, 10, 703}, + dictWord{135, 10, 207}, + dictWord{132, 10, 571}, + dictWord{9, 0, 263}, + dictWord{10, 0, 147}, + dictWord{138, 0, 492}, + dictWord{7, 11, 1756}, + dictWord{137, 11, 98}, + dictWord{5, 10, 873}, + dictWord{5, 10, 960}, + dictWord{8, 10, 823}, + dictWord{137, 10, 881}, + dictWord{133, 0, 537}, + dictWord{132, 0, 859}, + dictWord{7, 11, 1046}, + dictWord{139, 11, 160}, + dictWord{137, 0, 842}, + dictWord{ + 139, + 10, + 283, + }, + dictWord{5, 10, 33}, + dictWord{6, 10, 470}, + dictWord{139, 10, 424}, + dictWord{6, 11, 45}, + dictWord{7, 11, 433}, + dictWord{8, 11, 129}, + dictWord{ + 9, + 11, + 21, + }, + dictWord{10, 11, 392}, + dictWord{11, 11, 79}, + dictWord{12, 11, 499}, + dictWord{13, 11, 199}, + dictWord{141, 11, 451}, + dictWord{135, 0, 1291}, + dictWord{135, 10, 1882}, + dictWord{7, 11, 558}, + dictWord{136, 11, 353}, + dictWord{134, 0, 1482}, + dictWord{5, 0, 230}, + dictWord{5, 0, 392}, + dictWord{6, 0, 420}, + dictWord{9, 0, 568}, + dictWord{140, 0, 612}, + dictWord{6, 0, 262}, + dictWord{7, 10, 90}, + dictWord{7, 10, 664}, + dictWord{7, 10, 830}, + dictWord{7, 10, 1380}, + dictWord{ + 7, + 10, + 2025, + }, + dictWord{8, 11, 81}, + dictWord{8, 10, 448}, + dictWord{8, 10, 828}, + dictWord{9, 11, 189}, + dictWord{9, 11, 201}, + dictWord{11, 11, 478}, + dictWord{ + 11, + 11, + 712, + }, + dictWord{141, 11, 338}, + dictWord{142, 0, 31}, + dictWord{5, 11, 353}, + dictWord{151, 11, 26}, + dictWord{132, 0, 753}, + dictWord{4, 0, 0}, + dictWord{ + 5, + 0, + 41, + }, + dictWord{7, 0, 
1459}, + dictWord{7, 0, 1469}, + dictWord{7, 0, 1859}, + dictWord{9, 0, 549}, + dictWord{139, 0, 905}, + dictWord{9, 10, 417}, + dictWord{ + 137, + 10, + 493, + }, + dictWord{135, 11, 1113}, + dictWord{133, 0, 696}, + dictWord{141, 11, 448}, + dictWord{134, 10, 295}, + dictWord{132, 0, 834}, + dictWord{4, 0, 771}, + dictWord{5, 10, 1019}, + dictWord{6, 11, 25}, + dictWord{7, 11, 855}, + dictWord{7, 11, 1258}, + dictWord{144, 11, 32}, + dictWord{134, 0, 1076}, + dictWord{133, 0, 921}, + dictWord{133, 0, 674}, + dictWord{4, 11, 4}, + dictWord{7, 11, 1118}, + dictWord{7, 11, 1320}, + dictWord{7, 11, 1706}, + dictWord{8, 11, 277}, + dictWord{9, 11, 622}, + dictWord{10, 11, 9}, + dictWord{11, 11, 724}, + dictWord{12, 11, 350}, + dictWord{12, 11, 397}, + dictWord{13, 11, 28}, + dictWord{13, 11, 159}, + dictWord{15, 11, 89}, + dictWord{18, 11, 5}, + dictWord{19, 11, 9}, + dictWord{20, 11, 34}, + dictWord{150, 11, 47}, + dictWord{134, 10, 208}, + dictWord{6, 0, 444}, + dictWord{136, 0, 308}, + dictWord{ + 6, + 0, + 180, + }, + dictWord{7, 0, 1137}, + dictWord{8, 0, 751}, + dictWord{139, 0, 805}, + dictWord{4, 0, 183}, + dictWord{7, 0, 271}, + dictWord{11, 0, 824}, + dictWord{ + 11, + 0, + 952, + }, + dictWord{13, 0, 278}, + dictWord{13, 0, 339}, + dictWord{13, 0, 482}, + dictWord{14, 0, 424}, + dictWord{148, 0, 99}, + dictWord{7, 11, 317}, + dictWord{ + 135, + 11, + 569, + }, + dictWord{4, 0, 19}, + dictWord{5, 0, 477}, + dictWord{5, 0, 596}, + dictWord{6, 0, 505}, + dictWord{7, 0, 1221}, + dictWord{11, 0, 907}, + dictWord{12, 0, 209}, + dictWord{141, 0, 214}, + dictWord{135, 0, 1215}, + dictWord{6, 0, 271}, + dictWord{7, 0, 398}, + dictWord{8, 0, 387}, + dictWord{10, 0, 344}, + dictWord{7, 10, 448}, + dictWord{ + 7, + 10, + 1629, + }, + dictWord{7, 10, 1813}, + dictWord{8, 10, 442}, + dictWord{9, 10, 710}, + dictWord{10, 10, 282}, + dictWord{138, 10, 722}, + dictWord{11, 10, 844}, + dictWord{12, 10, 104}, + dictWord{140, 10, 625}, + dictWord{134, 11, 255}, + dictWord{133, 
10, 787}, + dictWord{134, 0, 1645}, + dictWord{11, 11, 956}, + dictWord{ + 151, + 11, + 3, + }, + dictWord{6, 0, 92}, + dictWord{6, 0, 188}, + dictWord{7, 0, 209}, + dictWord{7, 0, 1269}, + dictWord{7, 0, 1524}, + dictWord{7, 0, 1876}, + dictWord{8, 0, 661}, + dictWord{10, 0, 42}, + dictWord{10, 0, 228}, + dictWord{11, 0, 58}, + dictWord{11, 0, 1020}, + dictWord{12, 0, 58}, + dictWord{12, 0, 118}, + dictWord{141, 0, 32}, + dictWord{ + 4, + 0, + 459, + }, + dictWord{133, 0, 966}, + dictWord{4, 11, 536}, + dictWord{7, 11, 1141}, + dictWord{10, 11, 723}, + dictWord{139, 11, 371}, + dictWord{140, 0, 330}, + dictWord{134, 0, 1557}, + dictWord{7, 11, 285}, + dictWord{135, 11, 876}, + dictWord{136, 10, 491}, + dictWord{135, 11, 560}, + dictWord{6, 0, 18}, + dictWord{7, 0, 179}, + dictWord{7, 0, 932}, + dictWord{8, 0, 548}, + dictWord{8, 0, 757}, + dictWord{9, 0, 54}, + dictWord{9, 0, 65}, + dictWord{9, 0, 532}, + dictWord{9, 0, 844}, + dictWord{10, 0, 113}, + dictWord{10, 0, 117}, + dictWord{10, 0, 315}, + dictWord{10, 0, 560}, + dictWord{10, 0, 622}, + dictWord{10, 0, 798}, + dictWord{11, 0, 153}, + dictWord{11, 0, 351}, + dictWord{ + 11, + 0, + 375, + }, + dictWord{12, 0, 78}, + dictWord{12, 0, 151}, + dictWord{12, 0, 392}, + dictWord{12, 0, 666}, + dictWord{14, 0, 248}, + dictWord{143, 0, 23}, + dictWord{ + 6, + 0, + 1742, + }, + dictWord{132, 11, 690}, + dictWord{4, 10, 403}, + dictWord{5, 10, 441}, + dictWord{7, 10, 450}, + dictWord{10, 10, 840}, + dictWord{11, 10, 101}, + dictWord{ + 12, + 10, + 193, + }, + dictWord{141, 10, 430}, + dictWord{133, 0, 965}, + dictWord{134, 0, 182}, + dictWord{10, 0, 65}, + dictWord{10, 0, 488}, + dictWord{138, 0, 497}, + dictWord{135, 11, 1346}, + dictWord{6, 0, 973}, + dictWord{6, 0, 1158}, + dictWord{10, 11, 200}, + dictWord{19, 11, 2}, + dictWord{151, 11, 22}, + dictWord{4, 11, 190}, + dictWord{133, 11, 554}, + dictWord{133, 10, 679}, + dictWord{7, 0, 328}, + dictWord{137, 10, 326}, + dictWord{133, 11, 1001}, + dictWord{9, 0, 588}, 
+ dictWord{ + 138, + 0, + 260, + }, + dictWord{133, 11, 446}, + dictWord{135, 10, 1128}, + dictWord{135, 10, 1796}, + dictWord{147, 11, 119}, + dictWord{134, 0, 1786}, + dictWord{ + 6, + 0, + 1328, + }, + dictWord{6, 0, 1985}, + dictWord{8, 0, 962}, + dictWord{138, 0, 1017}, + dictWord{135, 0, 308}, + dictWord{11, 0, 508}, + dictWord{4, 10, 574}, + dictWord{ + 7, + 10, + 350, + }, + dictWord{7, 10, 1024}, + dictWord{8, 10, 338}, + dictWord{9, 10, 677}, + dictWord{138, 10, 808}, + dictWord{138, 11, 752}, + dictWord{135, 10, 1081}, + dictWord{137, 11, 96}, + dictWord{7, 10, 1676}, + dictWord{135, 10, 2037}, + dictWord{136, 0, 588}, + dictWord{132, 11, 304}, + dictWord{133, 0, 614}, + dictWord{ + 140, + 0, + 793, + }, + dictWord{136, 0, 287}, + dictWord{137, 10, 297}, + dictWord{141, 10, 37}, + dictWord{6, 11, 53}, + dictWord{6, 11, 199}, + dictWord{7, 11, 1408}, + dictWord{ + 8, + 11, + 32, + }, + dictWord{8, 11, 93}, + dictWord{9, 11, 437}, + dictWord{10, 11, 397}, + dictWord{10, 11, 629}, + dictWord{11, 11, 593}, + dictWord{11, 11, 763}, + dictWord{ + 13, + 11, + 326, + }, + dictWord{145, 11, 35}, + dictWord{134, 11, 105}, + dictWord{9, 11, 320}, + dictWord{10, 11, 506}, + dictWord{138, 11, 794}, + dictWord{5, 11, 114}, + dictWord{5, 11, 255}, + dictWord{141, 11, 285}, + dictWord{140, 0, 290}, + dictWord{7, 11, 2035}, + dictWord{8, 11, 19}, + dictWord{9, 11, 89}, + dictWord{138, 11, 831}, + dictWord{134, 0, 1136}, + dictWord{7, 0, 719}, + dictWord{8, 0, 796}, + dictWord{8, 0, 809}, + dictWord{8, 0, 834}, + dictWord{6, 10, 306}, + dictWord{7, 10, 1140}, + dictWord{ + 7, + 10, + 1340, + }, + dictWord{8, 10, 133}, + dictWord{138, 10, 449}, + dictWord{139, 10, 1011}, + dictWord{5, 0, 210}, + dictWord{6, 0, 213}, + dictWord{7, 0, 60}, + dictWord{ + 10, + 0, + 364, + }, + dictWord{139, 0, 135}, + dictWord{5, 0, 607}, + dictWord{8, 0, 326}, + dictWord{136, 0, 490}, + dictWord{138, 11, 176}, + dictWord{132, 0, 701}, + dictWord{ + 5, + 0, + 472, + }, + dictWord{7, 0, 380}, 
+ dictWord{137, 0, 758}, + dictWord{135, 0, 1947}, + dictWord{6, 0, 1079}, + dictWord{138, 0, 278}, + dictWord{138, 11, 391}, + dictWord{ + 5, + 10, + 329, + }, + dictWord{8, 10, 260}, + dictWord{139, 11, 156}, + dictWord{4, 0, 386}, + dictWord{7, 0, 41}, + dictWord{8, 0, 405}, + dictWord{8, 0, 728}, + dictWord{9, 0, 497}, + dictWord{11, 0, 110}, + dictWord{11, 0, 360}, + dictWord{15, 0, 37}, + dictWord{144, 0, 84}, + dictWord{5, 0, 46}, + dictWord{7, 0, 1452}, + dictWord{7, 0, 1480}, + dictWord{ + 8, + 0, + 634, + }, + dictWord{140, 0, 472}, + dictWord{136, 0, 961}, + dictWord{4, 0, 524}, + dictWord{136, 0, 810}, + dictWord{10, 0, 238}, + dictWord{141, 0, 33}, + dictWord{ + 132, + 10, + 657, + }, + dictWord{152, 10, 7}, + dictWord{133, 0, 532}, + dictWord{5, 0, 997}, + dictWord{135, 10, 1665}, + dictWord{7, 11, 594}, + dictWord{7, 11, 851}, + dictWord{ + 7, + 11, + 1858, + }, + dictWord{9, 11, 411}, + dictWord{9, 11, 574}, + dictWord{9, 11, 666}, + dictWord{9, 11, 737}, + dictWord{10, 11, 346}, + dictWord{10, 11, 712}, + dictWord{ + 11, + 11, + 246, + }, + dictWord{11, 11, 432}, + dictWord{11, 11, 517}, + dictWord{11, 11, 647}, + dictWord{11, 11, 679}, + dictWord{11, 11, 727}, + dictWord{12, 11, 304}, + dictWord{12, 11, 305}, + dictWord{12, 11, 323}, + dictWord{12, 11, 483}, + dictWord{12, 11, 572}, + dictWord{12, 11, 593}, + dictWord{12, 11, 602}, + dictWord{ + 13, + 11, + 95, + }, + dictWord{13, 11, 101}, + dictWord{13, 11, 171}, + dictWord{13, 11, 315}, + dictWord{13, 11, 378}, + dictWord{13, 11, 425}, + dictWord{13, 11, 475}, + dictWord{ + 14, + 11, + 63, + }, + dictWord{14, 11, 380}, + dictWord{14, 11, 384}, + dictWord{15, 11, 133}, + dictWord{18, 11, 112}, + dictWord{148, 11, 72}, + dictWord{5, 11, 955}, + dictWord{136, 11, 814}, + dictWord{134, 0, 1301}, + dictWord{5, 10, 66}, + dictWord{7, 10, 1896}, + dictWord{136, 10, 288}, + dictWord{133, 11, 56}, + dictWord{ + 134, + 10, + 1643, + }, + dictWord{6, 0, 1298}, + dictWord{148, 11, 100}, + dictWord{5, 0, 
782}, + dictWord{5, 0, 829}, + dictWord{6, 0, 671}, + dictWord{6, 0, 1156}, + dictWord{6, 0, 1738}, + dictWord{137, 11, 621}, + dictWord{4, 0, 306}, + dictWord{5, 0, 570}, + dictWord{7, 0, 1347}, + dictWord{5, 10, 91}, + dictWord{5, 10, 648}, + dictWord{5, 10, 750}, + dictWord{ + 5, + 10, + 781, + }, + dictWord{6, 10, 54}, + dictWord{6, 10, 112}, + dictWord{6, 10, 402}, + dictWord{6, 10, 1732}, + dictWord{7, 10, 315}, + dictWord{7, 10, 749}, + dictWord{ + 7, + 10, + 1900, + }, + dictWord{9, 10, 78}, + dictWord{9, 10, 508}, + dictWord{10, 10, 611}, + dictWord{10, 10, 811}, + dictWord{11, 10, 510}, + dictWord{11, 10, 728}, + dictWord{ + 13, + 10, + 36, + }, + dictWord{14, 10, 39}, + dictWord{16, 10, 83}, + dictWord{17, 10, 124}, + dictWord{148, 10, 30}, + dictWord{8, 10, 570}, + dictWord{9, 11, 477}, + dictWord{ + 141, + 11, + 78, + }, + dictWord{4, 11, 639}, + dictWord{10, 11, 4}, + dictWord{10, 10, 322}, + dictWord{10, 10, 719}, + dictWord{11, 10, 407}, + dictWord{11, 11, 638}, + dictWord{ + 12, + 11, + 177, + }, + dictWord{148, 11, 57}, + dictWord{7, 0, 1823}, + dictWord{139, 0, 693}, + dictWord{7, 0, 759}, + dictWord{5, 11, 758}, + dictWord{8, 10, 125}, + dictWord{ + 8, + 10, + 369, + }, + dictWord{8, 10, 524}, + dictWord{10, 10, 486}, + dictWord{11, 10, 13}, + dictWord{11, 10, 381}, + dictWord{11, 10, 736}, + dictWord{11, 10, 766}, + dictWord{ + 11, + 10, + 845, + }, + dictWord{13, 10, 114}, + dictWord{13, 10, 292}, + dictWord{142, 10, 47}, + dictWord{7, 0, 1932}, + dictWord{6, 10, 1684}, + dictWord{6, 10, 1731}, + dictWord{7, 10, 356}, + dictWord{8, 10, 54}, + dictWord{8, 10, 221}, + dictWord{9, 10, 225}, + dictWord{9, 10, 356}, + dictWord{10, 10, 77}, + dictWord{10, 10, 446}, + dictWord{ + 10, + 10, + 731, + }, + dictWord{12, 10, 404}, + dictWord{141, 10, 491}, + dictWord{135, 11, 552}, + dictWord{135, 11, 1112}, + dictWord{4, 0, 78}, + dictWord{5, 0, 96}, + dictWord{ + 5, + 0, + 182, + }, + dictWord{6, 0, 1257}, + dictWord{7, 0, 1724}, + dictWord{7, 0, 1825}, 
+ dictWord{10, 0, 394}, + dictWord{10, 0, 471}, + dictWord{11, 0, 532}, + dictWord{ + 14, + 0, + 340, + }, + dictWord{145, 0, 88}, + dictWord{139, 11, 328}, + dictWord{135, 0, 1964}, + dictWord{132, 10, 411}, + dictWord{4, 10, 80}, + dictWord{5, 10, 44}, + dictWord{ + 137, + 11, + 133, + }, + dictWord{5, 11, 110}, + dictWord{6, 11, 169}, + dictWord{6, 11, 1702}, + dictWord{7, 11, 400}, + dictWord{8, 11, 538}, + dictWord{9, 11, 184}, + dictWord{ + 9, + 11, + 524, + }, + dictWord{140, 11, 218}, + dictWord{4, 0, 521}, + dictWord{5, 10, 299}, + dictWord{7, 10, 1083}, + dictWord{140, 11, 554}, + dictWord{6, 11, 133}, + dictWord{ + 9, + 11, + 353, + }, + dictWord{12, 11, 628}, + dictWord{146, 11, 79}, + dictWord{6, 0, 215}, + dictWord{7, 0, 584}, + dictWord{7, 0, 1028}, + dictWord{7, 0, 1473}, + dictWord{ + 7, + 0, + 1721, + }, + dictWord{9, 0, 424}, + dictWord{138, 0, 779}, + dictWord{7, 0, 857}, + dictWord{7, 0, 1209}, + dictWord{7, 10, 1713}, + dictWord{9, 10, 537}, + dictWord{ + 10, + 10, + 165, + }, + dictWord{12, 10, 219}, + dictWord{140, 10, 561}, + dictWord{4, 10, 219}, + dictWord{6, 11, 93}, + dictWord{7, 11, 1422}, + dictWord{7, 10, 1761}, + dictWord{ + 7, + 11, + 1851, + }, + dictWord{8, 11, 673}, + dictWord{9, 10, 86}, + dictWord{9, 11, 529}, + dictWord{140, 11, 43}, + dictWord{137, 11, 371}, + dictWord{136, 0, 671}, + dictWord{ + 5, + 0, + 328, + }, + dictWord{135, 0, 918}, + dictWord{132, 0, 529}, + dictWord{9, 11, 25}, + dictWord{10, 11, 467}, + dictWord{138, 11, 559}, + dictWord{4, 11, 335}, + dictWord{ + 135, + 11, + 942, + }, + dictWord{134, 0, 716}, + dictWord{134, 0, 1509}, + dictWord{6, 0, 67}, + dictWord{7, 0, 258}, + dictWord{7, 0, 1630}, + dictWord{9, 0, 354}, + dictWord{ + 9, + 0, + 675, + }, + dictWord{10, 0, 830}, + dictWord{14, 0, 80}, + dictWord{17, 0, 80}, + dictWord{140, 10, 428}, + dictWord{134, 0, 1112}, + dictWord{6, 0, 141}, + dictWord{7, 0, 225}, + dictWord{9, 0, 59}, + dictWord{9, 0, 607}, + dictWord{10, 0, 312}, + dictWord{11, 0, 
687}, + dictWord{12, 0, 555}, + dictWord{13, 0, 373}, + dictWord{13, 0, 494}, + dictWord{ + 148, + 0, + 58, + }, + dictWord{133, 10, 514}, + dictWord{8, 11, 39}, + dictWord{10, 11, 773}, + dictWord{11, 11, 84}, + dictWord{12, 11, 205}, + dictWord{142, 11, 1}, + dictWord{ + 8, + 0, + 783, + }, + dictWord{5, 11, 601}, + dictWord{133, 11, 870}, + dictWord{136, 11, 594}, + dictWord{4, 10, 55}, + dictWord{5, 10, 301}, + dictWord{6, 10, 571}, + dictWord{ + 14, + 10, + 49, + }, + dictWord{146, 10, 102}, + dictWord{132, 11, 181}, + dictWord{134, 11, 1652}, + dictWord{133, 10, 364}, + dictWord{4, 11, 97}, + dictWord{5, 11, 147}, + dictWord{6, 11, 286}, + dictWord{7, 11, 1362}, + dictWord{141, 11, 176}, + dictWord{4, 10, 76}, + dictWord{7, 10, 1550}, + dictWord{9, 10, 306}, + dictWord{9, 10, 430}, + dictWord{9, 10, 663}, + dictWord{10, 10, 683}, + dictWord{11, 10, 427}, + dictWord{11, 10, 753}, + dictWord{12, 10, 334}, + dictWord{12, 10, 442}, + dictWord{ + 14, + 10, + 258, + }, + dictWord{14, 10, 366}, + dictWord{143, 10, 131}, + dictWord{137, 10, 52}, + dictWord{6, 0, 955}, + dictWord{134, 0, 1498}, + dictWord{6, 11, 375}, + dictWord{ + 7, + 11, + 169, + }, + dictWord{7, 11, 254}, + dictWord{136, 11, 780}, + dictWord{7, 0, 430}, + dictWord{11, 0, 46}, + dictWord{14, 0, 343}, + dictWord{142, 11, 343}, + dictWord{ + 135, + 0, + 1183, + }, + dictWord{5, 0, 602}, + dictWord{7, 0, 2018}, + dictWord{9, 0, 418}, + dictWord{9, 0, 803}, + dictWord{135, 11, 1447}, + dictWord{8, 0, 677}, + dictWord{ + 135, + 11, + 1044, + }, + dictWord{139, 11, 285}, + dictWord{4, 10, 656}, + dictWord{135, 10, 779}, + dictWord{135, 10, 144}, + dictWord{5, 11, 629}, + dictWord{ + 135, + 11, + 1549, + }, + dictWord{135, 10, 1373}, + dictWord{138, 11, 209}, + dictWord{7, 10, 554}, + dictWord{7, 10, 605}, + dictWord{141, 10, 10}, + dictWord{5, 10, 838}, + dictWord{ + 5, + 10, + 841, + }, + dictWord{134, 10, 1649}, + dictWord{133, 10, 1012}, + dictWord{6, 0, 1357}, + dictWord{134, 0, 1380}, + 
dictWord{144, 0, 53}, + dictWord{6, 0, 590}, + dictWord{7, 10, 365}, + dictWord{7, 10, 1357}, + dictWord{7, 10, 1497}, + dictWord{8, 10, 154}, + dictWord{141, 10, 281}, + dictWord{133, 10, 340}, + dictWord{ + 132, + 11, + 420, + }, + dictWord{135, 0, 329}, + dictWord{147, 11, 32}, + dictWord{4, 0, 469}, + dictWord{10, 11, 429}, + dictWord{139, 10, 495}, + dictWord{8, 10, 261}, + dictWord{ + 9, + 10, + 144, + }, + dictWord{9, 10, 466}, + dictWord{10, 10, 370}, + dictWord{12, 10, 470}, + dictWord{13, 10, 144}, + dictWord{142, 10, 348}, + dictWord{142, 0, 460}, + dictWord{4, 11, 325}, + dictWord{9, 10, 897}, + dictWord{138, 11, 125}, + dictWord{6, 0, 1743}, + dictWord{6, 10, 248}, + dictWord{9, 10, 546}, + dictWord{10, 10, 535}, + dictWord{11, 10, 681}, + dictWord{141, 10, 135}, + dictWord{4, 0, 990}, + dictWord{5, 0, 929}, + dictWord{6, 0, 340}, + dictWord{8, 0, 376}, + dictWord{8, 0, 807}, + dictWord{ + 8, + 0, + 963, + }, + dictWord{8, 0, 980}, + dictWord{138, 0, 1007}, + dictWord{134, 0, 1603}, + dictWord{140, 0, 250}, + dictWord{4, 11, 714}, + dictWord{133, 11, 469}, + dictWord{134, 10, 567}, + dictWord{136, 10, 445}, + dictWord{5, 0, 218}, + dictWord{7, 0, 1610}, + dictWord{8, 0, 646}, + dictWord{10, 0, 83}, + dictWord{11, 11, 138}, + dictWord{140, 11, 40}, + dictWord{7, 0, 1512}, + dictWord{135, 0, 1794}, + dictWord{135, 11, 1216}, + dictWord{11, 0, 0}, + dictWord{16, 0, 78}, + dictWord{132, 11, 718}, + dictWord{133, 0, 571}, + dictWord{132, 0, 455}, + dictWord{134, 0, 1012}, + dictWord{5, 11, 124}, + dictWord{5, 11, 144}, + dictWord{6, 11, 548}, + dictWord{7, 11, 15}, + dictWord{7, 11, 153}, + dictWord{137, 11, 629}, + dictWord{142, 11, 10}, + dictWord{6, 11, 75}, + dictWord{7, 11, 1531}, + dictWord{8, 11, 416}, + dictWord{9, 11, 240}, + dictWord{9, 11, 275}, + dictWord{10, 11, 100}, + dictWord{11, 11, 658}, + dictWord{11, 11, 979}, + dictWord{12, 11, 86}, + dictWord{13, 11, 468}, + dictWord{14, 11, 66}, + dictWord{14, 11, 207}, + dictWord{15, 11, 20}, + 
dictWord{15, 11, 25}, + dictWord{144, 11, 58}, + dictWord{132, 10, 577}, + dictWord{5, 11, 141}, + dictWord{ + 5, + 11, + 915, + }, + dictWord{6, 11, 1783}, + dictWord{7, 11, 211}, + dictWord{7, 11, 698}, + dictWord{7, 11, 1353}, + dictWord{9, 11, 83}, + dictWord{9, 11, 281}, + dictWord{ + 10, + 11, + 376, + }, + dictWord{10, 11, 431}, + dictWord{11, 11, 543}, + dictWord{12, 11, 664}, + dictWord{13, 11, 280}, + dictWord{13, 11, 428}, + dictWord{14, 11, 61}, + dictWord{ + 14, + 11, + 128, + }, + dictWord{17, 11, 52}, + dictWord{145, 11, 81}, + dictWord{6, 0, 161}, + dictWord{7, 0, 372}, + dictWord{137, 0, 597}, + dictWord{132, 0, 349}, + dictWord{ + 10, + 11, + 702, + }, + dictWord{139, 11, 245}, + dictWord{134, 0, 524}, + dictWord{134, 10, 174}, + dictWord{6, 0, 432}, + dictWord{9, 0, 751}, + dictWord{139, 0, 322}, + dictWord{147, 11, 94}, + dictWord{4, 11, 338}, + dictWord{133, 11, 400}, + dictWord{5, 0, 468}, + dictWord{10, 0, 325}, + dictWord{11, 0, 856}, + dictWord{12, 0, 345}, + dictWord{143, 0, 104}, + dictWord{133, 0, 223}, + dictWord{132, 0, 566}, + dictWord{4, 11, 221}, + dictWord{5, 11, 659}, + dictWord{5, 11, 989}, + dictWord{7, 11, 697}, + dictWord{7, 11, 1211}, + dictWord{138, 11, 284}, + dictWord{135, 11, 1070}, + dictWord{4, 0, 59}, + dictWord{135, 0, 1394}, + dictWord{6, 0, 436}, + dictWord{11, 0, 481}, + dictWord{5, 10, 878}, + dictWord{133, 10, 972}, + dictWord{4, 0, 48}, + dictWord{5, 0, 271}, + dictWord{135, 0, 953}, + dictWord{5, 0, 610}, + dictWord{136, 0, 457}, + dictWord{ + 4, + 0, + 773, + }, + dictWord{5, 0, 618}, + dictWord{137, 0, 756}, + dictWord{133, 0, 755}, + dictWord{135, 0, 1217}, + dictWord{138, 11, 507}, + dictWord{132, 10, 351}, + dictWord{132, 0, 197}, + dictWord{143, 11, 78}, + dictWord{4, 11, 188}, + dictWord{7, 11, 805}, + dictWord{11, 11, 276}, + dictWord{142, 11, 293}, + dictWord{ + 5, + 11, + 884, + }, + dictWord{139, 11, 991}, + dictWord{132, 10, 286}, + dictWord{10, 0, 259}, + dictWord{10, 0, 428}, + dictWord{7, 10, 
438}, + dictWord{7, 10, 627}, + dictWord{ + 7, + 10, + 1516, + }, + dictWord{8, 10, 40}, + dictWord{9, 10, 56}, + dictWord{9, 10, 294}, + dictWord{11, 10, 969}, + dictWord{11, 10, 995}, + dictWord{146, 10, 148}, + dictWord{ + 4, + 0, + 356, + }, + dictWord{5, 0, 217}, + dictWord{5, 0, 492}, + dictWord{5, 0, 656}, + dictWord{8, 0, 544}, + dictWord{136, 11, 544}, + dictWord{5, 0, 259}, + dictWord{6, 0, 1230}, + dictWord{7, 0, 414}, + dictWord{7, 0, 854}, + dictWord{142, 0, 107}, + dictWord{132, 0, 1007}, + dictWord{15, 0, 14}, + dictWord{144, 0, 5}, + dictWord{6, 0, 1580}, + dictWord{ + 132, + 10, + 738, + }, + dictWord{132, 11, 596}, + dictWord{132, 0, 673}, + dictWord{133, 10, 866}, + dictWord{6, 0, 1843}, + dictWord{135, 11, 1847}, + dictWord{4, 0, 165}, + dictWord{7, 0, 1398}, + dictWord{135, 0, 1829}, + dictWord{135, 11, 1634}, + dictWord{147, 11, 65}, + dictWord{6, 0, 885}, + dictWord{6, 0, 1009}, + dictWord{ + 137, + 0, + 809, + }, + dictWord{133, 10, 116}, + dictWord{132, 10, 457}, + dictWord{136, 11, 770}, + dictWord{9, 0, 498}, + dictWord{12, 0, 181}, + dictWord{10, 11, 361}, + dictWord{142, 11, 316}, + dictWord{134, 11, 595}, + dictWord{5, 0, 9}, + dictWord{7, 0, 297}, + dictWord{7, 0, 966}, + dictWord{140, 0, 306}, + dictWord{4, 11, 89}, + dictWord{ + 5, + 11, + 489, + }, + dictWord{6, 11, 315}, + dictWord{7, 11, 553}, + dictWord{7, 11, 1745}, + dictWord{138, 11, 243}, + dictWord{134, 0, 1487}, + dictWord{132, 0, 437}, + dictWord{ + 5, + 0, + 146, + }, + dictWord{6, 0, 411}, + dictWord{138, 0, 721}, + dictWord{5, 10, 527}, + dictWord{6, 10, 189}, + dictWord{135, 10, 859}, + dictWord{11, 10, 104}, + dictWord{ + 11, + 10, + 554, + }, + dictWord{15, 10, 60}, + dictWord{143, 10, 125}, + dictWord{6, 11, 1658}, + dictWord{9, 11, 3}, + dictWord{10, 11, 154}, + dictWord{11, 11, 641}, + dictWord{13, 11, 85}, + dictWord{13, 11, 201}, + dictWord{141, 11, 346}, + dictWord{6, 0, 177}, + dictWord{135, 0, 467}, + dictWord{134, 0, 1377}, + dictWord{ + 134, + 10, + 116, + 
}, + dictWord{136, 11, 645}, + dictWord{4, 11, 166}, + dictWord{5, 11, 505}, + dictWord{6, 11, 1670}, + dictWord{137, 11, 110}, + dictWord{133, 10, 487}, + dictWord{ + 4, + 10, + 86, + }, + dictWord{5, 10, 667}, + dictWord{5, 10, 753}, + dictWord{6, 10, 316}, + dictWord{6, 10, 455}, + dictWord{135, 10, 946}, + dictWord{133, 0, 200}, + dictWord{132, 0, 959}, + dictWord{6, 0, 1928}, + dictWord{134, 0, 1957}, + dictWord{139, 11, 203}, + dictWord{150, 10, 45}, + dictWord{4, 10, 79}, + dictWord{7, 10, 1773}, + dictWord{10, 10, 450}, + dictWord{11, 10, 589}, + dictWord{13, 10, 332}, + dictWord{13, 10, 493}, + dictWord{14, 10, 183}, + dictWord{14, 10, 334}, + dictWord{ + 14, + 10, + 362, + }, + dictWord{14, 10, 368}, + dictWord{14, 10, 376}, + dictWord{14, 10, 379}, + dictWord{19, 10, 90}, + dictWord{19, 10, 103}, + dictWord{19, 10, 127}, + dictWord{148, 10, 90}, + dictWord{6, 0, 1435}, + dictWord{135, 11, 1275}, + dictWord{134, 0, 481}, + dictWord{7, 11, 445}, + dictWord{8, 11, 307}, + dictWord{8, 11, 704}, + dictWord{10, 11, 41}, + dictWord{10, 11, 439}, + dictWord{11, 11, 237}, + dictWord{11, 11, 622}, + dictWord{140, 11, 201}, + dictWord{135, 11, 869}, + dictWord{ + 4, + 0, + 84, + }, + dictWord{7, 0, 1482}, + dictWord{10, 0, 76}, + dictWord{138, 0, 142}, + dictWord{11, 11, 277}, + dictWord{144, 11, 14}, + dictWord{135, 11, 1977}, + dictWord{ + 4, + 11, + 189, + }, + dictWord{5, 11, 713}, + dictWord{136, 11, 57}, + dictWord{133, 0, 1015}, + dictWord{138, 11, 371}, + dictWord{4, 0, 315}, + dictWord{5, 0, 507}, + dictWord{ + 135, + 0, + 1370, + }, + dictWord{4, 11, 552}, + dictWord{142, 10, 381}, + dictWord{9, 0, 759}, + dictWord{16, 0, 31}, + dictWord{16, 0, 39}, + dictWord{16, 0, 75}, + dictWord{18, 0, 24}, + dictWord{20, 0, 42}, + dictWord{152, 0, 1}, + dictWord{134, 0, 712}, + dictWord{134, 0, 1722}, + dictWord{133, 10, 663}, + dictWord{133, 10, 846}, + dictWord{ + 8, + 0, + 222, + }, + dictWord{8, 0, 476}, + dictWord{9, 0, 238}, + dictWord{11, 0, 516}, + 
dictWord{11, 0, 575}, + dictWord{15, 0, 109}, + dictWord{146, 0, 100}, + dictWord{7, 0, 1402}, + dictWord{7, 0, 1414}, + dictWord{12, 0, 456}, + dictWord{5, 10, 378}, + dictWord{8, 10, 465}, + dictWord{9, 10, 286}, + dictWord{10, 10, 185}, + dictWord{10, 10, 562}, + dictWord{10, 10, 635}, + dictWord{11, 10, 31}, + dictWord{11, 10, 393}, + dictWord{13, 10, 312}, + dictWord{18, 10, 65}, + dictWord{18, 10, 96}, + dictWord{147, 10, 89}, + dictWord{4, 0, 986}, + dictWord{6, 0, 1958}, + dictWord{6, 0, 2032}, + dictWord{8, 0, 934}, + dictWord{138, 0, 985}, + dictWord{7, 10, 1880}, + dictWord{9, 10, 680}, + dictWord{139, 10, 798}, + dictWord{134, 10, 1770}, + dictWord{145, 11, 49}, + dictWord{132, 11, 614}, + dictWord{132, 10, 648}, + dictWord{5, 10, 945}, + dictWord{ + 6, + 10, + 1656, + }, + dictWord{6, 10, 1787}, + dictWord{7, 10, 167}, + dictWord{8, 10, 824}, + dictWord{9, 10, 391}, + dictWord{10, 10, 375}, + dictWord{139, 10, 185}, + dictWord{138, 11, 661}, + dictWord{7, 0, 1273}, + dictWord{135, 11, 1945}, + dictWord{7, 0, 706}, + dictWord{7, 0, 1058}, + dictWord{138, 0, 538}, + dictWord{7, 10, 1645}, + dictWord{8, 10, 352}, + dictWord{137, 10, 249}, + dictWord{132, 10, 152}, + dictWord{11, 0, 92}, + dictWord{11, 0, 196}, + dictWord{11, 0, 409}, + dictWord{11, 0, 450}, + dictWord{11, 0, 666}, + dictWord{11, 0, 777}, + dictWord{12, 0, 262}, + dictWord{13, 0, 385}, + dictWord{13, 0, 393}, + dictWord{15, 0, 115}, + dictWord{16, 0, 45}, + dictWord{145, 0, 82}, + dictWord{133, 10, 1006}, + dictWord{6, 0, 40}, + dictWord{135, 0, 1781}, + dictWord{9, 11, 614}, + dictWord{139, 11, 327}, + dictWord{5, 10, 420}, + dictWord{135, 10, 1449}, + dictWord{135, 0, 431}, + dictWord{10, 0, 97}, + dictWord{135, 10, 832}, + dictWord{6, 0, 423}, + dictWord{7, 0, 665}, + dictWord{ + 135, + 0, + 1210, + }, + dictWord{7, 0, 237}, + dictWord{8, 0, 664}, + dictWord{9, 0, 42}, + dictWord{9, 0, 266}, + dictWord{9, 0, 380}, + dictWord{9, 0, 645}, + dictWord{10, 0, 177}, + dictWord{ + 138, + 0, + 
276, + }, + dictWord{7, 0, 264}, + dictWord{133, 10, 351}, + dictWord{8, 0, 213}, + dictWord{5, 10, 40}, + dictWord{7, 10, 598}, + dictWord{7, 10, 1638}, + dictWord{ + 9, + 10, + 166, + }, + dictWord{9, 10, 640}, + dictWord{9, 10, 685}, + dictWord{9, 10, 773}, + dictWord{11, 10, 215}, + dictWord{13, 10, 65}, + dictWord{14, 10, 172}, + dictWord{ + 14, + 10, + 317, + }, + dictWord{145, 10, 6}, + dictWord{5, 11, 84}, + dictWord{134, 11, 163}, + dictWord{8, 10, 60}, + dictWord{9, 10, 343}, + dictWord{139, 10, 769}, + dictWord{ + 137, + 0, + 455, + }, + dictWord{133, 11, 410}, + dictWord{8, 0, 906}, + dictWord{12, 0, 700}, + dictWord{12, 0, 706}, + dictWord{140, 0, 729}, + dictWord{21, 11, 33}, + dictWord{ + 150, + 11, + 40, + }, + dictWord{7, 10, 1951}, + dictWord{8, 10, 765}, + dictWord{8, 10, 772}, + dictWord{140, 10, 671}, + dictWord{7, 10, 108}, + dictWord{8, 10, 219}, + dictWord{ + 8, + 10, + 388, + }, + dictWord{9, 10, 639}, + dictWord{9, 10, 775}, + dictWord{11, 10, 275}, + dictWord{140, 10, 464}, + dictWord{5, 11, 322}, + dictWord{7, 11, 1941}, + dictWord{ + 8, + 11, + 186, + }, + dictWord{9, 11, 262}, + dictWord{10, 11, 187}, + dictWord{14, 11, 208}, + dictWord{146, 11, 130}, + dictWord{139, 0, 624}, + dictWord{8, 0, 574}, + dictWord{ + 5, + 11, + 227, + }, + dictWord{140, 11, 29}, + dictWord{7, 11, 1546}, + dictWord{11, 11, 299}, + dictWord{142, 11, 407}, + dictWord{5, 10, 15}, + dictWord{6, 10, 56}, + dictWord{ + 7, + 10, + 1758, + }, + dictWord{8, 10, 500}, + dictWord{9, 10, 730}, + dictWord{11, 10, 331}, + dictWord{13, 10, 150}, + dictWord{142, 10, 282}, + dictWord{7, 11, 1395}, + dictWord{8, 11, 486}, + dictWord{9, 11, 236}, + dictWord{9, 11, 878}, + dictWord{10, 11, 218}, + dictWord{11, 11, 95}, + dictWord{19, 11, 17}, + dictWord{147, 11, 31}, + dictWord{135, 11, 2043}, + dictWord{4, 0, 354}, + dictWord{146, 11, 4}, + dictWord{140, 11, 80}, + dictWord{135, 0, 1558}, + dictWord{134, 10, 1886}, + dictWord{ + 5, + 10, + 205, + }, + dictWord{6, 10, 438}, + 
dictWord{137, 10, 711}, + dictWord{133, 11, 522}, + dictWord{133, 10, 534}, + dictWord{7, 0, 235}, + dictWord{7, 0, 1475}, + dictWord{ + 15, + 0, + 68, + }, + dictWord{146, 0, 120}, + dictWord{137, 10, 691}, + dictWord{4, 0, 942}, + dictWord{6, 0, 1813}, + dictWord{8, 0, 917}, + dictWord{10, 0, 884}, + dictWord{ + 12, + 0, + 696, + }, + dictWord{12, 0, 717}, + dictWord{12, 0, 723}, + dictWord{12, 0, 738}, + dictWord{12, 0, 749}, + dictWord{12, 0, 780}, + dictWord{16, 0, 97}, + dictWord{146, 0, 169}, + dictWord{6, 10, 443}, + dictWord{8, 11, 562}, + dictWord{9, 10, 237}, + dictWord{9, 10, 571}, + dictWord{9, 10, 695}, + dictWord{10, 10, 139}, + dictWord{11, 10, 715}, + dictWord{12, 10, 417}, + dictWord{141, 10, 421}, + dictWord{135, 0, 957}, + dictWord{133, 0, 830}, + dictWord{134, 11, 1771}, + dictWord{146, 0, 23}, + dictWord{ + 5, + 0, + 496, + }, + dictWord{6, 0, 694}, + dictWord{7, 0, 203}, + dictWord{7, 11, 1190}, + dictWord{137, 11, 620}, + dictWord{137, 11, 132}, + dictWord{6, 0, 547}, + dictWord{ + 134, + 0, + 1549, + }, + dictWord{8, 11, 258}, + dictWord{9, 11, 208}, + dictWord{137, 11, 359}, + dictWord{4, 0, 864}, + dictWord{5, 0, 88}, + dictWord{137, 0, 239}, + dictWord{ + 135, + 11, + 493, + }, + dictWord{4, 11, 317}, + dictWord{135, 11, 1279}, + dictWord{132, 11, 477}, + dictWord{4, 10, 578}, + dictWord{5, 11, 63}, + dictWord{133, 11, 509}, + dictWord{ + 7, + 0, + 650, + }, + dictWord{135, 0, 1310}, + dictWord{7, 0, 1076}, + dictWord{9, 0, 80}, + dictWord{11, 0, 78}, + dictWord{11, 0, 421}, + dictWord{11, 0, 534}, + dictWord{ + 140, + 0, + 545, + }, + dictWord{132, 11, 288}, + dictWord{12, 0, 553}, + dictWord{14, 0, 118}, + dictWord{133, 10, 923}, + dictWord{7, 0, 274}, + dictWord{11, 0, 479}, + dictWord{ + 139, + 0, + 507, + }, + dictWord{8, 11, 89}, + dictWord{8, 11, 620}, + dictWord{9, 11, 49}, + dictWord{10, 11, 774}, + dictWord{11, 11, 628}, + dictWord{12, 11, 322}, + dictWord{ + 143, + 11, + 124, + }, + dictWord{4, 0, 497}, + dictWord{135, 0, 
1584}, + dictWord{7, 0, 261}, + dictWord{7, 0, 1115}, + dictWord{7, 0, 1354}, + dictWord{7, 0, 1404}, + dictWord{ + 7, + 0, + 1588, + }, + dictWord{7, 0, 1705}, + dictWord{7, 0, 1902}, + dictWord{9, 0, 465}, + dictWord{10, 0, 248}, + dictWord{10, 0, 349}, + dictWord{10, 0, 647}, + dictWord{11, 0, 527}, + dictWord{11, 0, 660}, + dictWord{11, 0, 669}, + dictWord{12, 0, 529}, + dictWord{13, 0, 305}, + dictWord{132, 10, 924}, + dictWord{133, 10, 665}, + dictWord{ + 136, + 0, + 13, + }, + dictWord{6, 0, 791}, + dictWord{138, 11, 120}, + dictWord{7, 0, 642}, + dictWord{8, 0, 250}, + dictWord{11, 0, 123}, + dictWord{11, 0, 137}, + dictWord{13, 0, 48}, + dictWord{142, 0, 95}, + dictWord{4, 10, 265}, + dictWord{7, 10, 807}, + dictWord{135, 10, 950}, + dictWord{5, 10, 93}, + dictWord{140, 10, 267}, + dictWord{135, 0, 1429}, + dictWord{4, 0, 949}, + dictWord{10, 0, 885}, + dictWord{10, 0, 891}, + dictWord{10, 0, 900}, + dictWord{10, 0, 939}, + dictWord{12, 0, 760}, + dictWord{142, 0, 449}, + dictWord{139, 11, 366}, + dictWord{132, 0, 818}, + dictWord{134, 11, 85}, + dictWord{135, 10, 994}, + dictWord{7, 0, 330}, + dictWord{5, 10, 233}, + dictWord{5, 10, 320}, + dictWord{6, 10, 140}, + dictWord{136, 10, 295}, + dictWord{4, 0, 1004}, + dictWord{8, 0, 982}, + dictWord{136, 0, 993}, + dictWord{133, 10, 978}, + dictWord{4, 10, 905}, + dictWord{6, 10, 1701}, + dictWord{137, 10, 843}, + dictWord{10, 0, 545}, + dictWord{140, 0, 301}, + dictWord{6, 0, 947}, + dictWord{134, 0, 1062}, + dictWord{ + 134, + 0, + 1188, + }, + dictWord{4, 0, 904}, + dictWord{5, 0, 794}, + dictWord{152, 10, 6}, + dictWord{134, 0, 1372}, + dictWord{135, 11, 608}, + dictWord{5, 11, 279}, + dictWord{ + 6, + 11, + 235, + }, + dictWord{7, 11, 468}, + dictWord{8, 11, 446}, + dictWord{9, 11, 637}, + dictWord{10, 11, 717}, + dictWord{11, 11, 738}, + dictWord{140, 11, 514}, + dictWord{ + 132, + 10, + 509, + }, + dictWord{5, 11, 17}, + dictWord{6, 11, 371}, + dictWord{137, 11, 528}, + dictWord{132, 0, 693}, + 
dictWord{4, 11, 115}, + dictWord{5, 11, 669}, + dictWord{ + 6, + 11, + 407, + }, + dictWord{8, 11, 311}, + dictWord{11, 11, 10}, + dictWord{141, 11, 5}, + dictWord{11, 0, 377}, + dictWord{7, 10, 273}, + dictWord{137, 11, 381}, + dictWord{ + 135, + 0, + 695, + }, + dictWord{7, 0, 386}, + dictWord{138, 0, 713}, + dictWord{135, 10, 1041}, + dictWord{134, 0, 1291}, + dictWord{6, 0, 7}, + dictWord{6, 0, 35}, + dictWord{ + 7, + 0, + 147, + }, + dictWord{7, 0, 1069}, + dictWord{7, 0, 1568}, + dictWord{7, 0, 1575}, + dictWord{7, 0, 1917}, + dictWord{8, 0, 43}, + dictWord{8, 0, 208}, + dictWord{9, 0, 128}, + dictWord{ + 9, + 0, + 866, + }, + dictWord{10, 0, 20}, + dictWord{11, 0, 981}, + dictWord{147, 0, 33}, + dictWord{7, 0, 893}, + dictWord{141, 0, 424}, + dictWord{139, 10, 234}, + dictWord{ + 150, + 11, + 56, + }, + dictWord{5, 11, 779}, + dictWord{5, 11, 807}, + dictWord{6, 11, 1655}, + dictWord{134, 11, 1676}, + dictWord{5, 10, 802}, + dictWord{7, 10, 2021}, + dictWord{136, 10, 805}, + dictWord{4, 11, 196}, + dictWord{5, 10, 167}, + dictWord{5, 11, 558}, + dictWord{5, 10, 899}, + dictWord{5, 11, 949}, + dictWord{6, 10, 410}, + dictWord{137, 10, 777}, + dictWord{137, 10, 789}, + dictWord{134, 10, 1705}, + dictWord{8, 0, 904}, + dictWord{140, 0, 787}, + dictWord{6, 0, 322}, + dictWord{9, 0, 552}, + dictWord{11, 0, 274}, + dictWord{13, 0, 209}, + dictWord{13, 0, 499}, + dictWord{14, 0, 85}, + dictWord{15, 0, 126}, + dictWord{145, 0, 70}, + dictWord{135, 10, 10}, + dictWord{ + 5, + 10, + 11, + }, + dictWord{6, 10, 117}, + dictWord{6, 10, 485}, + dictWord{7, 10, 1133}, + dictWord{9, 10, 582}, + dictWord{9, 10, 594}, + dictWord{11, 10, 21}, + dictWord{ + 11, + 10, + 818, + }, + dictWord{12, 10, 535}, + dictWord{141, 10, 86}, + dictWord{4, 10, 264}, + dictWord{7, 10, 1067}, + dictWord{8, 10, 204}, + dictWord{8, 10, 385}, + dictWord{139, 10, 953}, + dictWord{132, 11, 752}, + dictWord{138, 10, 56}, + dictWord{133, 10, 470}, + dictWord{6, 0, 1808}, + dictWord{8, 0, 83}, + 
dictWord{8, 0, 742}, + dictWord{8, 0, 817}, + dictWord{9, 0, 28}, + dictWord{9, 0, 29}, + dictWord{9, 0, 885}, + dictWord{10, 0, 387}, + dictWord{11, 0, 633}, + dictWord{11, 0, 740}, + dictWord{13, 0, 235}, + dictWord{13, 0, 254}, + dictWord{15, 0, 143}, + dictWord{143, 0, 146}, + dictWord{140, 0, 49}, + dictWord{134, 0, 1832}, + dictWord{4, 11, 227}, + dictWord{5, 11, 159}, + dictWord{5, 11, 409}, + dictWord{7, 11, 80}, + dictWord{10, 11, 294}, + dictWord{10, 11, 479}, + dictWord{12, 11, 418}, + dictWord{14, 11, 50}, + dictWord{14, 11, 249}, + dictWord{142, 11, 295}, + dictWord{7, 11, 1470}, + dictWord{8, 11, 66}, + dictWord{8, 11, 137}, + dictWord{8, 11, 761}, + dictWord{9, 11, 638}, + dictWord{11, 11, 80}, + dictWord{11, 11, 212}, + dictWord{11, 11, 368}, + dictWord{11, 11, 418}, + dictWord{12, 11, 8}, + dictWord{13, 11, 15}, + dictWord{16, 11, 61}, + dictWord{17, 11, 59}, + dictWord{19, 11, 28}, + dictWord{148, 11, 84}, + dictWord{139, 10, 1015}, + dictWord{138, 11, 468}, + dictWord{135, 0, 421}, + dictWord{6, 0, 415}, + dictWord{ + 7, + 0, + 1049, + }, + dictWord{137, 0, 442}, + dictWord{6, 11, 38}, + dictWord{7, 11, 1220}, + dictWord{8, 11, 185}, + dictWord{8, 11, 256}, + dictWord{9, 11, 22}, + dictWord{ + 9, + 11, + 331, + }, + dictWord{10, 11, 738}, + dictWord{11, 11, 205}, + dictWord{11, 11, 540}, + dictWord{11, 11, 746}, + dictWord{13, 11, 399}, + dictWord{13, 11, 465}, + dictWord{ + 14, + 11, + 88, + }, + dictWord{142, 11, 194}, + dictWord{139, 0, 289}, + dictWord{133, 10, 715}, + dictWord{4, 0, 110}, + dictWord{10, 0, 415}, + dictWord{10, 0, 597}, + dictWord{142, 0, 206}, + dictWord{4, 11, 159}, + dictWord{6, 11, 115}, + dictWord{7, 11, 252}, + dictWord{7, 11, 257}, + dictWord{7, 11, 1928}, + dictWord{8, 11, 69}, + dictWord{ + 9, + 11, + 384, + }, + dictWord{10, 11, 91}, + dictWord{10, 11, 615}, + dictWord{12, 11, 375}, + dictWord{14, 11, 235}, + dictWord{18, 11, 117}, + dictWord{147, 11, 123}, + dictWord{5, 11, 911}, + dictWord{136, 11, 278}, + 
dictWord{7, 0, 205}, + dictWord{7, 0, 2000}, + dictWord{8, 10, 794}, + dictWord{9, 10, 400}, + dictWord{10, 10, 298}, + dictWord{142, 10, 228}, + dictWord{135, 11, 1774}, + dictWord{4, 11, 151}, + dictWord{7, 11, 1567}, + dictWord{8, 11, 351}, + dictWord{137, 11, 322}, + dictWord{ + 136, + 10, + 724, + }, + dictWord{133, 11, 990}, + dictWord{7, 0, 1539}, + dictWord{11, 0, 512}, + dictWord{13, 0, 205}, + dictWord{19, 0, 30}, + dictWord{22, 0, 36}, + dictWord{23, 0, 19}, + dictWord{135, 11, 1539}, + dictWord{5, 11, 194}, + dictWord{7, 11, 1662}, + dictWord{9, 11, 90}, + dictWord{140, 11, 180}, + dictWord{6, 10, 190}, + dictWord{ + 7, + 10, + 768, + }, + dictWord{135, 10, 1170}, + dictWord{134, 0, 1340}, + dictWord{4, 0, 283}, + dictWord{135, 0, 1194}, + dictWord{133, 11, 425}, + dictWord{133, 11, 971}, + dictWord{12, 0, 549}, + dictWord{14, 10, 67}, + dictWord{147, 10, 60}, + dictWord{135, 10, 1023}, + dictWord{134, 0, 1720}, + dictWord{138, 11, 587}, + dictWord{ + 5, + 11, + 72, + }, + dictWord{6, 11, 264}, + dictWord{7, 11, 21}, + dictWord{7, 11, 46}, + dictWord{7, 11, 2013}, + dictWord{8, 11, 215}, + dictWord{8, 11, 513}, + dictWord{10, 11, 266}, + dictWord{139, 11, 22}, + dictWord{5, 0, 319}, + dictWord{135, 0, 534}, + dictWord{6, 10, 137}, + dictWord{9, 10, 75}, + dictWord{9, 10, 253}, + dictWord{10, 10, 194}, + dictWord{138, 10, 444}, + dictWord{7, 0, 1180}, + dictWord{20, 0, 112}, + dictWord{6, 11, 239}, + dictWord{7, 11, 118}, + dictWord{10, 11, 95}, + dictWord{11, 11, 603}, + dictWord{13, 11, 443}, + dictWord{14, 11, 160}, + dictWord{143, 11, 4}, + dictWord{134, 11, 431}, + dictWord{5, 11, 874}, + dictWord{6, 11, 1677}, + dictWord{ + 11, + 10, + 643, + }, + dictWord{12, 10, 115}, + dictWord{143, 11, 0}, + dictWord{134, 0, 967}, + dictWord{6, 11, 65}, + dictWord{7, 11, 939}, + dictWord{7, 11, 1172}, + dictWord{ + 7, + 11, + 1671, + }, + dictWord{9, 11, 540}, + dictWord{10, 11, 696}, + dictWord{11, 11, 265}, + dictWord{11, 11, 732}, + dictWord{11, 11, 928}, + 
dictWord{11, 11, 937}, + dictWord{ + 12, + 11, + 399, + }, + dictWord{13, 11, 438}, + dictWord{149, 11, 19}, + dictWord{137, 11, 200}, + dictWord{135, 0, 1940}, + dictWord{5, 10, 760}, + dictWord{7, 10, 542}, + dictWord{8, 10, 135}, + dictWord{136, 10, 496}, + dictWord{140, 11, 44}, + dictWord{7, 11, 1655}, + dictWord{136, 11, 305}, + dictWord{7, 10, 319}, + dictWord{ + 7, + 10, + 355, + }, + dictWord{7, 10, 763}, + dictWord{10, 10, 389}, + dictWord{145, 10, 43}, + dictWord{136, 0, 735}, + dictWord{138, 10, 786}, + dictWord{137, 11, 19}, + dictWord{132, 11, 696}, + dictWord{5, 0, 132}, + dictWord{9, 0, 486}, + dictWord{9, 0, 715}, + dictWord{10, 0, 458}, + dictWord{11, 0, 373}, + dictWord{11, 0, 668}, + dictWord{ + 11, + 0, + 795, + }, + dictWord{11, 0, 897}, + dictWord{12, 0, 272}, + dictWord{12, 0, 424}, + dictWord{12, 0, 539}, + dictWord{12, 0, 558}, + dictWord{14, 0, 245}, + dictWord{ + 14, + 0, + 263, + }, + dictWord{14, 0, 264}, + dictWord{14, 0, 393}, + dictWord{142, 0, 403}, + dictWord{10, 0, 38}, + dictWord{139, 0, 784}, + dictWord{132, 0, 838}, + dictWord{ + 4, + 11, + 302, + }, + dictWord{135, 11, 1766}, + dictWord{133, 0, 379}, + dictWord{5, 0, 8}, + dictWord{6, 0, 89}, + dictWord{6, 0, 400}, + dictWord{7, 0, 1569}, + dictWord{7, 0, 1623}, + dictWord{7, 0, 1850}, + dictWord{8, 0, 218}, + dictWord{8, 0, 422}, + dictWord{9, 0, 570}, + dictWord{10, 0, 626}, + dictWord{4, 11, 726}, + dictWord{133, 11, 630}, + dictWord{ + 4, + 0, + 1017, + }, + dictWord{138, 0, 660}, + dictWord{6, 0, 387}, + dictWord{7, 0, 882}, + dictWord{141, 0, 111}, + dictWord{6, 0, 224}, + dictWord{7, 0, 877}, + dictWord{ + 137, + 0, + 647, + }, + dictWord{4, 10, 58}, + dictWord{5, 10, 286}, + dictWord{6, 10, 319}, + dictWord{7, 10, 402}, + dictWord{7, 10, 1254}, + dictWord{7, 10, 1903}, + dictWord{ + 8, + 10, + 356, + }, + dictWord{140, 10, 408}, + dictWord{135, 0, 790}, + dictWord{9, 0, 510}, + dictWord{10, 0, 53}, + dictWord{4, 10, 389}, + dictWord{9, 10, 181}, + dictWord{ + 10, + 
10, + 29, + }, + dictWord{10, 10, 816}, + dictWord{11, 10, 311}, + dictWord{11, 10, 561}, + dictWord{12, 10, 67}, + dictWord{141, 10, 181}, + dictWord{142, 0, 458}, + dictWord{ + 6, + 11, + 118, + }, + dictWord{7, 11, 215}, + dictWord{7, 11, 1521}, + dictWord{140, 11, 11}, + dictWord{134, 0, 954}, + dictWord{135, 0, 394}, + dictWord{134, 0, 1367}, + dictWord{5, 11, 225}, + dictWord{133, 10, 373}, + dictWord{132, 0, 882}, + dictWord{7, 0, 1409}, + dictWord{135, 10, 1972}, + dictWord{135, 10, 1793}, + dictWord{ + 4, + 11, + 370, + }, + dictWord{5, 11, 756}, + dictWord{135, 11, 1326}, + dictWord{150, 11, 13}, + dictWord{7, 11, 354}, + dictWord{10, 11, 410}, + dictWord{139, 11, 815}, + dictWord{6, 11, 1662}, + dictWord{7, 11, 48}, + dictWord{8, 11, 771}, + dictWord{10, 11, 116}, + dictWord{13, 11, 104}, + dictWord{14, 11, 105}, + dictWord{14, 11, 184}, + dictWord{15, 11, 168}, + dictWord{19, 11, 92}, + dictWord{148, 11, 68}, + dictWord{7, 0, 124}, + dictWord{136, 0, 38}, + dictWord{5, 0, 261}, + dictWord{7, 0, 78}, + dictWord{ + 7, + 0, + 199, + }, + dictWord{8, 0, 815}, + dictWord{9, 0, 126}, + dictWord{10, 0, 342}, + dictWord{140, 0, 647}, + dictWord{4, 0, 628}, + dictWord{140, 0, 724}, + dictWord{7, 0, 266}, + dictWord{8, 0, 804}, + dictWord{7, 10, 1651}, + dictWord{145, 10, 89}, + dictWord{135, 0, 208}, + dictWord{134, 0, 1178}, + dictWord{6, 0, 79}, + dictWord{135, 0, 1519}, + dictWord{132, 10, 672}, + dictWord{133, 10, 737}, + dictWord{136, 0, 741}, + dictWord{132, 11, 120}, + dictWord{4, 0, 710}, + dictWord{6, 0, 376}, + dictWord{ + 134, + 0, + 606, + }, + dictWord{134, 0, 1347}, + dictWord{134, 0, 1494}, + dictWord{6, 0, 850}, + dictWord{6, 0, 1553}, + dictWord{137, 0, 821}, + dictWord{5, 10, 145}, + dictWord{ + 134, + 11, + 593, + }, + dictWord{7, 0, 1311}, + dictWord{140, 0, 135}, + dictWord{4, 0, 467}, + dictWord{5, 0, 405}, + dictWord{134, 0, 544}, + dictWord{5, 11, 820}, + dictWord{ + 135, + 11, + 931, + }, + dictWord{6, 0, 100}, + dictWord{7, 0, 244}, + 
dictWord{7, 0, 632}, + dictWord{7, 0, 1609}, + dictWord{8, 0, 178}, + dictWord{8, 0, 638}, + dictWord{141, 0, 58}, + dictWord{4, 10, 387}, + dictWord{135, 10, 1288}, + dictWord{6, 11, 151}, + dictWord{6, 11, 1675}, + dictWord{7, 11, 383}, + dictWord{151, 11, 10}, + dictWord{ + 132, + 0, + 481, + }, + dictWord{135, 10, 550}, + dictWord{134, 0, 1378}, + dictWord{6, 11, 1624}, + dictWord{11, 11, 11}, + dictWord{12, 11, 422}, + dictWord{13, 11, 262}, + dictWord{142, 11, 360}, + dictWord{133, 0, 791}, + dictWord{4, 11, 43}, + dictWord{5, 11, 344}, + dictWord{133, 11, 357}, + dictWord{7, 0, 1227}, + dictWord{140, 0, 978}, + dictWord{7, 0, 686}, + dictWord{8, 0, 33}, + dictWord{8, 0, 238}, + dictWord{10, 0, 616}, + dictWord{11, 0, 467}, + dictWord{11, 0, 881}, + dictWord{13, 0, 217}, + dictWord{ + 13, + 0, + 253, + }, + dictWord{142, 0, 268}, + dictWord{137, 0, 857}, + dictWord{8, 0, 467}, + dictWord{8, 0, 1006}, + dictWord{7, 11, 148}, + dictWord{8, 11, 284}, + dictWord{ + 141, + 11, + 63, + }, + dictWord{4, 10, 576}, + dictWord{135, 10, 1263}, + dictWord{133, 11, 888}, + dictWord{5, 10, 919}, + dictWord{134, 10, 1673}, + dictWord{20, 10, 37}, + dictWord{148, 11, 37}, + dictWord{132, 0, 447}, + dictWord{132, 11, 711}, + dictWord{4, 0, 128}, + dictWord{5, 0, 415}, + dictWord{6, 0, 462}, + dictWord{7, 0, 294}, + dictWord{ + 7, + 0, + 578, + }, + dictWord{10, 0, 710}, + dictWord{139, 0, 86}, + dictWord{4, 10, 82}, + dictWord{5, 10, 333}, + dictWord{5, 10, 904}, + dictWord{6, 10, 207}, + dictWord{7, 10, 325}, + dictWord{7, 10, 1726}, + dictWord{8, 10, 101}, + dictWord{10, 10, 778}, + dictWord{139, 10, 220}, + dictWord{136, 0, 587}, + dictWord{137, 11, 440}, + dictWord{ + 133, + 10, + 903, + }, + dictWord{6, 0, 427}, + dictWord{7, 0, 1018}, + dictWord{138, 0, 692}, + dictWord{4, 0, 195}, + dictWord{135, 0, 802}, + dictWord{140, 10, 147}, + dictWord{ + 134, + 0, + 1546, + }, + dictWord{134, 0, 684}, + dictWord{132, 10, 705}, + dictWord{136, 0, 345}, + dictWord{11, 11, 678}, + 
dictWord{140, 11, 307}, + dictWord{ + 133, + 0, + 365, + }, + dictWord{134, 0, 1683}, + dictWord{4, 11, 65}, + dictWord{5, 11, 479}, + dictWord{5, 11, 1004}, + dictWord{7, 11, 1913}, + dictWord{8, 11, 317}, + dictWord{ + 9, + 11, + 302, + }, + dictWord{10, 11, 612}, + dictWord{141, 11, 22}, + dictWord{138, 0, 472}, + dictWord{4, 11, 261}, + dictWord{135, 11, 510}, + dictWord{134, 10, 90}, + dictWord{142, 0, 433}, + dictWord{151, 0, 28}, + dictWord{4, 11, 291}, + dictWord{7, 11, 101}, + dictWord{9, 11, 515}, + dictWord{12, 11, 152}, + dictWord{12, 11, 443}, + dictWord{13, 11, 392}, + dictWord{142, 11, 357}, + dictWord{140, 0, 997}, + dictWord{5, 0, 3}, + dictWord{8, 0, 578}, + dictWord{9, 0, 118}, + dictWord{10, 0, 705}, + dictWord{ + 141, + 0, + 279, + }, + dictWord{135, 11, 1266}, + dictWord{7, 10, 813}, + dictWord{12, 10, 497}, + dictWord{141, 10, 56}, + dictWord{133, 0, 229}, + dictWord{6, 10, 125}, + dictWord{135, 10, 1277}, + dictWord{8, 0, 102}, + dictWord{10, 0, 578}, + dictWord{10, 0, 672}, + dictWord{12, 0, 496}, + dictWord{13, 0, 408}, + dictWord{14, 0, 121}, + dictWord{17, 0, 106}, + dictWord{151, 10, 12}, + dictWord{6, 0, 866}, + dictWord{134, 0, 1080}, + dictWord{136, 0, 1022}, + dictWord{4, 11, 130}, + dictWord{135, 11, 843}, + dictWord{5, 11, 42}, + dictWord{5, 11, 879}, + dictWord{7, 11, 245}, + dictWord{7, 11, 324}, + dictWord{7, 11, 1532}, + dictWord{11, 11, 463}, + dictWord{11, 11, 472}, + dictWord{13, 11, 363}, + dictWord{144, 11, 52}, + dictWord{150, 0, 55}, + dictWord{8, 0, 115}, + dictWord{8, 0, 350}, + dictWord{9, 0, 489}, + dictWord{10, 0, 128}, + dictWord{ + 11, + 0, + 306, + }, + dictWord{12, 0, 373}, + dictWord{14, 0, 30}, + dictWord{17, 0, 79}, + dictWord{19, 0, 80}, + dictWord{4, 11, 134}, + dictWord{133, 11, 372}, + dictWord{ + 134, + 0, + 657, + }, + dictWord{134, 0, 933}, + dictWord{135, 11, 1147}, + dictWord{4, 0, 230}, + dictWord{133, 0, 702}, + dictWord{134, 0, 1728}, + dictWord{4, 0, 484}, + dictWord{ + 18, + 0, + 26, + }, + 
dictWord{19, 0, 42}, + dictWord{20, 0, 43}, + dictWord{21, 0, 0}, + dictWord{23, 0, 27}, + dictWord{152, 0, 14}, + dictWord{7, 0, 185}, + dictWord{135, 0, 703}, + dictWord{ + 6, + 0, + 417, + }, + dictWord{10, 0, 618}, + dictWord{7, 10, 1106}, + dictWord{9, 10, 770}, + dictWord{11, 10, 112}, + dictWord{140, 10, 413}, + dictWord{134, 0, 803}, + dictWord{132, 11, 644}, + dictWord{134, 0, 1262}, + dictWord{7, 11, 540}, + dictWord{12, 10, 271}, + dictWord{145, 10, 109}, + dictWord{135, 11, 123}, + dictWord{ + 132, + 0, + 633, + }, + dictWord{134, 11, 623}, + dictWord{4, 11, 908}, + dictWord{5, 11, 359}, + dictWord{5, 11, 508}, + dictWord{6, 11, 1723}, + dictWord{7, 11, 343}, + dictWord{ + 7, + 11, + 1996, + }, + dictWord{135, 11, 2026}, + dictWord{135, 0, 479}, + dictWord{10, 0, 262}, + dictWord{7, 10, 304}, + dictWord{9, 10, 646}, + dictWord{9, 10, 862}, + dictWord{ + 11, + 10, + 696, + }, + dictWord{12, 10, 208}, + dictWord{15, 10, 79}, + dictWord{147, 10, 108}, + dictWord{4, 11, 341}, + dictWord{135, 11, 480}, + dictWord{134, 0, 830}, + dictWord{5, 0, 70}, + dictWord{5, 0, 622}, + dictWord{6, 0, 334}, + dictWord{7, 0, 1032}, + dictWord{9, 0, 171}, + dictWord{11, 0, 26}, + dictWord{11, 0, 213}, + dictWord{ + 11, + 0, + 637, + }, + dictWord{11, 0, 707}, + dictWord{12, 0, 202}, + dictWord{12, 0, 380}, + dictWord{13, 0, 226}, + dictWord{13, 0, 355}, + dictWord{14, 0, 222}, + dictWord{145, 0, 42}, + dictWord{135, 10, 981}, + dictWord{143, 0, 217}, + dictWord{137, 11, 114}, + dictWord{4, 0, 23}, + dictWord{4, 0, 141}, + dictWord{5, 0, 313}, + dictWord{5, 0, 1014}, + dictWord{6, 0, 50}, + dictWord{6, 0, 51}, + dictWord{7, 0, 142}, + dictWord{7, 0, 384}, + dictWord{7, 0, 559}, + dictWord{8, 0, 640}, + dictWord{9, 0, 460}, + dictWord{9, 0, 783}, + dictWord{11, 0, 741}, + dictWord{12, 0, 183}, + dictWord{141, 0, 488}, + dictWord{141, 0, 360}, + dictWord{7, 0, 1586}, + dictWord{7, 11, 1995}, + dictWord{8, 11, 299}, + dictWord{11, 11, 890}, + dictWord{140, 11, 674}, + 
dictWord{132, 10, 434}, + dictWord{7, 0, 652}, + dictWord{134, 10, 550}, + dictWord{7, 0, 766}, + dictWord{5, 10, 553}, + dictWord{138, 10, 824}, + dictWord{7, 0, 737}, + dictWord{8, 0, 298}, + dictWord{136, 10, 452}, + dictWord{4, 11, 238}, + dictWord{5, 11, 503}, + dictWord{6, 11, 179}, + dictWord{7, 11, 2003}, + dictWord{8, 11, 381}, + dictWord{8, 11, 473}, + dictWord{9, 11, 149}, + dictWord{10, 11, 183}, + dictWord{15, 11, 45}, + dictWord{143, 11, 86}, + dictWord{133, 10, 292}, + dictWord{5, 0, 222}, + dictWord{9, 0, 655}, + dictWord{138, 0, 534}, + dictWord{138, 10, 135}, + dictWord{4, 11, 121}, + dictWord{5, 11, 156}, + dictWord{5, 11, 349}, + dictWord{9, 11, 136}, + dictWord{10, 11, 605}, + dictWord{14, 11, 342}, + dictWord{147, 11, 107}, + dictWord{137, 0, 906}, + dictWord{6, 0, 1013}, + dictWord{134, 0, 1250}, + dictWord{6, 0, 1956}, + dictWord{6, 0, 2009}, + dictWord{8, 0, 991}, + dictWord{144, 0, 120}, + dictWord{135, 11, 1192}, + dictWord{ + 138, + 0, + 503, + }, + dictWord{5, 0, 154}, + dictWord{7, 0, 1491}, + dictWord{10, 0, 379}, + dictWord{138, 0, 485}, + dictWord{6, 0, 1867}, + dictWord{6, 0, 1914}, + dictWord{6, 0, 1925}, + dictWord{9, 0, 917}, + dictWord{9, 0, 925}, + dictWord{9, 0, 932}, + dictWord{9, 0, 951}, + dictWord{9, 0, 1007}, + dictWord{9, 0, 1013}, + dictWord{12, 0, 806}, + dictWord{ + 12, + 0, + 810, + }, + dictWord{12, 0, 814}, + dictWord{12, 0, 816}, + dictWord{12, 0, 824}, + dictWord{12, 0, 832}, + dictWord{12, 0, 837}, + dictWord{12, 0, 863}, + dictWord{ + 12, + 0, + 868, + }, + dictWord{12, 0, 870}, + dictWord{12, 0, 889}, + dictWord{12, 0, 892}, + dictWord{12, 0, 900}, + dictWord{12, 0, 902}, + dictWord{12, 0, 908}, + dictWord{12, 0, 933}, + dictWord{12, 0, 942}, + dictWord{12, 0, 949}, + dictWord{12, 0, 954}, + dictWord{15, 0, 175}, + dictWord{15, 0, 203}, + dictWord{15, 0, 213}, + dictWord{15, 0, 218}, + dictWord{15, 0, 225}, + dictWord{15, 0, 231}, + dictWord{15, 0, 239}, + dictWord{15, 0, 248}, + dictWord{15, 0, 252}, + 
dictWord{18, 0, 190}, + dictWord{18, 0, 204}, + dictWord{ + 18, + 0, + 215, + }, + dictWord{18, 0, 216}, + dictWord{18, 0, 222}, + dictWord{18, 0, 225}, + dictWord{18, 0, 230}, + dictWord{18, 0, 239}, + dictWord{18, 0, 241}, + dictWord{ + 21, + 0, + 42, + }, + dictWord{21, 0, 43}, + dictWord{21, 0, 44}, + dictWord{21, 0, 45}, + dictWord{21, 0, 46}, + dictWord{21, 0, 53}, + dictWord{24, 0, 27}, + dictWord{152, 0, 31}, + dictWord{ + 133, + 0, + 716, + }, + dictWord{135, 0, 844}, + dictWord{4, 0, 91}, + dictWord{5, 0, 388}, + dictWord{5, 0, 845}, + dictWord{6, 0, 206}, + dictWord{6, 0, 252}, + dictWord{6, 0, 365}, + dictWord{ + 7, + 0, + 136, + }, + dictWord{7, 0, 531}, + dictWord{136, 0, 621}, + dictWord{7, 10, 393}, + dictWord{10, 10, 603}, + dictWord{139, 10, 206}, + dictWord{6, 11, 80}, + dictWord{ + 6, + 11, + 1694, + }, + dictWord{7, 11, 173}, + dictWord{7, 11, 1974}, + dictWord{9, 11, 547}, + dictWord{10, 11, 730}, + dictWord{14, 11, 18}, + dictWord{150, 11, 39}, + dictWord{137, 0, 748}, + dictWord{4, 11, 923}, + dictWord{134, 11, 1711}, + dictWord{4, 10, 912}, + dictWord{137, 10, 232}, + dictWord{7, 10, 98}, + dictWord{7, 10, 1973}, + dictWord{136, 10, 716}, + dictWord{14, 0, 103}, + dictWord{133, 10, 733}, + dictWord{132, 11, 595}, + dictWord{12, 0, 158}, + dictWord{18, 0, 8}, + dictWord{19, 0, 62}, + dictWord{20, 0, 6}, + dictWord{22, 0, 4}, + dictWord{23, 0, 2}, + dictWord{23, 0, 9}, + dictWord{5, 11, 240}, + dictWord{6, 11, 459}, + dictWord{7, 11, 12}, + dictWord{7, 11, 114}, + dictWord{7, 11, 502}, + dictWord{7, 11, 1751}, + dictWord{7, 11, 1753}, + dictWord{7, 11, 1805}, + dictWord{8, 11, 658}, + dictWord{9, 11, 1}, + dictWord{11, 11, 959}, + dictWord{13, 11, 446}, + dictWord{142, 11, 211}, + dictWord{135, 0, 576}, + dictWord{5, 0, 771}, + dictWord{5, 0, 863}, + dictWord{5, 0, 898}, + dictWord{6, 0, 648}, + dictWord{ + 6, + 0, + 1632, + }, + dictWord{6, 0, 1644}, + dictWord{134, 0, 1780}, + dictWord{133, 0, 331}, + dictWord{7, 11, 633}, + dictWord{7, 11, 
905}, + dictWord{7, 11, 909}, + dictWord{ + 7, + 11, + 1538, + }, + dictWord{9, 11, 767}, + dictWord{140, 11, 636}, + dictWord{140, 0, 632}, + dictWord{5, 0, 107}, + dictWord{7, 0, 201}, + dictWord{136, 0, 518}, + dictWord{ + 6, + 0, + 446, + }, + dictWord{7, 0, 1817}, + dictWord{134, 11, 490}, + dictWord{9, 0, 851}, + dictWord{141, 0, 510}, + dictWord{7, 11, 250}, + dictWord{8, 11, 506}, + dictWord{ + 136, + 11, + 507, + }, + dictWord{4, 0, 504}, + dictWord{137, 10, 72}, + dictWord{132, 11, 158}, + dictWord{4, 11, 140}, + dictWord{7, 11, 362}, + dictWord{8, 11, 209}, + dictWord{ + 9, + 11, + 10, + }, + dictWord{9, 11, 160}, + dictWord{9, 11, 503}, + dictWord{10, 11, 689}, + dictWord{11, 11, 350}, + dictWord{11, 11, 553}, + dictWord{11, 11, 725}, + dictWord{ + 12, + 11, + 252, + }, + dictWord{12, 11, 583}, + dictWord{13, 11, 192}, + dictWord{13, 11, 352}, + dictWord{14, 11, 269}, + dictWord{14, 11, 356}, + dictWord{148, 11, 50}, + dictWord{6, 11, 597}, + dictWord{135, 11, 1318}, + dictWord{135, 10, 1454}, + dictWord{5, 0, 883}, + dictWord{5, 0, 975}, + dictWord{8, 0, 392}, + dictWord{148, 0, 7}, + dictWord{6, 11, 228}, + dictWord{7, 11, 1341}, + dictWord{9, 11, 408}, + dictWord{138, 11, 343}, + dictWord{11, 11, 348}, + dictWord{11, 10, 600}, + dictWord{12, 11, 99}, + dictWord{13, 10, 245}, + dictWord{18, 11, 1}, + dictWord{18, 11, 11}, + dictWord{147, 11, 4}, + dictWord{134, 11, 296}, + dictWord{5, 0, 922}, + dictWord{134, 0, 1707}, + dictWord{132, 11, 557}, + dictWord{4, 11, 548}, + dictWord{7, 10, 164}, + dictWord{7, 10, 1571}, + dictWord{9, 10, 107}, + dictWord{140, 10, 225}, + dictWord{ + 7, + 11, + 197, + }, + dictWord{8, 11, 142}, + dictWord{8, 11, 325}, + dictWord{9, 11, 150}, + dictWord{9, 11, 596}, + dictWord{10, 11, 350}, + dictWord{10, 11, 353}, + dictWord{ + 11, + 11, + 74, + }, + dictWord{11, 11, 315}, + dictWord{14, 11, 423}, + dictWord{143, 11, 141}, + dictWord{5, 0, 993}, + dictWord{7, 0, 515}, + dictWord{137, 0, 91}, + dictWord{4, 0, 131}, + 
dictWord{8, 0, 200}, + dictWord{5, 10, 484}, + dictWord{5, 10, 510}, + dictWord{6, 10, 434}, + dictWord{7, 10, 1000}, + dictWord{7, 10, 1098}, + dictWord{136, 10, 2}, + dictWord{152, 0, 10}, + dictWord{4, 11, 62}, + dictWord{5, 11, 83}, + dictWord{6, 11, 399}, + dictWord{6, 11, 579}, + dictWord{7, 11, 692}, + dictWord{7, 11, 846}, + dictWord{ + 7, + 11, + 1015, + }, + dictWord{7, 11, 1799}, + dictWord{8, 11, 403}, + dictWord{9, 11, 394}, + dictWord{10, 11, 133}, + dictWord{12, 11, 4}, + dictWord{12, 11, 297}, + dictWord{ + 12, + 11, + 452, + }, + dictWord{16, 11, 81}, + dictWord{18, 11, 19}, + dictWord{18, 11, 25}, + dictWord{21, 11, 14}, + dictWord{22, 11, 12}, + dictWord{151, 11, 18}, + dictWord{ + 140, + 11, + 459, + }, + dictWord{132, 11, 177}, + dictWord{7, 0, 1433}, + dictWord{9, 0, 365}, + dictWord{137, 11, 365}, + dictWord{132, 10, 460}, + dictWord{5, 0, 103}, + dictWord{ + 6, + 0, + 2004, + }, + dictWord{7, 0, 921}, + dictWord{8, 0, 580}, + dictWord{8, 0, 593}, + dictWord{8, 0, 630}, + dictWord{10, 0, 28}, + dictWord{5, 11, 411}, + dictWord{ + 135, + 11, + 653, + }, + dictWord{4, 10, 932}, + dictWord{133, 10, 891}, + dictWord{4, 0, 911}, + dictWord{5, 0, 867}, + dictWord{5, 0, 1013}, + dictWord{7, 0, 2034}, + dictWord{8, 0, 798}, + dictWord{136, 0, 813}, + dictWord{7, 11, 439}, + dictWord{10, 11, 727}, + dictWord{11, 11, 260}, + dictWord{139, 11, 684}, + dictWord{136, 10, 625}, + dictWord{ + 5, + 11, + 208, + }, + dictWord{7, 11, 753}, + dictWord{135, 11, 1528}, + dictWord{5, 0, 461}, + dictWord{7, 0, 1925}, + dictWord{12, 0, 39}, + dictWord{13, 0, 265}, + dictWord{ + 13, + 0, + 439, + }, + dictWord{134, 10, 76}, + dictWord{6, 0, 853}, + dictWord{8, 10, 92}, + dictWord{137, 10, 221}, + dictWord{5, 0, 135}, + dictWord{6, 0, 519}, + dictWord{7, 0, 1722}, + dictWord{10, 0, 271}, + dictWord{11, 0, 261}, + dictWord{145, 0, 54}, + dictWord{139, 11, 814}, + dictWord{14, 0, 338}, + dictWord{148, 0, 81}, + dictWord{4, 0, 300}, + dictWord{133, 0, 436}, + dictWord{5, 
0, 419}, + dictWord{5, 0, 687}, + dictWord{7, 0, 864}, + dictWord{9, 0, 470}, + dictWord{135, 11, 864}, + dictWord{9, 0, 836}, + dictWord{ + 133, + 11, + 242, + }, + dictWord{134, 0, 1937}, + dictWord{4, 10, 763}, + dictWord{133, 11, 953}, + dictWord{132, 10, 622}, + dictWord{132, 0, 393}, + dictWord{ + 133, + 10, + 253, + }, + dictWord{8, 0, 357}, + dictWord{10, 0, 745}, + dictWord{14, 0, 426}, + dictWord{17, 0, 94}, + dictWord{19, 0, 57}, + dictWord{135, 10, 546}, + dictWord{5, 11, 615}, + dictWord{146, 11, 37}, + dictWord{9, 10, 73}, + dictWord{10, 10, 110}, + dictWord{14, 10, 185}, + dictWord{145, 10, 119}, + dictWord{11, 0, 703}, + dictWord{7, 10, 624}, + dictWord{7, 10, 916}, + dictWord{10, 10, 256}, + dictWord{139, 10, 87}, + dictWord{133, 11, 290}, + dictWord{5, 10, 212}, + dictWord{12, 10, 35}, + dictWord{ + 141, + 10, + 382, + }, + dictWord{132, 11, 380}, + dictWord{5, 11, 52}, + dictWord{7, 11, 277}, + dictWord{9, 11, 368}, + dictWord{139, 11, 791}, + dictWord{133, 0, 387}, + dictWord{ + 10, + 11, + 138, + }, + dictWord{139, 11, 476}, + dictWord{4, 0, 6}, + dictWord{5, 0, 708}, + dictWord{136, 0, 75}, + dictWord{7, 0, 1351}, + dictWord{9, 0, 581}, + dictWord{10, 0, 639}, + dictWord{11, 0, 453}, + dictWord{140, 0, 584}, + dictWord{132, 0, 303}, + dictWord{138, 0, 772}, + dictWord{135, 10, 1175}, + dictWord{4, 0, 749}, + dictWord{ + 5, + 10, + 816, + }, + dictWord{6, 11, 256}, + dictWord{7, 11, 307}, + dictWord{7, 11, 999}, + dictWord{7, 11, 1481}, + dictWord{7, 11, 1732}, + dictWord{7, 11, 1738}, + dictWord{ + 8, + 11, + 265, + }, + dictWord{9, 11, 414}, + dictWord{11, 11, 316}, + dictWord{12, 11, 52}, + dictWord{13, 11, 420}, + dictWord{147, 11, 100}, + dictWord{135, 11, 1296}, + dictWord{ + 6, + 0, + 1065, + }, + dictWord{5, 10, 869}, + dictWord{5, 10, 968}, + dictWord{6, 10, 1626}, + dictWord{8, 10, 734}, + dictWord{136, 10, 784}, + dictWord{4, 10, 542}, + dictWord{ + 6, + 10, + 1716, + }, + dictWord{6, 10, 1727}, + dictWord{7, 10, 1082}, + dictWord{7, 
10, 1545}, + dictWord{8, 10, 56}, + dictWord{8, 10, 118}, + dictWord{8, 10, 412}, + dictWord{ + 8, + 10, + 564, + }, + dictWord{9, 10, 888}, + dictWord{9, 10, 908}, + dictWord{10, 10, 50}, + dictWord{10, 10, 423}, + dictWord{11, 10, 685}, + dictWord{11, 10, 697}, + dictWord{11, 10, 933}, + dictWord{12, 10, 299}, + dictWord{13, 10, 126}, + dictWord{13, 10, 136}, + dictWord{13, 10, 170}, + dictWord{141, 10, 190}, + dictWord{ + 134, + 0, + 226, + }, + dictWord{4, 0, 106}, + dictWord{7, 0, 310}, + dictWord{11, 0, 717}, + dictWord{133, 11, 723}, + dictWord{5, 0, 890}, + dictWord{5, 0, 988}, + dictWord{4, 10, 232}, + dictWord{9, 10, 202}, + dictWord{10, 10, 474}, + dictWord{140, 10, 433}, + dictWord{6, 0, 626}, + dictWord{142, 0, 431}, + dictWord{10, 0, 706}, + dictWord{150, 0, 44}, + dictWord{13, 0, 51}, + dictWord{6, 10, 108}, + dictWord{7, 10, 1003}, + dictWord{7, 10, 1181}, + dictWord{8, 10, 111}, + dictWord{136, 10, 343}, + dictWord{132, 0, 698}, + dictWord{5, 11, 109}, + dictWord{6, 11, 1784}, + dictWord{7, 11, 1895}, + dictWord{12, 11, 296}, + dictWord{140, 11, 302}, + dictWord{134, 0, 828}, + dictWord{ + 134, + 10, + 1712, + }, + dictWord{138, 0, 17}, + dictWord{7, 0, 1929}, + dictWord{4, 10, 133}, + dictWord{5, 11, 216}, + dictWord{7, 10, 711}, + dictWord{7, 10, 1298}, + dictWord{ + 7, + 10, + 1585, + }, + dictWord{7, 11, 1879}, + dictWord{9, 11, 141}, + dictWord{9, 11, 270}, + dictWord{9, 11, 679}, + dictWord{10, 11, 159}, + dictWord{10, 11, 553}, + dictWord{ + 11, + 11, + 197, + }, + dictWord{11, 11, 438}, + dictWord{12, 11, 538}, + dictWord{12, 11, 559}, + dictWord{13, 11, 193}, + dictWord{13, 11, 423}, + dictWord{14, 11, 144}, + dictWord{14, 11, 166}, + dictWord{14, 11, 167}, + dictWord{15, 11, 67}, + dictWord{147, 11, 84}, + dictWord{141, 11, 127}, + dictWord{7, 11, 1872}, + dictWord{ + 137, + 11, + 81, + }, + dictWord{6, 10, 99}, + dictWord{7, 10, 1808}, + dictWord{145, 10, 57}, + dictWord{134, 11, 391}, + dictWord{5, 0, 689}, + dictWord{6, 0, 84}, + 
dictWord{7, 0, 1250}, + dictWord{6, 10, 574}, + dictWord{7, 10, 428}, + dictWord{10, 10, 669}, + dictWord{11, 10, 485}, + dictWord{11, 10, 840}, + dictWord{12, 10, 300}, + dictWord{ + 142, + 10, + 250, + }, + dictWord{7, 11, 322}, + dictWord{136, 11, 249}, + dictWord{7, 11, 432}, + dictWord{135, 11, 1649}, + dictWord{135, 10, 1871}, + dictWord{137, 10, 252}, + dictWord{6, 11, 155}, + dictWord{140, 11, 234}, + dictWord{7, 0, 871}, + dictWord{19, 0, 27}, + dictWord{147, 11, 27}, + dictWord{140, 0, 498}, + dictWord{5, 0, 986}, + dictWord{6, 0, 130}, + dictWord{138, 0, 823}, + dictWord{6, 0, 1793}, + dictWord{7, 0, 1582}, + dictWord{8, 0, 458}, + dictWord{10, 0, 101}, + dictWord{10, 0, 318}, + dictWord{ + 10, + 0, + 945, + }, + dictWord{12, 0, 734}, + dictWord{16, 0, 104}, + dictWord{18, 0, 177}, + dictWord{6, 10, 323}, + dictWord{135, 10, 1564}, + dictWord{5, 11, 632}, + dictWord{ + 138, + 11, + 526, + }, + dictWord{10, 0, 435}, + dictWord{7, 10, 461}, + dictWord{136, 10, 775}, + dictWord{6, 11, 144}, + dictWord{7, 11, 948}, + dictWord{7, 11, 1042}, + dictWord{ + 7, + 11, + 1857, + }, + dictWord{8, 11, 235}, + dictWord{8, 11, 461}, + dictWord{9, 11, 453}, + dictWord{9, 11, 530}, + dictWord{10, 11, 354}, + dictWord{17, 11, 77}, + dictWord{ + 19, + 11, + 99, + }, + dictWord{148, 11, 79}, + dictWord{138, 0, 966}, + dictWord{7, 0, 1644}, + dictWord{137, 0, 129}, + dictWord{135, 0, 997}, + dictWord{136, 0, 502}, + dictWord{ + 5, + 11, + 196, + }, + dictWord{6, 11, 486}, + dictWord{7, 11, 212}, + dictWord{8, 11, 309}, + dictWord{136, 11, 346}, + dictWord{7, 10, 727}, + dictWord{146, 10, 73}, + dictWord{132, 0, 823}, + dictWord{132, 11, 686}, + dictWord{135, 0, 1927}, + dictWord{4, 0, 762}, + dictWord{7, 0, 1756}, + dictWord{137, 0, 98}, + dictWord{136, 10, 577}, + dictWord{24, 0, 8}, + dictWord{4, 11, 30}, + dictWord{5, 11, 43}, + dictWord{152, 11, 8}, + dictWord{7, 0, 1046}, + dictWord{139, 0, 160}, + dictWord{7, 0, 492}, + dictWord{ + 4, + 10, + 413, + }, + dictWord{5, 
10, 677}, + dictWord{7, 11, 492}, + dictWord{8, 10, 432}, + dictWord{140, 10, 280}, + dictWord{6, 0, 45}, + dictWord{7, 0, 433}, + dictWord{8, 0, 129}, + dictWord{9, 0, 21}, + dictWord{10, 0, 392}, + dictWord{11, 0, 79}, + dictWord{12, 0, 499}, + dictWord{13, 0, 199}, + dictWord{141, 0, 451}, + dictWord{7, 0, 558}, + dictWord{ + 136, + 0, + 353, + }, + dictWord{4, 11, 220}, + dictWord{7, 11, 1535}, + dictWord{9, 11, 93}, + dictWord{139, 11, 474}, + dictWord{7, 10, 646}, + dictWord{7, 10, 1730}, + dictWord{ + 11, + 10, + 446, + }, + dictWord{141, 10, 178}, + dictWord{133, 0, 785}, + dictWord{134, 0, 1145}, + dictWord{8, 0, 81}, + dictWord{9, 0, 189}, + dictWord{9, 0, 201}, + dictWord{ + 11, + 0, + 478, + }, + dictWord{11, 0, 712}, + dictWord{141, 0, 338}, + dictWord{5, 0, 353}, + dictWord{151, 0, 26}, + dictWord{11, 0, 762}, + dictWord{132, 10, 395}, + dictWord{ + 134, + 0, + 2024, + }, + dictWord{4, 0, 611}, + dictWord{133, 0, 606}, + dictWord{9, 10, 174}, + dictWord{10, 10, 164}, + dictWord{11, 10, 440}, + dictWord{11, 10, 841}, + dictWord{ + 143, + 10, + 98, + }, + dictWord{134, 10, 426}, + dictWord{10, 10, 608}, + dictWord{139, 10, 1002}, + dictWord{138, 10, 250}, + dictWord{6, 0, 25}, + dictWord{7, 0, 855}, + dictWord{7, 0, 1258}, + dictWord{144, 0, 32}, + dictWord{7, 11, 1725}, + dictWord{138, 11, 393}, + dictWord{5, 11, 263}, + dictWord{134, 11, 414}, + dictWord{6, 0, 2011}, + dictWord{133, 10, 476}, + dictWord{4, 0, 4}, + dictWord{7, 0, 1118}, + dictWord{7, 0, 1320}, + dictWord{7, 0, 1706}, + dictWord{8, 0, 277}, + dictWord{9, 0, 622}, + dictWord{ + 10, + 0, + 9, + }, + dictWord{11, 0, 724}, + dictWord{12, 0, 350}, + dictWord{12, 0, 397}, + dictWord{13, 0, 28}, + dictWord{13, 0, 159}, + dictWord{15, 0, 89}, + dictWord{18, 0, 5}, + dictWord{ + 19, + 0, + 9, + }, + dictWord{20, 0, 34}, + dictWord{22, 0, 47}, + dictWord{6, 11, 178}, + dictWord{6, 11, 1750}, + dictWord{8, 11, 251}, + dictWord{9, 11, 690}, + dictWord{ + 10, + 11, + 155, + }, + dictWord{10, 11, 
196}, + dictWord{10, 11, 373}, + dictWord{11, 11, 698}, + dictWord{13, 11, 155}, + dictWord{148, 11, 93}, + dictWord{5, 11, 97}, + dictWord{ + 137, + 11, + 393, + }, + dictWord{7, 0, 764}, + dictWord{11, 0, 461}, + dictWord{12, 0, 172}, + dictWord{5, 10, 76}, + dictWord{6, 10, 458}, + dictWord{6, 10, 497}, + dictWord{ + 7, + 10, + 868, + }, + dictWord{9, 10, 658}, + dictWord{10, 10, 594}, + dictWord{11, 10, 566}, + dictWord{12, 10, 338}, + dictWord{141, 10, 200}, + dictWord{134, 0, 1449}, + dictWord{138, 11, 40}, + dictWord{134, 11, 1639}, + dictWord{134, 0, 1445}, + dictWord{6, 0, 1168}, + dictWord{4, 10, 526}, + dictWord{7, 10, 1029}, + dictWord{ + 135, + 10, + 1054, + }, + dictWord{4, 11, 191}, + dictWord{7, 11, 934}, + dictWord{8, 11, 647}, + dictWord{145, 11, 97}, + dictWord{132, 10, 636}, + dictWord{6, 0, 233}, + dictWord{ + 7, + 10, + 660, + }, + dictWord{7, 10, 1124}, + dictWord{17, 10, 31}, + dictWord{19, 10, 22}, + dictWord{151, 10, 14}, + dictWord{6, 10, 1699}, + dictWord{136, 11, 110}, + dictWord{ + 12, + 11, + 246, + }, + dictWord{15, 11, 162}, + dictWord{19, 11, 64}, + dictWord{20, 11, 8}, + dictWord{20, 11, 95}, + dictWord{22, 11, 24}, + dictWord{152, 11, 17}, + dictWord{ + 5, + 11, + 165, + }, + dictWord{9, 11, 346}, + dictWord{138, 11, 655}, + dictWord{5, 11, 319}, + dictWord{135, 11, 534}, + dictWord{134, 0, 255}, + dictWord{9, 0, 216}, + dictWord{ + 8, + 11, + 128, + }, + dictWord{139, 11, 179}, + dictWord{9, 0, 183}, + dictWord{139, 0, 286}, + dictWord{11, 0, 956}, + dictWord{151, 0, 3}, + dictWord{4, 0, 536}, + dictWord{ + 7, + 0, + 1141, + }, + dictWord{10, 0, 723}, + dictWord{139, 0, 371}, + dictWord{4, 10, 279}, + dictWord{7, 10, 301}, + dictWord{137, 10, 362}, + dictWord{7, 0, 285}, + dictWord{ + 5, + 11, + 57, + }, + dictWord{6, 11, 101}, + dictWord{6, 11, 1663}, + dictWord{7, 11, 132}, + dictWord{7, 11, 1048}, + dictWord{7, 11, 1154}, + dictWord{7, 11, 1415}, + dictWord{ + 7, + 11, + 1507, + }, + dictWord{12, 11, 493}, + dictWord{15, 11, 
105}, + dictWord{151, 11, 15}, + dictWord{5, 11, 459}, + dictWord{7, 11, 1073}, + dictWord{7, 10, 1743}, + dictWord{ + 8, + 11, + 241, + }, + dictWord{136, 11, 334}, + dictWord{4, 10, 178}, + dictWord{133, 10, 399}, + dictWord{135, 0, 560}, + dictWord{132, 0, 690}, + dictWord{135, 0, 1246}, + dictWord{18, 0, 157}, + dictWord{147, 0, 63}, + dictWord{10, 0, 599}, + dictWord{11, 0, 33}, + dictWord{12, 0, 571}, + dictWord{149, 0, 1}, + dictWord{6, 11, 324}, + dictWord{ + 6, + 11, + 520, + }, + dictWord{7, 11, 338}, + dictWord{7, 11, 1616}, + dictWord{7, 11, 1729}, + dictWord{8, 11, 228}, + dictWord{9, 11, 69}, + dictWord{139, 11, 750}, + dictWord{ + 7, + 0, + 1862, + }, + dictWord{12, 0, 491}, + dictWord{12, 0, 520}, + dictWord{13, 0, 383}, + dictWord{142, 0, 244}, + dictWord{135, 11, 734}, + dictWord{134, 10, 1692}, + dictWord{10, 0, 448}, + dictWord{11, 0, 630}, + dictWord{17, 0, 117}, + dictWord{6, 10, 202}, + dictWord{7, 11, 705}, + dictWord{12, 10, 360}, + dictWord{17, 10, 118}, + dictWord{18, 10, 27}, + dictWord{148, 10, 67}, + dictWord{4, 11, 73}, + dictWord{6, 11, 612}, + dictWord{7, 11, 927}, + dictWord{7, 11, 1822}, + dictWord{8, 11, 217}, + dictWord{ + 9, + 11, + 472, + }, + dictWord{9, 11, 765}, + dictWord{9, 11, 766}, + dictWord{10, 11, 408}, + dictWord{11, 11, 51}, + dictWord{11, 11, 793}, + dictWord{12, 11, 266}, + dictWord{ + 15, + 11, + 158, + }, + dictWord{20, 11, 89}, + dictWord{150, 11, 32}, + dictWord{4, 0, 190}, + dictWord{133, 0, 554}, + dictWord{133, 0, 1001}, + dictWord{5, 11, 389}, + dictWord{ + 8, + 11, + 636, + }, + dictWord{137, 11, 229}, + dictWord{5, 0, 446}, + dictWord{7, 10, 872}, + dictWord{10, 10, 516}, + dictWord{139, 10, 167}, + dictWord{137, 10, 313}, + dictWord{132, 10, 224}, + dictWord{134, 0, 1313}, + dictWord{5, 10, 546}, + dictWord{7, 10, 35}, + dictWord{8, 10, 11}, + dictWord{8, 10, 12}, + dictWord{9, 10, 315}, + dictWord{9, 10, 533}, + dictWord{10, 10, 802}, + dictWord{11, 10, 166}, + dictWord{12, 10, 525}, + dictWord{142, 
10, 243}, + dictWord{6, 0, 636}, + dictWord{137, 0, 837}, + dictWord{5, 10, 241}, + dictWord{8, 10, 242}, + dictWord{9, 10, 451}, + dictWord{10, 10, 667}, + dictWord{11, 10, 598}, + dictWord{140, 10, 429}, + dictWord{22, 10, 46}, + dictWord{150, 11, 46}, + dictWord{136, 11, 472}, + dictWord{11, 0, 278}, + dictWord{142, 0, 73}, + dictWord{141, 11, 185}, + dictWord{132, 0, 868}, + dictWord{ + 134, + 0, + 972, + }, + dictWord{4, 10, 366}, + dictWord{137, 10, 516}, + dictWord{138, 0, 1010}, + dictWord{5, 11, 189}, + dictWord{6, 10, 1736}, + dictWord{7, 11, 442}, + dictWord{ + 7, + 11, + 443, + }, + dictWord{8, 11, 281}, + dictWord{12, 11, 174}, + dictWord{13, 11, 83}, + dictWord{141, 11, 261}, + dictWord{139, 11, 384}, + dictWord{6, 11, 2}, + dictWord{ + 7, + 11, + 191, + }, + dictWord{7, 11, 446}, + dictWord{7, 11, 758}, + dictWord{7, 11, 1262}, + dictWord{7, 11, 1737}, + dictWord{8, 11, 22}, + dictWord{8, 11, 270}, + dictWord{ + 8, + 11, + 612, + }, + dictWord{9, 11, 4}, + dictWord{9, 11, 167}, + dictWord{9, 11, 312}, + dictWord{9, 11, 436}, + dictWord{10, 11, 156}, + dictWord{10, 11, 216}, + dictWord{ + 10, + 11, + 311, + }, + dictWord{10, 11, 623}, + dictWord{11, 11, 72}, + dictWord{11, 11, 330}, + dictWord{11, 11, 455}, + dictWord{12, 11, 101}, + dictWord{12, 11, 321}, + dictWord{ + 12, + 11, + 504, + }, + dictWord{12, 11, 530}, + dictWord{12, 11, 543}, + dictWord{13, 11, 17}, + dictWord{13, 11, 156}, + dictWord{13, 11, 334}, + dictWord{14, 11, 48}, + dictWord{15, 11, 70}, + dictWord{17, 11, 60}, + dictWord{148, 11, 64}, + dictWord{6, 10, 331}, + dictWord{136, 10, 623}, + dictWord{135, 0, 1231}, + dictWord{132, 0, 304}, + dictWord{6, 11, 60}, + dictWord{7, 11, 670}, + dictWord{7, 11, 1327}, + dictWord{8, 11, 411}, + dictWord{8, 11, 435}, + dictWord{9, 11, 653}, + dictWord{9, 11, 740}, + dictWord{10, 11, 385}, + dictWord{11, 11, 222}, + dictWord{11, 11, 324}, + dictWord{11, 11, 829}, + dictWord{140, 11, 611}, + dictWord{7, 0, 506}, + dictWord{6, 11, 166}, + 
dictWord{7, 11, 374}, + dictWord{135, 11, 1174}, + dictWord{14, 11, 43}, + dictWord{146, 11, 21}, + dictWord{135, 11, 1694}, + dictWord{135, 10, 1888}, + dictWord{ + 5, + 11, + 206, + }, + dictWord{134, 11, 398}, + dictWord{135, 11, 50}, + dictWord{150, 0, 26}, + dictWord{6, 0, 53}, + dictWord{6, 0, 199}, + dictWord{7, 0, 1408}, + dictWord{ + 8, + 0, + 32, + }, + dictWord{8, 0, 93}, + dictWord{10, 0, 397}, + dictWord{10, 0, 629}, + dictWord{11, 0, 593}, + dictWord{11, 0, 763}, + dictWord{13, 0, 326}, + dictWord{145, 0, 35}, + dictWord{134, 0, 105}, + dictWord{132, 10, 394}, + dictWord{4, 0, 843}, + dictWord{138, 0, 794}, + dictWord{11, 0, 704}, + dictWord{141, 0, 396}, + dictWord{5, 0, 114}, + dictWord{5, 0, 255}, + dictWord{141, 0, 285}, + dictWord{6, 0, 619}, + dictWord{7, 0, 898}, + dictWord{7, 0, 1092}, + dictWord{8, 0, 485}, + dictWord{18, 0, 28}, + dictWord{ + 19, + 0, + 116, + }, + dictWord{135, 10, 1931}, + dictWord{9, 0, 145}, + dictWord{7, 10, 574}, + dictWord{135, 10, 1719}, + dictWord{7, 0, 2035}, + dictWord{8, 0, 19}, + dictWord{ + 9, + 0, + 89, + }, + dictWord{138, 0, 831}, + dictWord{132, 10, 658}, + dictWord{6, 11, 517}, + dictWord{7, 11, 1159}, + dictWord{10, 11, 621}, + dictWord{139, 11, 192}, + dictWord{ + 7, + 0, + 1933, + }, + dictWord{7, 11, 1933}, + dictWord{9, 10, 781}, + dictWord{10, 10, 144}, + dictWord{11, 10, 385}, + dictWord{13, 10, 161}, + dictWord{13, 10, 228}, + dictWord{13, 10, 268}, + dictWord{148, 10, 107}, + dictWord{136, 10, 374}, + dictWord{10, 11, 223}, + dictWord{139, 11, 645}, + dictWord{135, 0, 1728}, + dictWord{ + 7, + 11, + 64, + }, + dictWord{7, 11, 289}, + dictWord{136, 11, 245}, + dictWord{4, 10, 344}, + dictWord{6, 10, 498}, + dictWord{139, 10, 323}, + dictWord{136, 0, 746}, + dictWord{ + 135, + 10, + 1063, + }, + dictWord{137, 10, 155}, + dictWord{4, 0, 987}, + dictWord{6, 0, 1964}, + dictWord{6, 0, 1974}, + dictWord{6, 0, 1990}, + dictWord{136, 0, 995}, + dictWord{133, 11, 609}, + dictWord{133, 10, 906}, + 
dictWord{134, 0, 1550}, + dictWord{134, 0, 874}, + dictWord{5, 11, 129}, + dictWord{6, 11, 61}, + dictWord{ + 135, + 11, + 947, + }, + dictWord{4, 0, 1018}, + dictWord{6, 0, 1938}, + dictWord{6, 0, 2021}, + dictWord{134, 0, 2039}, + dictWord{132, 0, 814}, + dictWord{11, 0, 126}, + dictWord{ + 139, + 0, + 287, + }, + dictWord{134, 0, 1264}, + dictWord{5, 0, 955}, + dictWord{136, 0, 814}, + dictWord{141, 11, 506}, + dictWord{132, 11, 314}, + dictWord{6, 0, 981}, + dictWord{139, 11, 1000}, + dictWord{5, 0, 56}, + dictWord{8, 0, 892}, + dictWord{8, 0, 915}, + dictWord{140, 0, 776}, + dictWord{148, 0, 100}, + dictWord{10, 0, 4}, + dictWord{ + 10, + 0, + 13, + }, + dictWord{11, 0, 638}, + dictWord{148, 0, 57}, + dictWord{148, 11, 74}, + dictWord{5, 0, 738}, + dictWord{132, 10, 616}, + dictWord{133, 11, 637}, + dictWord{ + 136, + 10, + 692, + }, + dictWord{133, 0, 758}, + dictWord{132, 10, 305}, + dictWord{137, 11, 590}, + dictWord{5, 11, 280}, + dictWord{135, 11, 1226}, + dictWord{ + 134, + 11, + 494, + }, + dictWord{135, 0, 1112}, + dictWord{133, 11, 281}, + dictWord{13, 0, 44}, + dictWord{14, 0, 214}, + dictWord{5, 10, 214}, + dictWord{7, 10, 603}, + dictWord{ + 8, + 10, + 611, + }, + dictWord{9, 10, 686}, + dictWord{10, 10, 88}, + dictWord{11, 10, 459}, + dictWord{11, 10, 496}, + dictWord{12, 10, 463}, + dictWord{140, 10, 590}, + dictWord{ + 139, + 0, + 328, + }, + dictWord{135, 11, 1064}, + dictWord{137, 0, 133}, + dictWord{7, 0, 168}, + dictWord{13, 0, 196}, + dictWord{141, 0, 237}, + dictWord{134, 10, 1703}, + dictWord{134, 0, 1152}, + dictWord{135, 0, 1245}, + dictWord{5, 0, 110}, + dictWord{6, 0, 169}, + dictWord{6, 0, 1702}, + dictWord{7, 0, 400}, + dictWord{8, 0, 538}, + dictWord{ + 9, + 0, + 184, + }, + dictWord{9, 0, 524}, + dictWord{140, 0, 218}, + dictWord{6, 0, 1816}, + dictWord{10, 0, 871}, + dictWord{12, 0, 769}, + dictWord{140, 0, 785}, + dictWord{ + 132, + 11, + 630, + }, + dictWord{7, 11, 33}, + dictWord{7, 11, 120}, + dictWord{8, 11, 489}, + 
dictWord{9, 11, 319}, + dictWord{10, 11, 820}, + dictWord{11, 11, 1004}, + dictWord{ + 12, + 11, + 379, + }, + dictWord{13, 11, 117}, + dictWord{13, 11, 412}, + dictWord{14, 11, 25}, + dictWord{15, 11, 52}, + dictWord{15, 11, 161}, + dictWord{16, 11, 47}, + dictWord{149, 11, 2}, + dictWord{6, 0, 133}, + dictWord{8, 0, 413}, + dictWord{9, 0, 353}, + dictWord{139, 0, 993}, + dictWord{145, 10, 19}, + dictWord{4, 11, 937}, + dictWord{ + 133, + 11, + 801, + }, + dictWord{134, 0, 978}, + dictWord{6, 0, 93}, + dictWord{6, 0, 1508}, + dictWord{7, 0, 1422}, + dictWord{7, 0, 1851}, + dictWord{8, 0, 673}, + dictWord{9, 0, 529}, + dictWord{140, 0, 43}, + dictWord{6, 0, 317}, + dictWord{10, 0, 512}, + dictWord{4, 10, 737}, + dictWord{11, 10, 294}, + dictWord{12, 10, 60}, + dictWord{12, 10, 437}, + dictWord{13, 10, 64}, + dictWord{13, 10, 380}, + dictWord{142, 10, 430}, + dictWord{9, 0, 371}, + dictWord{7, 11, 1591}, + dictWord{144, 11, 43}, + dictWord{6, 10, 1758}, + dictWord{8, 10, 520}, + dictWord{9, 10, 345}, + dictWord{9, 10, 403}, + dictWord{142, 10, 350}, + dictWord{5, 0, 526}, + dictWord{10, 10, 242}, + dictWord{ + 138, + 10, + 579, + }, + dictWord{9, 0, 25}, + dictWord{10, 0, 467}, + dictWord{138, 0, 559}, + dictWord{5, 10, 139}, + dictWord{7, 10, 1168}, + dictWord{138, 10, 539}, + dictWord{ + 4, + 0, + 335, + }, + dictWord{135, 0, 942}, + dictWord{140, 0, 754}, + dictWord{132, 11, 365}, + dictWord{11, 0, 182}, + dictWord{142, 0, 195}, + dictWord{142, 11, 29}, + dictWord{ + 5, + 11, + 7, + }, + dictWord{139, 11, 774}, + dictWord{4, 11, 746}, + dictWord{135, 11, 1090}, + dictWord{8, 0, 39}, + dictWord{10, 0, 773}, + dictWord{11, 0, 84}, + dictWord{ + 12, + 0, + 205, + }, + dictWord{142, 0, 1}, + dictWord{5, 0, 601}, + dictWord{5, 0, 870}, + dictWord{5, 11, 360}, + dictWord{136, 11, 237}, + dictWord{132, 0, 181}, + dictWord{ + 136, + 0, + 370, + }, + dictWord{134, 0, 1652}, + dictWord{8, 0, 358}, + dictWord{4, 10, 107}, + dictWord{7, 10, 613}, + dictWord{8, 10, 439}, + 
dictWord{8, 10, 504}, + dictWord{ + 9, + 10, + 501, + }, + dictWord{10, 10, 383}, + dictWord{139, 10, 477}, + dictWord{132, 10, 229}, + dictWord{137, 11, 785}, + dictWord{4, 0, 97}, + dictWord{5, 0, 147}, + dictWord{ + 6, + 0, + 286, + }, + dictWord{7, 0, 1362}, + dictWord{141, 0, 176}, + dictWord{6, 0, 537}, + dictWord{7, 0, 788}, + dictWord{7, 0, 1816}, + dictWord{132, 10, 903}, + dictWord{ + 140, + 10, + 71, + }, + dictWord{6, 0, 743}, + dictWord{134, 0, 1223}, + dictWord{6, 0, 375}, + dictWord{7, 0, 169}, + dictWord{7, 0, 254}, + dictWord{8, 0, 780}, + dictWord{135, 11, 1493}, + dictWord{7, 0, 1714}, + dictWord{4, 10, 47}, + dictWord{6, 10, 373}, + dictWord{7, 10, 452}, + dictWord{7, 10, 543}, + dictWord{7, 10, 1856}, + dictWord{9, 10, 6}, + dictWord{ + 11, + 10, + 257, + }, + dictWord{139, 10, 391}, + dictWord{6, 0, 896}, + dictWord{136, 0, 1003}, + dictWord{135, 0, 1447}, + dictWord{137, 11, 341}, + dictWord{5, 10, 980}, + dictWord{134, 10, 1754}, + dictWord{145, 11, 22}, + dictWord{4, 11, 277}, + dictWord{5, 11, 608}, + dictWord{6, 11, 493}, + dictWord{7, 11, 457}, + dictWord{ + 140, + 11, + 384, + }, + dictWord{7, 10, 536}, + dictWord{7, 10, 1331}, + dictWord{136, 10, 143}, + dictWord{140, 0, 744}, + dictWord{7, 11, 27}, + dictWord{135, 11, 316}, + dictWord{ + 18, + 0, + 126, + }, + dictWord{5, 10, 19}, + dictWord{134, 10, 533}, + dictWord{4, 0, 788}, + dictWord{11, 0, 41}, + dictWord{5, 11, 552}, + dictWord{5, 11, 586}, + dictWord{ + 5, + 11, + 676, + }, + dictWord{6, 11, 448}, + dictWord{8, 11, 244}, + dictWord{11, 11, 1}, + dictWord{11, 11, 41}, + dictWord{13, 11, 3}, + dictWord{16, 11, 54}, + dictWord{17, 11, 4}, + dictWord{146, 11, 13}, + dictWord{4, 0, 985}, + dictWord{6, 0, 1801}, + dictWord{4, 11, 401}, + dictWord{137, 11, 264}, + dictWord{5, 10, 395}, + dictWord{5, 10, 951}, + dictWord{134, 10, 1776}, + dictWord{5, 0, 629}, + dictWord{135, 0, 1549}, + dictWord{11, 10, 663}, + dictWord{12, 10, 210}, + dictWord{13, 10, 166}, + dictWord{ + 13, + 10, + 
310, + }, + dictWord{14, 10, 373}, + dictWord{147, 10, 43}, + dictWord{9, 11, 543}, + dictWord{10, 11, 524}, + dictWord{11, 11, 30}, + dictWord{12, 11, 524}, + dictWord{ + 14, + 11, + 315, + }, + dictWord{16, 11, 18}, + dictWord{20, 11, 26}, + dictWord{148, 11, 65}, + dictWord{4, 11, 205}, + dictWord{5, 11, 623}, + dictWord{7, 11, 104}, + dictWord{ + 136, + 11, + 519, + }, + dictWord{5, 0, 293}, + dictWord{134, 0, 601}, + dictWord{7, 11, 579}, + dictWord{9, 11, 41}, + dictWord{9, 11, 244}, + dictWord{9, 11, 669}, + dictWord{ + 10, + 11, + 5, + }, + dictWord{11, 11, 861}, + dictWord{11, 11, 951}, + dictWord{139, 11, 980}, + dictWord{132, 11, 717}, + dictWord{132, 10, 695}, + dictWord{7, 10, 497}, + dictWord{ + 9, + 10, + 387, + }, + dictWord{147, 10, 81}, + dictWord{132, 0, 420}, + dictWord{142, 0, 37}, + dictWord{6, 0, 1134}, + dictWord{6, 0, 1900}, + dictWord{12, 0, 830}, + dictWord{ + 12, + 0, + 878, + }, + dictWord{12, 0, 894}, + dictWord{15, 0, 221}, + dictWord{143, 0, 245}, + dictWord{132, 11, 489}, + dictWord{7, 0, 1570}, + dictWord{140, 0, 542}, + dictWord{ + 8, + 0, + 933, + }, + dictWord{136, 0, 957}, + dictWord{6, 0, 1371}, + dictWord{7, 0, 31}, + dictWord{8, 0, 373}, + dictWord{5, 10, 284}, + dictWord{6, 10, 49}, + dictWord{6, 10, 350}, + dictWord{7, 10, 377}, + dictWord{7, 10, 1693}, + dictWord{8, 10, 678}, + dictWord{9, 10, 161}, + dictWord{9, 10, 585}, + dictWord{9, 10, 671}, + dictWord{9, 10, 839}, + dictWord{11, 10, 912}, + dictWord{141, 10, 427}, + dictWord{135, 11, 892}, + dictWord{4, 0, 325}, + dictWord{138, 0, 125}, + dictWord{139, 11, 47}, + dictWord{ + 132, + 10, + 597, + }, + dictWord{138, 0, 323}, + dictWord{6, 0, 1547}, + dictWord{7, 11, 1605}, + dictWord{9, 11, 473}, + dictWord{11, 11, 962}, + dictWord{146, 11, 139}, + dictWord{ + 139, + 10, + 908, + }, + dictWord{7, 11, 819}, + dictWord{9, 11, 26}, + dictWord{9, 11, 392}, + dictWord{10, 11, 152}, + dictWord{10, 11, 226}, + dictWord{11, 11, 19}, + dictWord{ + 12, + 11, + 276, + }, + 
dictWord{12, 11, 426}, + dictWord{12, 11, 589}, + dictWord{13, 11, 460}, + dictWord{15, 11, 97}, + dictWord{19, 11, 48}, + dictWord{148, 11, 104}, + dictWord{135, 11, 51}, + dictWord{4, 0, 718}, + dictWord{135, 0, 1216}, + dictWord{6, 0, 1896}, + dictWord{6, 0, 1905}, + dictWord{6, 0, 1912}, + dictWord{9, 0, 947}, + dictWord{ + 9, + 0, + 974, + }, + dictWord{12, 0, 809}, + dictWord{12, 0, 850}, + dictWord{12, 0, 858}, + dictWord{12, 0, 874}, + dictWord{12, 0, 887}, + dictWord{12, 0, 904}, + dictWord{ + 12, + 0, + 929, + }, + dictWord{12, 0, 948}, + dictWord{12, 0, 952}, + dictWord{15, 0, 198}, + dictWord{15, 0, 206}, + dictWord{15, 0, 220}, + dictWord{15, 0, 227}, + dictWord{15, 0, 247}, + dictWord{18, 0, 188}, + dictWord{21, 0, 48}, + dictWord{21, 0, 50}, + dictWord{24, 0, 25}, + dictWord{24, 0, 29}, + dictWord{7, 11, 761}, + dictWord{7, 11, 1051}, + dictWord{ + 137, + 11, + 545, + }, + dictWord{5, 0, 124}, + dictWord{5, 0, 144}, + dictWord{6, 0, 548}, + dictWord{7, 0, 15}, + dictWord{7, 0, 153}, + dictWord{137, 0, 629}, + dictWord{ + 135, + 11, + 606, + }, + dictWord{135, 10, 2014}, + dictWord{7, 10, 2007}, + dictWord{9, 11, 46}, + dictWord{9, 10, 101}, + dictWord{9, 10, 450}, + dictWord{10, 10, 66}, + dictWord{ + 10, + 10, + 842, + }, + dictWord{11, 10, 536}, + dictWord{140, 10, 587}, + dictWord{6, 0, 75}, + dictWord{7, 0, 1531}, + dictWord{8, 0, 416}, + dictWord{9, 0, 240}, + dictWord{9, 0, 275}, + dictWord{10, 0, 100}, + dictWord{11, 0, 658}, + dictWord{11, 0, 979}, + dictWord{12, 0, 86}, + dictWord{14, 0, 207}, + dictWord{15, 0, 20}, + dictWord{143, 0, 25}, + dictWord{ + 5, + 0, + 141, + }, + dictWord{5, 0, 915}, + dictWord{6, 0, 1783}, + dictWord{7, 0, 211}, + dictWord{7, 0, 698}, + dictWord{7, 0, 1353}, + dictWord{9, 0, 83}, + dictWord{9, 0, 281}, + dictWord{ + 10, + 0, + 376, + }, + dictWord{10, 0, 431}, + dictWord{11, 0, 543}, + dictWord{12, 0, 664}, + dictWord{13, 0, 280}, + dictWord{13, 0, 428}, + dictWord{14, 0, 61}, + dictWord{ + 14, + 0, + 128, + }, 
+ dictWord{17, 0, 52}, + dictWord{145, 0, 81}, + dictWord{132, 11, 674}, + dictWord{135, 0, 533}, + dictWord{149, 0, 6}, + dictWord{132, 11, 770}, + dictWord{ + 133, + 0, + 538, + }, + dictWord{5, 11, 79}, + dictWord{7, 11, 1027}, + dictWord{7, 11, 1477}, + dictWord{139, 11, 52}, + dictWord{139, 10, 62}, + dictWord{4, 0, 338}, + dictWord{ + 133, + 0, + 400, + }, + dictWord{5, 11, 789}, + dictWord{134, 11, 195}, + dictWord{4, 11, 251}, + dictWord{4, 11, 688}, + dictWord{7, 11, 513}, + dictWord{7, 11, 1284}, + dictWord{ + 9, + 11, + 87, + }, + dictWord{138, 11, 365}, + dictWord{134, 10, 1766}, + dictWord{6, 0, 0}, + dictWord{7, 0, 84}, + dictWord{11, 0, 895}, + dictWord{145, 0, 11}, + dictWord{ + 139, + 0, + 892, + }, + dictWord{4, 0, 221}, + dictWord{5, 0, 659}, + dictWord{7, 0, 697}, + dictWord{7, 0, 1211}, + dictWord{138, 0, 284}, + dictWord{133, 0, 989}, + dictWord{ + 133, + 11, + 889, + }, + dictWord{4, 11, 160}, + dictWord{5, 11, 330}, + dictWord{7, 11, 1434}, + dictWord{136, 11, 174}, + dictWord{6, 10, 1665}, + dictWord{7, 10, 256}, + dictWord{ + 7, + 10, + 1388, + }, + dictWord{10, 10, 499}, + dictWord{139, 10, 670}, + dictWord{7, 0, 848}, + dictWord{4, 10, 22}, + dictWord{5, 10, 10}, + dictWord{136, 10, 97}, + dictWord{ + 138, + 0, + 507, + }, + dictWord{133, 10, 481}, + dictWord{4, 0, 188}, + dictWord{135, 0, 805}, + dictWord{5, 0, 884}, + dictWord{6, 0, 732}, + dictWord{139, 0, 991}, + dictWord{ + 135, + 11, + 968, + }, + dictWord{11, 11, 636}, + dictWord{15, 11, 145}, + dictWord{17, 11, 34}, + dictWord{19, 11, 50}, + dictWord{151, 11, 20}, + dictWord{7, 0, 959}, + dictWord{ + 16, + 0, + 60, + }, + dictWord{6, 10, 134}, + dictWord{7, 10, 437}, + dictWord{9, 10, 37}, + dictWord{14, 10, 285}, + dictWord{142, 10, 371}, + dictWord{7, 10, 486}, + dictWord{ + 8, + 10, + 155, + }, + dictWord{11, 10, 93}, + dictWord{140, 10, 164}, + dictWord{134, 0, 1653}, + dictWord{7, 0, 337}, + dictWord{133, 10, 591}, + dictWord{6, 0, 1989}, + dictWord{ + 8, + 0, + 922, + }, + 
dictWord{8, 0, 978}, + dictWord{133, 11, 374}, + dictWord{132, 0, 638}, + dictWord{138, 0, 500}, + dictWord{133, 11, 731}, + dictWord{5, 10, 380}, + dictWord{ + 5, + 10, + 650, + }, + dictWord{136, 10, 310}, + dictWord{138, 11, 381}, + dictWord{4, 10, 364}, + dictWord{7, 10, 1156}, + dictWord{7, 10, 1187}, + dictWord{137, 10, 409}, + dictWord{137, 11, 224}, + dictWord{140, 0, 166}, + dictWord{134, 10, 482}, + dictWord{4, 11, 626}, + dictWord{5, 11, 642}, + dictWord{6, 11, 425}, + dictWord{ + 10, + 11, + 202, + }, + dictWord{139, 11, 141}, + dictWord{4, 10, 781}, + dictWord{6, 10, 487}, + dictWord{7, 10, 926}, + dictWord{8, 10, 263}, + dictWord{139, 10, 500}, + dictWord{ + 135, + 0, + 418, + }, + dictWord{4, 10, 94}, + dictWord{135, 10, 1265}, + dictWord{136, 0, 760}, + dictWord{132, 10, 417}, + dictWord{136, 11, 835}, + dictWord{5, 10, 348}, + dictWord{134, 10, 522}, + dictWord{6, 0, 1277}, + dictWord{134, 0, 1538}, + dictWord{139, 11, 541}, + dictWord{135, 11, 1597}, + dictWord{5, 11, 384}, + dictWord{ + 8, + 11, + 455, + }, + dictWord{140, 11, 48}, + dictWord{136, 0, 770}, + dictWord{5, 11, 264}, + dictWord{134, 11, 184}, + dictWord{4, 0, 89}, + dictWord{5, 0, 489}, + dictWord{ + 6, + 0, + 315, + }, + dictWord{7, 0, 553}, + dictWord{7, 0, 1745}, + dictWord{138, 0, 243}, + dictWord{4, 10, 408}, + dictWord{4, 10, 741}, + dictWord{135, 10, 500}, + dictWord{ + 134, + 0, + 1396, + }, + dictWord{133, 0, 560}, + dictWord{6, 0, 1658}, + dictWord{9, 0, 3}, + dictWord{10, 0, 154}, + dictWord{11, 0, 641}, + dictWord{13, 0, 85}, + dictWord{13, 0, 201}, + dictWord{141, 0, 346}, + dictWord{135, 11, 1595}, + dictWord{5, 11, 633}, + dictWord{6, 11, 28}, + dictWord{7, 11, 219}, + dictWord{135, 11, 1323}, + dictWord{ + 9, + 11, + 769, + }, + dictWord{140, 11, 185}, + dictWord{135, 11, 785}, + dictWord{7, 11, 359}, + dictWord{8, 11, 243}, + dictWord{140, 11, 175}, + dictWord{138, 0, 586}, + dictWord{ + 7, + 0, + 1271, + }, + dictWord{134, 10, 73}, + dictWord{132, 11, 105}, + 
dictWord{4, 0, 166}, + dictWord{5, 0, 505}, + dictWord{134, 0, 1670}, + dictWord{133, 10, 576}, + dictWord{4, 11, 324}, + dictWord{138, 11, 104}, + dictWord{142, 10, 231}, + dictWord{6, 0, 637}, + dictWord{7, 10, 1264}, + dictWord{7, 10, 1678}, + dictWord{ + 11, + 10, + 945, + }, + dictWord{12, 10, 341}, + dictWord{12, 10, 471}, + dictWord{12, 10, 569}, + dictWord{23, 11, 21}, + dictWord{151, 11, 23}, + dictWord{8, 11, 559}, + dictWord{ + 141, + 11, + 109, + }, + dictWord{134, 0, 1947}, + dictWord{7, 0, 445}, + dictWord{8, 0, 307}, + dictWord{8, 0, 704}, + dictWord{10, 0, 41}, + dictWord{10, 0, 439}, + dictWord{ + 11, + 0, + 237, + }, + dictWord{11, 0, 622}, + dictWord{140, 0, 201}, + dictWord{135, 11, 963}, + dictWord{135, 0, 1977}, + dictWord{4, 0, 189}, + dictWord{5, 0, 713}, + dictWord{ + 136, + 0, + 57, + }, + dictWord{138, 0, 371}, + dictWord{135, 10, 538}, + dictWord{132, 0, 552}, + dictWord{6, 0, 883}, + dictWord{133, 10, 413}, + dictWord{6, 0, 923}, + dictWord{ + 132, + 11, + 758, + }, + dictWord{138, 11, 215}, + dictWord{136, 10, 495}, + dictWord{7, 10, 54}, + dictWord{8, 10, 312}, + dictWord{10, 10, 191}, + dictWord{10, 10, 614}, + dictWord{140, 10, 567}, + dictWord{7, 11, 351}, + dictWord{139, 11, 128}, + dictWord{7, 0, 875}, + dictWord{6, 10, 468}, + dictWord{7, 10, 1478}, + dictWord{8, 10, 530}, + dictWord{142, 10, 290}, + dictWord{135, 0, 1788}, + dictWord{17, 0, 49}, + dictWord{133, 11, 918}, + dictWord{12, 11, 398}, + dictWord{20, 11, 39}, + dictWord{ + 21, + 11, + 11, + }, + dictWord{150, 11, 41}, + dictWord{10, 0, 661}, + dictWord{6, 10, 484}, + dictWord{135, 10, 822}, + dictWord{135, 0, 1945}, + dictWord{134, 0, 794}, + dictWord{ + 137, + 10, + 900, + }, + dictWord{135, 10, 1335}, + dictWord{6, 10, 1724}, + dictWord{135, 10, 2022}, + dictWord{132, 11, 340}, + dictWord{134, 0, 1135}, + dictWord{ + 4, + 0, + 784, + }, + dictWord{133, 0, 745}, + dictWord{5, 0, 84}, + dictWord{134, 0, 163}, + dictWord{133, 0, 410}, + dictWord{4, 0, 976}, + 
dictWord{5, 11, 985}, + dictWord{7, 11, 509}, + dictWord{7, 11, 529}, + dictWord{145, 11, 96}, + dictWord{132, 10, 474}, + dictWord{134, 0, 703}, + dictWord{135, 11, 1919}, + dictWord{5, 0, 322}, + dictWord{ + 8, + 0, + 186, + }, + dictWord{9, 0, 262}, + dictWord{10, 0, 187}, + dictWord{142, 0, 208}, + dictWord{135, 10, 1504}, + dictWord{133, 0, 227}, + dictWord{9, 0, 560}, + dictWord{ + 13, + 0, + 208, + }, + dictWord{133, 10, 305}, + dictWord{132, 11, 247}, + dictWord{7, 0, 1395}, + dictWord{8, 0, 486}, + dictWord{9, 0, 236}, + dictWord{9, 0, 878}, + dictWord{ + 10, + 0, + 218, + }, + dictWord{11, 0, 95}, + dictWord{19, 0, 17}, + dictWord{147, 0, 31}, + dictWord{7, 0, 2043}, + dictWord{8, 0, 672}, + dictWord{141, 0, 448}, + dictWord{4, 11, 184}, + dictWord{5, 11, 390}, + dictWord{6, 11, 337}, + dictWord{7, 11, 23}, + dictWord{7, 11, 494}, + dictWord{7, 11, 618}, + dictWord{7, 11, 1456}, + dictWord{8, 11, 27}, + dictWord{ + 8, + 11, + 599, + }, + dictWord{10, 11, 153}, + dictWord{139, 11, 710}, + dictWord{135, 0, 466}, + dictWord{135, 10, 1236}, + dictWord{6, 0, 167}, + dictWord{7, 0, 186}, + dictWord{7, 0, 656}, + dictWord{10, 0, 643}, + dictWord{4, 10, 480}, + dictWord{6, 10, 302}, + dictWord{6, 10, 1642}, + dictWord{7, 10, 837}, + dictWord{7, 10, 1547}, + dictWord{ + 7, + 10, + 1657, + }, + dictWord{8, 10, 429}, + dictWord{9, 10, 228}, + dictWord{13, 10, 289}, + dictWord{13, 10, 343}, + dictWord{147, 10, 101}, + dictWord{134, 0, 1428}, + dictWord{134, 0, 1440}, + dictWord{5, 0, 412}, + dictWord{7, 10, 278}, + dictWord{10, 10, 739}, + dictWord{11, 10, 708}, + dictWord{141, 10, 348}, + dictWord{ + 134, + 0, + 1118, + }, + dictWord{136, 0, 562}, + dictWord{148, 11, 46}, + dictWord{9, 0, 316}, + dictWord{139, 0, 256}, + dictWord{134, 0, 1771}, + dictWord{135, 0, 1190}, + dictWord{137, 0, 132}, + dictWord{10, 11, 227}, + dictWord{11, 11, 497}, + dictWord{11, 11, 709}, + dictWord{140, 11, 415}, + dictWord{143, 0, 66}, + dictWord{6, 11, 360}, + dictWord{7, 11, 1664}, 
+ dictWord{136, 11, 478}, + dictWord{144, 10, 28}, + dictWord{4, 0, 317}, + dictWord{135, 0, 1279}, + dictWord{5, 0, 63}, + dictWord{ + 133, + 0, + 509, + }, + dictWord{136, 11, 699}, + dictWord{145, 10, 36}, + dictWord{134, 0, 1475}, + dictWord{11, 11, 343}, + dictWord{142, 11, 127}, + dictWord{132, 11, 739}, + dictWord{132, 0, 288}, + dictWord{135, 11, 1757}, + dictWord{8, 0, 89}, + dictWord{8, 0, 620}, + dictWord{9, 0, 608}, + dictWord{11, 0, 628}, + dictWord{12, 0, 322}, + dictWord{143, 0, 124}, + dictWord{134, 0, 1225}, + dictWord{7, 0, 1189}, + dictWord{4, 11, 67}, + dictWord{5, 11, 422}, + dictWord{6, 10, 363}, + dictWord{7, 11, 1037}, + dictWord{7, 11, 1289}, + dictWord{7, 11, 1555}, + dictWord{7, 10, 1955}, + dictWord{8, 10, 725}, + dictWord{9, 11, 741}, + dictWord{145, 11, 108}, + dictWord{ + 134, + 0, + 1468, + }, + dictWord{6, 0, 689}, + dictWord{134, 0, 1451}, + dictWord{138, 0, 120}, + dictWord{151, 0, 1}, + dictWord{137, 10, 805}, + dictWord{142, 0, 329}, + dictWord{ + 5, + 10, + 813, + }, + dictWord{135, 10, 2046}, + dictWord{135, 0, 226}, + dictWord{138, 11, 96}, + dictWord{7, 0, 1855}, + dictWord{5, 10, 712}, + dictWord{11, 10, 17}, + dictWord{13, 10, 321}, + dictWord{144, 10, 67}, + dictWord{9, 0, 461}, + dictWord{6, 10, 320}, + dictWord{7, 10, 781}, + dictWord{7, 10, 1921}, + dictWord{9, 10, 55}, + dictWord{ + 10, + 10, + 186, + }, + dictWord{10, 10, 273}, + dictWord{10, 10, 664}, + dictWord{10, 10, 801}, + dictWord{11, 10, 996}, + dictWord{11, 10, 997}, + dictWord{13, 10, 157}, + dictWord{142, 10, 170}, + dictWord{8, 11, 203}, + dictWord{8, 10, 271}, + dictWord{11, 11, 823}, + dictWord{11, 11, 846}, + dictWord{12, 11, 482}, + dictWord{ + 13, + 11, + 133, + }, + dictWord{13, 11, 277}, + dictWord{13, 11, 302}, + dictWord{13, 11, 464}, + dictWord{14, 11, 205}, + dictWord{142, 11, 221}, + dictWord{135, 0, 1346}, + dictWord{4, 11, 449}, + dictWord{133, 11, 718}, + dictWord{134, 0, 85}, + dictWord{14, 0, 299}, + dictWord{7, 10, 103}, + dictWord{7, 
10, 863}, + dictWord{11, 10, 184}, + dictWord{145, 10, 62}, + dictWord{4, 11, 355}, + dictWord{6, 11, 311}, + dictWord{9, 11, 256}, + dictWord{138, 11, 404}, + dictWord{137, 10, 659}, + dictWord{ + 138, + 11, + 758, + }, + dictWord{133, 11, 827}, + dictWord{5, 11, 64}, + dictWord{140, 11, 581}, + dictWord{134, 0, 1171}, + dictWord{4, 11, 442}, + dictWord{7, 11, 1047}, + dictWord{ + 7, + 11, + 1352, + }, + dictWord{135, 11, 1643}, + dictWord{132, 0, 980}, + dictWord{5, 11, 977}, + dictWord{6, 11, 288}, + dictWord{7, 11, 528}, + dictWord{135, 11, 1065}, + dictWord{5, 0, 279}, + dictWord{6, 0, 235}, + dictWord{7, 0, 468}, + dictWord{8, 0, 446}, + dictWord{9, 0, 637}, + dictWord{10, 0, 717}, + dictWord{11, 0, 738}, + dictWord{ + 140, + 0, + 514, + }, + dictWord{132, 0, 293}, + dictWord{11, 10, 337}, + dictWord{142, 10, 303}, + dictWord{136, 11, 285}, + dictWord{5, 0, 17}, + dictWord{6, 0, 371}, + dictWord{ + 9, + 0, + 528, + }, + dictWord{12, 0, 364}, + dictWord{132, 11, 254}, + dictWord{5, 10, 77}, + dictWord{7, 10, 1455}, + dictWord{10, 10, 843}, + dictWord{147, 10, 73}, + dictWord{ + 150, + 0, + 5, + }, + dictWord{132, 10, 458}, + dictWord{6, 11, 12}, + dictWord{7, 11, 1219}, + dictWord{145, 11, 73}, + dictWord{135, 10, 1420}, + dictWord{6, 10, 109}, + dictWord{138, 10, 382}, + dictWord{135, 11, 125}, + dictWord{6, 10, 330}, + dictWord{7, 10, 1084}, + dictWord{139, 10, 142}, + dictWord{6, 11, 369}, + dictWord{ + 6, + 11, + 502, + }, + dictWord{7, 11, 1036}, + dictWord{8, 11, 348}, + dictWord{9, 11, 452}, + dictWord{10, 11, 26}, + dictWord{11, 11, 224}, + dictWord{11, 11, 387}, + dictWord{ + 11, + 11, + 772, + }, + dictWord{12, 11, 95}, + dictWord{12, 11, 629}, + dictWord{13, 11, 195}, + dictWord{13, 11, 207}, + dictWord{13, 11, 241}, + dictWord{14, 11, 260}, + dictWord{ + 14, + 11, + 270, + }, + dictWord{143, 11, 140}, + dictWord{132, 11, 269}, + dictWord{5, 11, 480}, + dictWord{7, 11, 532}, + dictWord{7, 11, 1197}, + dictWord{7, 11, 1358}, + dictWord{8, 11, 291}, + 
dictWord{11, 11, 349}, + dictWord{142, 11, 396}, + dictWord{150, 0, 48}, + dictWord{10, 0, 601}, + dictWord{13, 0, 353}, + dictWord{141, 0, 376}, + dictWord{5, 0, 779}, + dictWord{5, 0, 807}, + dictWord{6, 0, 1655}, + dictWord{134, 0, 1676}, + dictWord{142, 11, 223}, + dictWord{4, 0, 196}, + dictWord{5, 0, 558}, + dictWord{133, 0, 949}, + dictWord{148, 11, 15}, + dictWord{135, 11, 1764}, + dictWord{134, 0, 1322}, + dictWord{132, 0, 752}, + dictWord{139, 0, 737}, + dictWord{ + 135, + 11, + 657, + }, + dictWord{136, 11, 533}, + dictWord{135, 0, 412}, + dictWord{4, 0, 227}, + dictWord{5, 0, 159}, + dictWord{5, 0, 409}, + dictWord{7, 0, 80}, + dictWord{8, 0, 556}, + dictWord{10, 0, 479}, + dictWord{12, 0, 418}, + dictWord{14, 0, 50}, + dictWord{14, 0, 123}, + dictWord{14, 0, 192}, + dictWord{14, 0, 249}, + dictWord{14, 0, 295}, + dictWord{143, 0, 27}, + dictWord{7, 0, 1470}, + dictWord{8, 0, 66}, + dictWord{8, 0, 137}, + dictWord{8, 0, 761}, + dictWord{9, 0, 638}, + dictWord{11, 0, 80}, + dictWord{11, 0, 212}, + dictWord{11, 0, 368}, + dictWord{11, 0, 418}, + dictWord{12, 0, 8}, + dictWord{13, 0, 15}, + dictWord{16, 0, 61}, + dictWord{17, 0, 59}, + dictWord{19, 0, 28}, + dictWord{ + 148, + 0, + 84, + }, + dictWord{135, 10, 1985}, + dictWord{4, 11, 211}, + dictWord{4, 11, 332}, + dictWord{5, 11, 335}, + dictWord{6, 11, 238}, + dictWord{7, 11, 269}, + dictWord{ + 7, + 11, + 811, + }, + dictWord{7, 11, 1797}, + dictWord{8, 10, 122}, + dictWord{8, 11, 836}, + dictWord{9, 11, 507}, + dictWord{141, 11, 242}, + dictWord{6, 0, 683}, + dictWord{ + 134, + 0, + 1252, + }, + dictWord{4, 0, 873}, + dictWord{132, 10, 234}, + dictWord{134, 0, 835}, + dictWord{6, 0, 38}, + dictWord{7, 0, 1220}, + dictWord{8, 0, 185}, + dictWord{8, 0, 256}, + dictWord{9, 0, 22}, + dictWord{9, 0, 331}, + dictWord{10, 0, 738}, + dictWord{11, 0, 205}, + dictWord{11, 0, 540}, + dictWord{11, 0, 746}, + dictWord{13, 0, 465}, + dictWord{ + 14, + 0, + 88, + }, + dictWord{142, 0, 194}, + dictWord{138, 0, 986}, 
+ dictWord{5, 11, 1009}, + dictWord{12, 11, 582}, + dictWord{146, 11, 131}, + dictWord{4, 0, 159}, + dictWord{ + 6, + 0, + 115, + }, + dictWord{7, 0, 252}, + dictWord{7, 0, 257}, + dictWord{7, 0, 1928}, + dictWord{8, 0, 69}, + dictWord{9, 0, 384}, + dictWord{10, 0, 91}, + dictWord{10, 0, 615}, + dictWord{ + 12, + 0, + 375, + }, + dictWord{14, 0, 235}, + dictWord{18, 0, 117}, + dictWord{147, 0, 123}, + dictWord{133, 0, 911}, + dictWord{136, 0, 278}, + dictWord{5, 10, 430}, + dictWord{ + 5, + 10, + 932, + }, + dictWord{6, 10, 131}, + dictWord{7, 10, 417}, + dictWord{9, 10, 522}, + dictWord{11, 10, 314}, + dictWord{141, 10, 390}, + dictWord{14, 10, 149}, + dictWord{14, 10, 399}, + dictWord{143, 10, 57}, + dictWord{4, 0, 151}, + dictWord{7, 0, 1567}, + dictWord{136, 0, 749}, + dictWord{5, 11, 228}, + dictWord{6, 11, 203}, + dictWord{ + 7, + 11, + 156, + }, + dictWord{8, 11, 347}, + dictWord{137, 11, 265}, + dictWord{132, 10, 507}, + dictWord{10, 0, 989}, + dictWord{140, 0, 956}, + dictWord{133, 0, 990}, + dictWord{5, 0, 194}, + dictWord{6, 0, 927}, + dictWord{7, 0, 1662}, + dictWord{9, 0, 90}, + dictWord{140, 0, 564}, + dictWord{4, 10, 343}, + dictWord{133, 10, 511}, + dictWord{133, 0, 425}, + dictWord{7, 10, 455}, + dictWord{138, 10, 591}, + dictWord{4, 0, 774}, + dictWord{7, 11, 476}, + dictWord{7, 11, 1592}, + dictWord{138, 11, 87}, + dictWord{5, 0, 971}, + dictWord{135, 10, 1381}, + dictWord{5, 11, 318}, + dictWord{147, 11, 121}, + dictWord{5, 11, 291}, + dictWord{7, 11, 765}, + dictWord{9, 11, 389}, + dictWord{140, 11, 548}, + dictWord{134, 10, 575}, + dictWord{4, 0, 827}, + dictWord{12, 0, 646}, + dictWord{12, 0, 705}, + dictWord{12, 0, 712}, + dictWord{140, 0, 714}, + dictWord{139, 0, 752}, + dictWord{137, 0, 662}, + dictWord{5, 0, 72}, + dictWord{6, 0, 264}, + dictWord{7, 0, 21}, + dictWord{7, 0, 46}, + dictWord{7, 0, 2013}, + dictWord{ + 8, + 0, + 215, + }, + dictWord{8, 0, 513}, + dictWord{10, 0, 266}, + dictWord{139, 0, 22}, + dictWord{139, 11, 522}, + 
dictWord{6, 0, 239}, + dictWord{7, 0, 118}, + dictWord{10, 0, 95}, + dictWord{11, 0, 603}, + dictWord{13, 0, 443}, + dictWord{14, 0, 160}, + dictWord{143, 0, 4}, + dictWord{6, 0, 431}, + dictWord{134, 0, 669}, + dictWord{7, 10, 1127}, + dictWord{ + 7, + 10, + 1572, + }, + dictWord{10, 10, 297}, + dictWord{10, 10, 422}, + dictWord{11, 10, 764}, + dictWord{11, 10, 810}, + dictWord{12, 10, 264}, + dictWord{13, 10, 102}, + dictWord{13, 10, 300}, + dictWord{13, 10, 484}, + dictWord{14, 10, 147}, + dictWord{14, 10, 229}, + dictWord{17, 10, 71}, + dictWord{18, 10, 118}, + dictWord{ + 147, + 10, + 120, + }, + dictWord{5, 0, 874}, + dictWord{6, 0, 1677}, + dictWord{15, 0, 0}, + dictWord{10, 11, 525}, + dictWord{139, 11, 82}, + dictWord{6, 0, 65}, + dictWord{7, 0, 939}, + dictWord{ + 7, + 0, + 1172, + }, + dictWord{7, 0, 1671}, + dictWord{9, 0, 540}, + dictWord{10, 0, 696}, + dictWord{11, 0, 265}, + dictWord{11, 0, 732}, + dictWord{11, 0, 928}, + dictWord{ + 11, + 0, + 937, + }, + dictWord{141, 0, 438}, + dictWord{134, 0, 1350}, + dictWord{136, 11, 547}, + dictWord{132, 11, 422}, + dictWord{5, 11, 355}, + dictWord{145, 11, 0}, + dictWord{137, 11, 905}, + dictWord{5, 0, 682}, + dictWord{135, 0, 1887}, + dictWord{132, 0, 809}, + dictWord{4, 0, 696}, + dictWord{133, 11, 865}, + dictWord{6, 0, 1074}, + dictWord{6, 0, 1472}, + dictWord{14, 10, 35}, + dictWord{142, 10, 191}, + dictWord{5, 11, 914}, + dictWord{134, 11, 1625}, + dictWord{133, 11, 234}, + dictWord{ + 135, + 11, + 1383, + }, + dictWord{137, 11, 780}, + dictWord{132, 10, 125}, + dictWord{4, 0, 726}, + dictWord{133, 0, 630}, + dictWord{8, 0, 802}, + dictWord{136, 0, 838}, + dictWord{132, 10, 721}, + dictWord{6, 0, 1337}, + dictWord{7, 0, 776}, + dictWord{19, 0, 56}, + dictWord{136, 10, 145}, + dictWord{132, 0, 970}, + dictWord{7, 10, 792}, + dictWord{8, 10, 147}, + dictWord{10, 10, 821}, + dictWord{139, 10, 1021}, + dictWord{139, 10, 970}, + dictWord{8, 0, 940}, + dictWord{137, 0, 797}, + dictWord{ + 135, + 11, + 1312, 
+ }, + dictWord{9, 0, 248}, + dictWord{10, 0, 400}, + dictWord{7, 11, 816}, + dictWord{7, 11, 1241}, + dictWord{7, 10, 1999}, + dictWord{9, 11, 283}, + dictWord{ + 9, + 11, + 520, + }, + dictWord{10, 11, 213}, + dictWord{10, 11, 307}, + dictWord{10, 11, 463}, + dictWord{10, 11, 671}, + dictWord{10, 11, 746}, + dictWord{11, 11, 401}, + dictWord{ + 11, + 11, + 794, + }, + dictWord{12, 11, 517}, + dictWord{18, 11, 107}, + dictWord{147, 11, 115}, + dictWord{6, 0, 1951}, + dictWord{134, 0, 2040}, + dictWord{ + 135, + 11, + 339, + }, + dictWord{13, 0, 41}, + dictWord{15, 0, 93}, + dictWord{5, 10, 168}, + dictWord{5, 10, 930}, + dictWord{8, 10, 74}, + dictWord{9, 10, 623}, + dictWord{12, 10, 500}, + dictWord{140, 10, 579}, + dictWord{6, 0, 118}, + dictWord{7, 0, 215}, + dictWord{7, 0, 1521}, + dictWord{140, 0, 11}, + dictWord{6, 10, 220}, + dictWord{7, 10, 1101}, + dictWord{141, 10, 105}, + dictWord{6, 11, 421}, + dictWord{7, 11, 61}, + dictWord{7, 11, 1540}, + dictWord{10, 11, 11}, + dictWord{138, 11, 501}, + dictWord{7, 0, 615}, + dictWord{138, 0, 251}, + dictWord{140, 11, 631}, + dictWord{135, 0, 1044}, + dictWord{6, 10, 19}, + dictWord{7, 10, 1413}, + dictWord{139, 10, 428}, + dictWord{ + 133, + 0, + 225, + }, + dictWord{7, 10, 96}, + dictWord{8, 10, 401}, + dictWord{8, 10, 703}, + dictWord{137, 10, 896}, + dictWord{145, 10, 116}, + dictWord{6, 11, 102}, + dictWord{ + 7, + 11, + 72, + }, + dictWord{15, 11, 142}, + dictWord{147, 11, 67}, + dictWord{7, 10, 1961}, + dictWord{7, 10, 1965}, + dictWord{8, 10, 702}, + dictWord{136, 10, 750}, + dictWord{ + 7, + 10, + 2030, + }, + dictWord{8, 10, 150}, + dictWord{8, 10, 737}, + dictWord{12, 10, 366}, + dictWord{151, 11, 30}, + dictWord{4, 0, 370}, + dictWord{5, 0, 756}, + dictWord{ + 7, + 0, + 1326, + }, + dictWord{135, 11, 823}, + dictWord{8, 10, 800}, + dictWord{9, 10, 148}, + dictWord{9, 10, 872}, + dictWord{9, 10, 890}, + dictWord{11, 10, 309}, + dictWord{ + 11, + 10, + 1001, + }, + dictWord{13, 10, 267}, + dictWord{141, 
10, 323}, + dictWord{6, 0, 1662}, + dictWord{7, 0, 48}, + dictWord{8, 0, 771}, + dictWord{10, 0, 116}, + dictWord{ + 13, + 0, + 104, + }, + dictWord{14, 0, 105}, + dictWord{14, 0, 184}, + dictWord{15, 0, 168}, + dictWord{19, 0, 92}, + dictWord{148, 0, 68}, + dictWord{10, 0, 209}, + dictWord{ + 135, + 11, + 1870, + }, + dictWord{7, 11, 68}, + dictWord{8, 11, 48}, + dictWord{8, 11, 88}, + dictWord{8, 11, 582}, + dictWord{8, 11, 681}, + dictWord{9, 11, 373}, + dictWord{9, 11, 864}, + dictWord{11, 11, 157}, + dictWord{11, 11, 336}, + dictWord{11, 11, 843}, + dictWord{148, 11, 27}, + dictWord{134, 0, 930}, + dictWord{4, 11, 88}, + dictWord{5, 11, 137}, + dictWord{5, 11, 174}, + dictWord{5, 11, 777}, + dictWord{6, 11, 1664}, + dictWord{6, 11, 1725}, + dictWord{7, 11, 77}, + dictWord{7, 11, 426}, + dictWord{7, 11, 1317}, + dictWord{7, 11, 1355}, + dictWord{8, 11, 126}, + dictWord{8, 11, 563}, + dictWord{9, 11, 523}, + dictWord{9, 11, 750}, + dictWord{10, 11, 310}, + dictWord{10, 11, 836}, + dictWord{11, 11, 42}, + dictWord{11, 11, 318}, + dictWord{11, 11, 731}, + dictWord{12, 11, 68}, + dictWord{12, 11, 92}, + dictWord{12, 11, 507}, + dictWord{12, 11, 692}, + dictWord{13, 11, 81}, + dictWord{13, 11, 238}, + dictWord{13, 11, 374}, + dictWord{18, 11, 138}, + dictWord{19, 11, 78}, + dictWord{19, 11, 111}, + dictWord{20, 11, 55}, + dictWord{20, 11, 77}, + dictWord{148, 11, 92}, + dictWord{4, 11, 938}, + dictWord{135, 11, 1831}, + dictWord{5, 10, 547}, + dictWord{7, 10, 424}, + dictWord{ + 8, + 11, + 617, + }, + dictWord{138, 11, 351}, + dictWord{6, 0, 1286}, + dictWord{6, 11, 1668}, + dictWord{7, 11, 1499}, + dictWord{8, 11, 117}, + dictWord{9, 11, 314}, + dictWord{ + 138, + 11, + 174, + }, + dictWord{6, 0, 759}, + dictWord{6, 0, 894}, + dictWord{7, 11, 707}, + dictWord{139, 11, 563}, + dictWord{4, 0, 120}, + dictWord{135, 0, 1894}, + dictWord{ + 9, + 0, + 385, + }, + dictWord{149, 0, 17}, + dictWord{138, 0, 429}, + dictWord{133, 11, 403}, + dictWord{5, 0, 820}, + 
dictWord{135, 0, 931}, + dictWord{10, 0, 199}, + dictWord{ + 133, + 10, + 133, + }, + dictWord{6, 0, 151}, + dictWord{6, 0, 1675}, + dictWord{7, 0, 383}, + dictWord{151, 0, 10}, + dictWord{6, 0, 761}, + dictWord{136, 10, 187}, + dictWord{ + 8, + 0, + 365, + }, + dictWord{10, 10, 0}, + dictWord{10, 10, 818}, + dictWord{139, 10, 988}, + dictWord{4, 11, 44}, + dictWord{5, 11, 311}, + dictWord{6, 11, 156}, + dictWord{ + 7, + 11, + 639, + }, + dictWord{7, 11, 762}, + dictWord{7, 11, 1827}, + dictWord{9, 11, 8}, + dictWord{9, 11, 462}, + dictWord{148, 11, 83}, + dictWord{4, 11, 346}, + dictWord{7, 11, 115}, + dictWord{9, 11, 180}, + dictWord{9, 11, 456}, + dictWord{138, 11, 363}, + dictWord{136, 10, 685}, + dictWord{7, 0, 1086}, + dictWord{145, 0, 46}, + dictWord{ + 6, + 0, + 1624, + }, + dictWord{11, 0, 11}, + dictWord{12, 0, 422}, + dictWord{13, 0, 444}, + dictWord{142, 0, 360}, + dictWord{6, 0, 1020}, + dictWord{6, 0, 1260}, + dictWord{ + 134, + 0, + 1589, + }, + dictWord{4, 0, 43}, + dictWord{5, 0, 344}, + dictWord{5, 0, 357}, + dictWord{14, 0, 472}, + dictWord{150, 0, 58}, + dictWord{6, 0, 1864}, + dictWord{6, 0, 1866}, + dictWord{6, 0, 1868}, + dictWord{6, 0, 1869}, + dictWord{6, 0, 1874}, + dictWord{6, 0, 1877}, + dictWord{6, 0, 1903}, + dictWord{6, 0, 1911}, + dictWord{9, 0, 920}, + dictWord{ + 9, + 0, + 921, + }, + dictWord{9, 0, 924}, + dictWord{9, 0, 946}, + dictWord{9, 0, 959}, + dictWord{9, 0, 963}, + dictWord{9, 0, 970}, + dictWord{9, 0, 997}, + dictWord{9, 0, 1008}, + dictWord{ + 9, + 0, + 1017, + }, + dictWord{12, 0, 795}, + dictWord{12, 0, 797}, + dictWord{12, 0, 798}, + dictWord{12, 0, 800}, + dictWord{12, 0, 803}, + dictWord{12, 0, 811}, + dictWord{ + 12, + 0, + 820, + }, + dictWord{12, 0, 821}, + dictWord{12, 0, 839}, + dictWord{12, 0, 841}, + dictWord{12, 0, 848}, + dictWord{12, 0, 911}, + dictWord{12, 0, 921}, + dictWord{12, 0, 922}, + dictWord{12, 0, 925}, + dictWord{12, 0, 937}, + dictWord{12, 0, 944}, + dictWord{12, 0, 945}, + dictWord{12, 0, 
953}, + dictWord{15, 0, 184}, + dictWord{15, 0, 191}, + dictWord{15, 0, 199}, + dictWord{15, 0, 237}, + dictWord{15, 0, 240}, + dictWord{15, 0, 243}, + dictWord{15, 0, 246}, + dictWord{18, 0, 203}, + dictWord{21, 0, 40}, + dictWord{ + 21, + 0, + 52, + }, + dictWord{21, 0, 57}, + dictWord{24, 0, 23}, + dictWord{24, 0, 28}, + dictWord{152, 0, 30}, + dictWord{134, 0, 725}, + dictWord{145, 11, 58}, + dictWord{133, 0, 888}, + dictWord{137, 10, 874}, + dictWord{4, 0, 711}, + dictWord{8, 10, 774}, + dictWord{10, 10, 670}, + dictWord{140, 10, 51}, + dictWord{144, 11, 40}, + dictWord{ + 6, + 11, + 185, + }, + dictWord{7, 11, 1899}, + dictWord{139, 11, 673}, + dictWord{137, 10, 701}, + dictWord{137, 0, 440}, + dictWord{4, 11, 327}, + dictWord{5, 11, 478}, + dictWord{ + 7, + 11, + 1332, + }, + dictWord{8, 11, 753}, + dictWord{140, 11, 227}, + dictWord{4, 10, 127}, + dictWord{5, 10, 350}, + dictWord{6, 10, 356}, + dictWord{8, 10, 426}, + dictWord{ + 9, + 10, + 572, + }, + dictWord{10, 10, 247}, + dictWord{139, 10, 312}, + dictWord{5, 11, 1020}, + dictWord{133, 11, 1022}, + dictWord{4, 11, 103}, + dictWord{ + 133, + 11, + 401, + }, + dictWord{6, 0, 1913}, + dictWord{6, 0, 1926}, + dictWord{6, 0, 1959}, + dictWord{9, 0, 914}, + dictWord{9, 0, 939}, + dictWord{9, 0, 952}, + dictWord{9, 0, 979}, + dictWord{ + 9, + 0, + 990, + }, + dictWord{9, 0, 998}, + dictWord{9, 0, 1003}, + dictWord{9, 0, 1023}, + dictWord{12, 0, 827}, + dictWord{12, 0, 834}, + dictWord{12, 0, 845}, + dictWord{ + 12, + 0, + 912, + }, + dictWord{12, 0, 935}, + dictWord{12, 0, 951}, + dictWord{15, 0, 172}, + dictWord{15, 0, 174}, + dictWord{18, 0, 198}, + dictWord{149, 0, 63}, + dictWord{5, 0, 958}, + dictWord{5, 0, 987}, + dictWord{4, 11, 499}, + dictWord{135, 11, 1421}, + dictWord{7, 0, 885}, + dictWord{6, 10, 59}, + dictWord{6, 10, 1762}, + dictWord{9, 10, 603}, + dictWord{141, 10, 397}, + dictWord{10, 11, 62}, + dictWord{141, 11, 164}, + dictWord{4, 0, 847}, + dictWord{135, 0, 326}, + dictWord{11, 0, 276}, + 
dictWord{142, 0, 293}, + dictWord{4, 0, 65}, + dictWord{5, 0, 479}, + dictWord{5, 0, 1004}, + dictWord{7, 0, 1913}, + dictWord{8, 0, 317}, + dictWord{9, 0, 302}, + dictWord{10, 0, 612}, + dictWord{ + 13, + 0, + 22, + }, + dictWord{132, 11, 96}, + dictWord{4, 0, 261}, + dictWord{135, 0, 510}, + dictWord{135, 0, 1514}, + dictWord{6, 10, 111}, + dictWord{7, 10, 4}, + dictWord{8, 10, 163}, + dictWord{8, 10, 776}, + dictWord{138, 10, 566}, + dictWord{4, 0, 291}, + dictWord{9, 0, 515}, + dictWord{12, 0, 152}, + dictWord{12, 0, 443}, + dictWord{13, 0, 392}, + dictWord{142, 0, 357}, + dictWord{7, 11, 399}, + dictWord{135, 11, 1492}, + dictWord{4, 0, 589}, + dictWord{139, 0, 282}, + dictWord{6, 11, 563}, + dictWord{ + 135, + 10, + 1994, + }, + dictWord{5, 10, 297}, + dictWord{135, 10, 1038}, + dictWord{4, 0, 130}, + dictWord{7, 0, 843}, + dictWord{135, 0, 1562}, + dictWord{5, 0, 42}, + dictWord{ + 5, + 0, + 879, + }, + dictWord{7, 0, 245}, + dictWord{7, 0, 324}, + dictWord{7, 0, 1532}, + dictWord{11, 0, 463}, + dictWord{11, 0, 472}, + dictWord{13, 0, 363}, + dictWord{144, 0, 52}, + dictWord{4, 0, 134}, + dictWord{133, 0, 372}, + dictWord{133, 0, 680}, + dictWord{136, 10, 363}, + dictWord{6, 0, 1997}, + dictWord{8, 0, 935}, + dictWord{136, 0, 977}, + dictWord{4, 0, 810}, + dictWord{135, 0, 1634}, + dictWord{135, 10, 1675}, + dictWord{7, 0, 1390}, + dictWord{4, 11, 910}, + dictWord{133, 11, 832}, + dictWord{ + 7, + 10, + 808, + }, + dictWord{8, 11, 266}, + dictWord{139, 11, 578}, + dictWord{132, 0, 644}, + dictWord{4, 0, 982}, + dictWord{138, 0, 867}, + dictWord{132, 10, 280}, + dictWord{ + 135, + 0, + 540, + }, + dictWord{140, 10, 54}, + dictWord{135, 0, 123}, + dictWord{134, 0, 1978}, + dictWord{4, 10, 421}, + dictWord{133, 10, 548}, + dictWord{6, 0, 623}, + dictWord{136, 0, 789}, + dictWord{4, 0, 908}, + dictWord{5, 0, 359}, + dictWord{5, 0, 508}, + dictWord{6, 0, 1723}, + dictWord{7, 0, 343}, + dictWord{7, 0, 1996}, + dictWord{ + 135, + 0, + 2026, + }, + dictWord{134, 0, 
1220}, + dictWord{4, 0, 341}, + dictWord{135, 0, 480}, + dictWord{6, 10, 254}, + dictWord{9, 10, 109}, + dictWord{138, 10, 103}, + dictWord{ + 134, + 0, + 888, + }, + dictWord{8, 11, 528}, + dictWord{137, 11, 348}, + dictWord{7, 0, 1995}, + dictWord{8, 0, 299}, + dictWord{11, 0, 890}, + dictWord{12, 0, 674}, + dictWord{ + 4, + 11, + 20, + }, + dictWord{133, 11, 616}, + dictWord{135, 11, 1094}, + dictWord{134, 10, 1630}, + dictWord{4, 0, 238}, + dictWord{5, 0, 503}, + dictWord{6, 0, 179}, + dictWord{ + 7, + 0, + 2003, + }, + dictWord{8, 0, 381}, + dictWord{8, 0, 473}, + dictWord{9, 0, 149}, + dictWord{10, 0, 788}, + dictWord{15, 0, 45}, + dictWord{15, 0, 86}, + dictWord{20, 0, 110}, + dictWord{150, 0, 57}, + dictWord{133, 10, 671}, + dictWord{4, 11, 26}, + dictWord{5, 11, 429}, + dictWord{6, 11, 245}, + dictWord{7, 11, 704}, + dictWord{7, 11, 1379}, + dictWord{135, 11, 1474}, + dictWord{4, 0, 121}, + dictWord{5, 0, 156}, + dictWord{5, 0, 349}, + dictWord{9, 0, 431}, + dictWord{10, 0, 605}, + dictWord{142, 0, 342}, + dictWord{ + 7, + 11, + 943, + }, + dictWord{139, 11, 614}, + dictWord{132, 10, 889}, + dictWord{132, 11, 621}, + dictWord{7, 10, 1382}, + dictWord{7, 11, 1382}, + dictWord{ + 135, + 10, + 1910, + }, + dictWord{132, 10, 627}, + dictWord{133, 10, 775}, + dictWord{133, 11, 542}, + dictWord{133, 11, 868}, + dictWord{136, 11, 433}, + dictWord{6, 0, 1373}, + dictWord{7, 0, 1011}, + dictWord{11, 10, 362}, + dictWord{11, 10, 948}, + dictWord{140, 10, 388}, + dictWord{6, 0, 80}, + dictWord{7, 0, 173}, + dictWord{9, 0, 547}, + dictWord{10, 0, 730}, + dictWord{14, 0, 18}, + dictWord{22, 0, 39}, + dictWord{135, 11, 1495}, + dictWord{6, 0, 1694}, + dictWord{135, 0, 1974}, + dictWord{140, 0, 196}, + dictWord{4, 0, 923}, + dictWord{6, 0, 507}, + dictWord{6, 0, 1711}, + dictWord{7, 10, 451}, + dictWord{8, 10, 389}, + dictWord{12, 10, 490}, + dictWord{13, 10, 16}, + dictWord{ + 13, + 10, + 215, + }, + dictWord{13, 10, 351}, + dictWord{18, 10, 132}, + dictWord{147, 10, 
125}, + dictWord{6, 0, 646}, + dictWord{134, 0, 1047}, + dictWord{135, 10, 841}, + dictWord{136, 10, 566}, + dictWord{6, 0, 1611}, + dictWord{135, 0, 1214}, + dictWord{139, 0, 926}, + dictWord{132, 11, 525}, + dictWord{132, 0, 595}, + dictWord{ + 5, + 0, + 240, + }, + dictWord{6, 0, 459}, + dictWord{7, 0, 12}, + dictWord{7, 0, 114}, + dictWord{7, 0, 949}, + dictWord{7, 0, 1753}, + dictWord{7, 0, 1805}, + dictWord{8, 0, 658}, + dictWord{ + 9, + 0, + 1, + }, + dictWord{11, 0, 959}, + dictWord{141, 0, 446}, + dictWord{5, 10, 912}, + dictWord{134, 10, 1695}, + dictWord{132, 0, 446}, + dictWord{7, 11, 62}, + dictWord{ + 12, + 11, + 45, + }, + dictWord{147, 11, 112}, + dictWord{5, 10, 236}, + dictWord{6, 10, 572}, + dictWord{8, 10, 492}, + dictWord{11, 10, 618}, + dictWord{144, 10, 56}, + dictWord{ + 5, + 10, + 190, + }, + dictWord{136, 10, 318}, + dictWord{135, 10, 1376}, + dictWord{4, 11, 223}, + dictWord{6, 11, 359}, + dictWord{11, 11, 3}, + dictWord{13, 11, 108}, + dictWord{ + 14, + 11, + 89, + }, + dictWord{144, 11, 22}, + dictWord{132, 11, 647}, + dictWord{134, 0, 490}, + dictWord{134, 0, 491}, + dictWord{134, 0, 1584}, + dictWord{ + 135, + 11, + 685, + }, + dictWord{138, 11, 220}, + dictWord{7, 0, 250}, + dictWord{136, 0, 507}, + dictWord{132, 0, 158}, + dictWord{4, 0, 140}, + dictWord{7, 0, 362}, + dictWord{8, 0, 209}, + dictWord{9, 0, 10}, + dictWord{9, 0, 160}, + dictWord{9, 0, 503}, + dictWord{9, 0, 614}, + dictWord{10, 0, 689}, + dictWord{11, 0, 327}, + dictWord{11, 0, 553}, + dictWord{ + 11, + 0, + 725, + }, + dictWord{11, 0, 767}, + dictWord{12, 0, 252}, + dictWord{12, 0, 583}, + dictWord{13, 0, 192}, + dictWord{14, 0, 269}, + dictWord{14, 0, 356}, + dictWord{148, 0, 50}, + dictWord{19, 0, 1}, + dictWord{19, 0, 26}, + dictWord{150, 0, 9}, + dictWord{132, 11, 109}, + dictWord{6, 0, 228}, + dictWord{7, 0, 1341}, + dictWord{9, 0, 408}, + dictWord{ + 138, + 0, + 343, + }, + dictWord{4, 0, 373}, + dictWord{5, 0, 283}, + dictWord{6, 0, 480}, + dictWord{7, 0, 
609}, + dictWord{10, 0, 860}, + dictWord{138, 0, 878}, + dictWord{6, 0, 779}, + dictWord{134, 0, 1209}, + dictWord{4, 0, 557}, + dictWord{7, 11, 263}, + dictWord{7, 11, 628}, + dictWord{136, 11, 349}, + dictWord{132, 0, 548}, + dictWord{7, 0, 197}, + dictWord{8, 0, 142}, + dictWord{8, 0, 325}, + dictWord{9, 0, 150}, + dictWord{9, 0, 596}, + dictWord{10, 0, 350}, + dictWord{10, 0, 353}, + dictWord{11, 0, 74}, + dictWord{ + 11, + 0, + 315, + }, + dictWord{12, 0, 662}, + dictWord{12, 0, 681}, + dictWord{14, 0, 423}, + dictWord{143, 0, 141}, + dictWord{4, 11, 40}, + dictWord{10, 11, 67}, + dictWord{ + 11, + 11, + 117, + }, + dictWord{11, 11, 768}, + dictWord{139, 11, 935}, + dictWord{7, 11, 992}, + dictWord{8, 11, 301}, + dictWord{9, 11, 722}, + dictWord{12, 11, 63}, + dictWord{ + 13, + 11, + 29, + }, + dictWord{14, 11, 161}, + dictWord{143, 11, 18}, + dictWord{6, 0, 1490}, + dictWord{138, 11, 532}, + dictWord{5, 0, 580}, + dictWord{7, 0, 378}, + dictWord{ + 7, + 0, + 674, + }, + dictWord{7, 0, 1424}, + dictWord{15, 0, 83}, + dictWord{16, 0, 11}, + dictWord{15, 11, 83}, + dictWord{144, 11, 11}, + dictWord{6, 0, 1057}, + dictWord{6, 0, 1335}, + dictWord{10, 0, 316}, + dictWord{7, 10, 85}, + dictWord{7, 10, 247}, + dictWord{8, 10, 585}, + dictWord{138, 10, 163}, + dictWord{4, 0, 169}, + dictWord{5, 0, 83}, + dictWord{ + 6, + 0, + 399, + }, + dictWord{6, 0, 579}, + dictWord{6, 0, 1513}, + dictWord{7, 0, 692}, + dictWord{7, 0, 846}, + dictWord{7, 0, 1015}, + dictWord{7, 0, 1799}, + dictWord{8, 0, 403}, + dictWord{9, 0, 394}, + dictWord{10, 0, 133}, + dictWord{12, 0, 4}, + dictWord{12, 0, 297}, + dictWord{12, 0, 452}, + dictWord{16, 0, 81}, + dictWord{18, 0, 25}, + dictWord{21, 0, 14}, + dictWord{22, 0, 12}, + dictWord{151, 0, 18}, + dictWord{134, 0, 1106}, + dictWord{7, 0, 1546}, + dictWord{11, 0, 299}, + dictWord{142, 0, 407}, + dictWord{134, 0, 1192}, + dictWord{132, 0, 177}, + dictWord{5, 0, 411}, + dictWord{135, 0, 653}, + dictWord{7, 0, 439}, + dictWord{10, 0, 727}, + 
dictWord{11, 0, 260}, + dictWord{139, 0, 684}, + dictWord{138, 10, 145}, + dictWord{147, 10, 83}, + dictWord{5, 0, 208}, + dictWord{7, 0, 753}, + dictWord{135, 0, 1528}, + dictWord{137, 11, 617}, + dictWord{ + 135, + 10, + 1922, + }, + dictWord{135, 11, 825}, + dictWord{11, 0, 422}, + dictWord{13, 0, 389}, + dictWord{4, 10, 124}, + dictWord{10, 10, 457}, + dictWord{11, 10, 121}, + dictWord{ + 11, + 10, + 169, + }, + dictWord{11, 10, 870}, + dictWord{12, 10, 214}, + dictWord{14, 10, 187}, + dictWord{143, 10, 77}, + dictWord{11, 0, 615}, + dictWord{15, 0, 58}, + dictWord{ + 11, + 11, + 615, + }, + dictWord{143, 11, 58}, + dictWord{9, 0, 618}, + dictWord{138, 0, 482}, + dictWord{6, 0, 1952}, + dictWord{6, 0, 1970}, + dictWord{142, 0, 505}, + dictWord{ + 7, + 10, + 1193, + }, + dictWord{135, 11, 1838}, + dictWord{133, 0, 242}, + dictWord{135, 10, 1333}, + dictWord{6, 10, 107}, + dictWord{7, 10, 638}, + dictWord{ + 7, + 10, + 1632, + }, + dictWord{137, 10, 396}, + dictWord{133, 0, 953}, + dictWord{5, 10, 370}, + dictWord{134, 10, 1756}, + dictWord{5, 11, 28}, + dictWord{6, 11, 204}, + dictWord{ + 10, + 11, + 320, + }, + dictWord{10, 11, 583}, + dictWord{13, 11, 502}, + dictWord{14, 11, 72}, + dictWord{14, 11, 274}, + dictWord{14, 11, 312}, + dictWord{14, 11, 344}, + dictWord{15, 11, 159}, + dictWord{16, 11, 62}, + dictWord{16, 11, 69}, + dictWord{17, 11, 30}, + dictWord{18, 11, 42}, + dictWord{18, 11, 53}, + dictWord{18, 11, 84}, + dictWord{18, 11, 140}, + dictWord{19, 11, 68}, + dictWord{19, 11, 85}, + dictWord{20, 11, 5}, + dictWord{20, 11, 45}, + dictWord{20, 11, 101}, + dictWord{22, 11, 7}, + dictWord{ + 150, + 11, + 20, + }, + dictWord{4, 11, 558}, + dictWord{6, 11, 390}, + dictWord{7, 11, 162}, + dictWord{7, 11, 689}, + dictWord{9, 11, 360}, + dictWord{138, 11, 653}, + dictWord{ + 11, + 0, + 802, + }, + dictWord{141, 0, 67}, + dictWord{133, 10, 204}, + dictWord{133, 0, 290}, + dictWord{5, 10, 970}, + dictWord{134, 10, 1706}, + dictWord{132, 0, 380}, + dictWord{5, 
0, 52}, + dictWord{7, 0, 277}, + dictWord{9, 0, 368}, + dictWord{139, 0, 791}, + dictWord{5, 11, 856}, + dictWord{6, 11, 1672}, + dictWord{6, 11, 1757}, + dictWord{ + 6, + 11, + 1781, + }, + dictWord{7, 11, 1150}, + dictWord{7, 11, 1425}, + dictWord{7, 11, 1453}, + dictWord{140, 11, 513}, + dictWord{5, 11, 92}, + dictWord{7, 10, 3}, + dictWord{ + 10, + 11, + 736, + }, + dictWord{140, 11, 102}, + dictWord{4, 0, 112}, + dictWord{5, 0, 653}, + dictWord{5, 10, 483}, + dictWord{5, 10, 685}, + dictWord{6, 10, 489}, + dictWord{ + 7, + 10, + 1204, + }, + dictWord{136, 10, 394}, + dictWord{132, 10, 921}, + dictWord{6, 0, 1028}, + dictWord{133, 10, 1007}, + dictWord{5, 11, 590}, + dictWord{9, 11, 213}, + dictWord{145, 11, 91}, + dictWord{135, 10, 1696}, + dictWord{10, 0, 138}, + dictWord{139, 0, 476}, + dictWord{5, 0, 725}, + dictWord{5, 0, 727}, + dictWord{135, 0, 1811}, + dictWord{4, 0, 979}, + dictWord{6, 0, 1821}, + dictWord{6, 0, 1838}, + dictWord{8, 0, 876}, + dictWord{8, 0, 883}, + dictWord{8, 0, 889}, + dictWord{8, 0, 893}, + dictWord{ + 8, + 0, + 895, + }, + dictWord{10, 0, 934}, + dictWord{12, 0, 720}, + dictWord{14, 0, 459}, + dictWord{148, 0, 123}, + dictWord{135, 11, 551}, + dictWord{4, 0, 38}, + dictWord{6, 0, 435}, + dictWord{7, 0, 307}, + dictWord{7, 0, 999}, + dictWord{7, 0, 1481}, + dictWord{7, 0, 1732}, + dictWord{7, 0, 1738}, + dictWord{8, 0, 371}, + dictWord{9, 0, 414}, + dictWord{ + 11, + 0, + 316, + }, + dictWord{12, 0, 52}, + dictWord{13, 0, 420}, + dictWord{147, 0, 100}, + dictWord{135, 0, 1296}, + dictWord{132, 10, 712}, + dictWord{134, 10, 1629}, + dictWord{133, 0, 723}, + dictWord{134, 0, 651}, + dictWord{136, 11, 191}, + dictWord{9, 11, 791}, + dictWord{10, 11, 93}, + dictWord{11, 11, 301}, + dictWord{16, 11, 13}, + dictWord{17, 11, 23}, + dictWord{18, 11, 135}, + dictWord{19, 11, 12}, + dictWord{20, 11, 1}, + dictWord{20, 11, 12}, + dictWord{148, 11, 14}, + dictWord{136, 11, 503}, + dictWord{6, 11, 466}, + dictWord{135, 11, 671}, + dictWord{6, 
0, 1200}, + dictWord{134, 0, 1330}, + dictWord{135, 0, 1255}, + dictWord{134, 0, 986}, + dictWord{ + 5, + 0, + 109, + }, + dictWord{6, 0, 1784}, + dictWord{7, 0, 1895}, + dictWord{12, 0, 296}, + dictWord{140, 0, 302}, + dictWord{135, 11, 983}, + dictWord{133, 10, 485}, + dictWord{ + 134, + 0, + 660, + }, + dictWord{134, 0, 800}, + dictWord{5, 0, 216}, + dictWord{5, 0, 294}, + dictWord{6, 0, 591}, + dictWord{7, 0, 1879}, + dictWord{9, 0, 141}, + dictWord{9, 0, 270}, + dictWord{9, 0, 679}, + dictWord{10, 0, 159}, + dictWord{11, 0, 197}, + dictWord{11, 0, 438}, + dictWord{12, 0, 538}, + dictWord{12, 0, 559}, + dictWord{14, 0, 144}, + dictWord{ + 14, + 0, + 167, + }, + dictWord{15, 0, 67}, + dictWord{4, 10, 285}, + dictWord{5, 10, 317}, + dictWord{6, 10, 301}, + dictWord{7, 10, 7}, + dictWord{8, 10, 153}, + dictWord{ + 10, + 10, + 766, + }, + dictWord{11, 10, 468}, + dictWord{12, 10, 467}, + dictWord{141, 10, 143}, + dictWord{136, 0, 945}, + dictWord{134, 0, 1090}, + dictWord{137, 0, 81}, + dictWord{12, 11, 468}, + dictWord{19, 11, 96}, + dictWord{148, 11, 24}, + dictWord{134, 0, 391}, + dictWord{138, 11, 241}, + dictWord{7, 0, 322}, + dictWord{136, 0, 249}, + dictWord{134, 0, 1412}, + dictWord{135, 11, 795}, + dictWord{5, 0, 632}, + dictWord{138, 0, 526}, + dictWord{136, 10, 819}, + dictWord{6, 0, 144}, + dictWord{7, 0, 948}, + dictWord{7, 0, 1042}, + dictWord{8, 0, 235}, + dictWord{8, 0, 461}, + dictWord{9, 0, 453}, + dictWord{9, 0, 796}, + dictWord{10, 0, 354}, + dictWord{17, 0, 77}, + dictWord{ + 135, + 11, + 954, + }, + dictWord{139, 10, 917}, + dictWord{6, 0, 940}, + dictWord{134, 0, 1228}, + dictWord{4, 0, 362}, + dictWord{7, 0, 52}, + dictWord{135, 0, 303}, + dictWord{ + 6, + 11, + 549, + }, + dictWord{8, 11, 34}, + dictWord{8, 11, 283}, + dictWord{9, 11, 165}, + dictWord{138, 11, 475}, + dictWord{7, 11, 370}, + dictWord{7, 11, 1007}, + dictWord{ + 7, + 11, + 1177, + }, + dictWord{135, 11, 1565}, + dictWord{5, 11, 652}, + dictWord{5, 11, 701}, + dictWord{135, 
11, 449}, + dictWord{5, 0, 196}, + dictWord{6, 0, 486}, + dictWord{ + 7, + 0, + 212, + }, + dictWord{8, 0, 309}, + dictWord{136, 0, 346}, + dictWord{6, 10, 1719}, + dictWord{6, 10, 1735}, + dictWord{7, 10, 2016}, + dictWord{7, 10, 2020}, + dictWord{ + 8, + 10, + 837, + }, + dictWord{137, 10, 852}, + dictWord{6, 11, 159}, + dictWord{6, 11, 364}, + dictWord{7, 11, 516}, + dictWord{7, 11, 1439}, + dictWord{137, 11, 518}, + dictWord{135, 0, 1912}, + dictWord{135, 0, 1290}, + dictWord{132, 0, 686}, + dictWord{141, 11, 151}, + dictWord{138, 0, 625}, + dictWord{136, 0, 706}, + dictWord{ + 138, + 10, + 568, + }, + dictWord{139, 0, 412}, + dictWord{4, 0, 30}, + dictWord{133, 0, 43}, + dictWord{8, 10, 67}, + dictWord{138, 10, 419}, + dictWord{7, 0, 967}, + dictWord{ + 141, + 0, + 11, + }, + dictWord{12, 0, 758}, + dictWord{14, 0, 441}, + dictWord{142, 0, 462}, + dictWord{10, 10, 657}, + dictWord{14, 10, 297}, + dictWord{142, 10, 361}, + dictWord{ + 139, + 10, + 729, + }, + dictWord{4, 0, 220}, + dictWord{135, 0, 1535}, + dictWord{7, 11, 501}, + dictWord{9, 11, 111}, + dictWord{10, 11, 141}, + dictWord{11, 11, 332}, + dictWord{ + 13, + 11, + 43, + }, + dictWord{13, 11, 429}, + dictWord{14, 11, 130}, + dictWord{14, 11, 415}, + dictWord{145, 11, 102}, + dictWord{4, 0, 950}, + dictWord{6, 0, 1859}, + dictWord{ + 7, + 0, + 11, + }, + dictWord{8, 0, 873}, + dictWord{12, 0, 710}, + dictWord{12, 0, 718}, + dictWord{12, 0, 748}, + dictWord{12, 0, 765}, + dictWord{148, 0, 124}, + dictWord{ + 5, + 11, + 149, + }, + dictWord{5, 11, 935}, + dictWord{136, 11, 233}, + dictWord{142, 11, 291}, + dictWord{134, 0, 1579}, + dictWord{7, 0, 890}, + dictWord{8, 10, 51}, + dictWord{ + 9, + 10, + 868, + }, + dictWord{10, 10, 833}, + dictWord{12, 10, 481}, + dictWord{12, 10, 570}, + dictWord{148, 10, 106}, + dictWord{141, 0, 2}, + dictWord{132, 10, 445}, + dictWord{136, 11, 801}, + dictWord{135, 0, 1774}, + dictWord{7, 0, 1725}, + dictWord{138, 0, 393}, + dictWord{5, 0, 263}, + dictWord{134, 0, 414}, 
+ dictWord{ + 132, + 11, + 322, + }, + dictWord{133, 10, 239}, + dictWord{7, 0, 456}, + dictWord{7, 10, 1990}, + dictWord{8, 10, 130}, + dictWord{139, 10, 720}, + dictWord{137, 0, 818}, + dictWord{ + 5, + 10, + 123, + }, + dictWord{6, 10, 530}, + dictWord{7, 10, 348}, + dictWord{135, 10, 1419}, + dictWord{135, 10, 2024}, + dictWord{6, 0, 178}, + dictWord{6, 0, 1750}, + dictWord{8, 0, 251}, + dictWord{9, 0, 690}, + dictWord{10, 0, 155}, + dictWord{10, 0, 196}, + dictWord{10, 0, 373}, + dictWord{11, 0, 698}, + dictWord{13, 0, 155}, + dictWord{ + 148, + 0, + 93, + }, + dictWord{5, 0, 97}, + dictWord{137, 0, 393}, + dictWord{134, 0, 674}, + dictWord{11, 0, 223}, + dictWord{140, 0, 168}, + dictWord{132, 10, 210}, + dictWord{ + 139, + 11, + 464, + }, + dictWord{6, 0, 1639}, + dictWord{146, 0, 159}, + dictWord{139, 11, 2}, + dictWord{7, 0, 934}, + dictWord{8, 0, 647}, + dictWord{17, 0, 97}, + dictWord{19, 0, 59}, + dictWord{150, 0, 2}, + dictWord{132, 0, 191}, + dictWord{5, 0, 165}, + dictWord{9, 0, 346}, + dictWord{10, 0, 655}, + dictWord{11, 0, 885}, + dictWord{4, 10, 430}, + dictWord{135, 11, 357}, + dictWord{133, 0, 877}, + dictWord{5, 10, 213}, + dictWord{133, 11, 406}, + dictWord{8, 0, 128}, + dictWord{139, 0, 179}, + dictWord{6, 11, 69}, + dictWord{135, 11, 117}, + dictWord{135, 0, 1297}, + dictWord{11, 11, 43}, + dictWord{13, 11, 72}, + dictWord{141, 11, 142}, + dictWord{135, 11, 1830}, + dictWord{ + 142, + 0, + 164, + }, + dictWord{5, 0, 57}, + dictWord{6, 0, 101}, + dictWord{6, 0, 586}, + dictWord{6, 0, 1663}, + dictWord{7, 0, 132}, + dictWord{7, 0, 1154}, + dictWord{7, 0, 1415}, + dictWord{7, 0, 1507}, + dictWord{12, 0, 493}, + dictWord{15, 0, 105}, + dictWord{151, 0, 15}, + dictWord{5, 0, 459}, + dictWord{7, 0, 1073}, + dictWord{8, 0, 241}, + dictWord{ + 136, + 0, + 334, + }, + dictWord{133, 11, 826}, + dictWord{133, 10, 108}, + dictWord{5, 10, 219}, + dictWord{10, 11, 132}, + dictWord{11, 11, 191}, + dictWord{11, 11, 358}, + dictWord{139, 11, 460}, + 
dictWord{6, 0, 324}, + dictWord{6, 0, 520}, + dictWord{7, 0, 338}, + dictWord{7, 0, 1729}, + dictWord{8, 0, 228}, + dictWord{139, 0, 750}, + dictWord{ + 21, + 0, + 30, + }, + dictWord{22, 0, 53}, + dictWord{4, 10, 193}, + dictWord{5, 10, 916}, + dictWord{7, 10, 364}, + dictWord{10, 10, 398}, + dictWord{10, 10, 726}, + dictWord{ + 11, + 10, + 317, + }, + dictWord{11, 10, 626}, + dictWord{12, 10, 142}, + dictWord{12, 10, 288}, + dictWord{12, 10, 678}, + dictWord{13, 10, 313}, + dictWord{15, 10, 113}, + dictWord{146, 10, 114}, + dictWord{6, 11, 110}, + dictWord{135, 11, 1681}, + dictWord{135, 0, 910}, + dictWord{6, 10, 241}, + dictWord{7, 10, 907}, + dictWord{8, 10, 832}, + dictWord{9, 10, 342}, + dictWord{10, 10, 729}, + dictWord{11, 10, 284}, + dictWord{11, 10, 445}, + dictWord{11, 10, 651}, + dictWord{11, 10, 863}, + dictWord{ + 13, + 10, + 398, + }, + dictWord{146, 10, 99}, + dictWord{7, 0, 705}, + dictWord{9, 0, 734}, + dictWord{5, 11, 1000}, + dictWord{7, 11, 733}, + dictWord{137, 11, 583}, + dictWord{4, 0, 73}, + dictWord{6, 0, 612}, + dictWord{7, 0, 927}, + dictWord{7, 0, 1822}, + dictWord{8, 0, 217}, + dictWord{9, 0, 765}, + dictWord{9, 0, 766}, + dictWord{10, 0, 408}, + dictWord{ + 11, + 0, + 51, + }, + dictWord{11, 0, 793}, + dictWord{12, 0, 266}, + dictWord{15, 0, 158}, + dictWord{20, 0, 89}, + dictWord{150, 0, 32}, + dictWord{7, 0, 1330}, + dictWord{4, 11, 297}, + dictWord{6, 11, 529}, + dictWord{7, 11, 152}, + dictWord{7, 11, 713}, + dictWord{7, 11, 1845}, + dictWord{8, 11, 710}, + dictWord{8, 11, 717}, + dictWord{140, 11, 639}, + dictWord{5, 0, 389}, + dictWord{136, 0, 636}, + dictWord{134, 0, 1409}, + dictWord{4, 10, 562}, + dictWord{9, 10, 254}, + dictWord{139, 10, 879}, + dictWord{134, 0, 893}, + dictWord{132, 10, 786}, + dictWord{4, 11, 520}, + dictWord{135, 11, 575}, + dictWord{136, 0, 21}, + dictWord{140, 0, 721}, + dictWord{136, 0, 959}, + dictWord{ + 7, + 11, + 1428, + }, + dictWord{7, 11, 1640}, + dictWord{9, 11, 169}, + dictWord{9, 11, 182}, + 
dictWord{9, 11, 367}, + dictWord{9, 11, 478}, + dictWord{9, 11, 506}, + dictWord{ + 9, + 11, + 551, + }, + dictWord{9, 11, 648}, + dictWord{9, 11, 651}, + dictWord{9, 11, 697}, + dictWord{9, 11, 705}, + dictWord{9, 11, 725}, + dictWord{9, 11, 787}, + dictWord{9, 11, 794}, + dictWord{10, 11, 198}, + dictWord{10, 11, 214}, + dictWord{10, 11, 267}, + dictWord{10, 11, 275}, + dictWord{10, 11, 456}, + dictWord{10, 11, 551}, + dictWord{ + 10, + 11, + 561, + }, + dictWord{10, 11, 613}, + dictWord{10, 11, 627}, + dictWord{10, 11, 668}, + dictWord{10, 11, 675}, + dictWord{10, 11, 691}, + dictWord{10, 11, 695}, + dictWord{10, 11, 707}, + dictWord{10, 11, 715}, + dictWord{11, 11, 183}, + dictWord{11, 11, 201}, + dictWord{11, 11, 244}, + dictWord{11, 11, 262}, + dictWord{ + 11, + 11, + 352, + }, + dictWord{11, 11, 439}, + dictWord{11, 11, 493}, + dictWord{11, 11, 572}, + dictWord{11, 11, 591}, + dictWord{11, 11, 608}, + dictWord{11, 11, 611}, + dictWord{ + 11, + 11, + 646, + }, + dictWord{11, 11, 674}, + dictWord{11, 11, 711}, + dictWord{11, 11, 751}, + dictWord{11, 11, 761}, + dictWord{11, 11, 776}, + dictWord{11, 11, 785}, + dictWord{11, 11, 850}, + dictWord{11, 11, 853}, + dictWord{11, 11, 862}, + dictWord{11, 11, 865}, + dictWord{11, 11, 868}, + dictWord{11, 11, 898}, + dictWord{ + 11, + 11, + 902, + }, + dictWord{11, 11, 903}, + dictWord{11, 11, 910}, + dictWord{11, 11, 932}, + dictWord{11, 11, 942}, + dictWord{11, 11, 957}, + dictWord{11, 11, 967}, + dictWord{ + 11, + 11, + 972, + }, + dictWord{12, 11, 148}, + dictWord{12, 11, 195}, + dictWord{12, 11, 220}, + dictWord{12, 11, 237}, + dictWord{12, 11, 318}, + dictWord{12, 11, 339}, + dictWord{12, 11, 393}, + dictWord{12, 11, 445}, + dictWord{12, 11, 450}, + dictWord{12, 11, 474}, + dictWord{12, 11, 509}, + dictWord{12, 11, 533}, + dictWord{ + 12, + 11, + 591, + }, + dictWord{12, 11, 594}, + dictWord{12, 11, 597}, + dictWord{12, 11, 621}, + dictWord{12, 11, 633}, + dictWord{12, 11, 642}, + dictWord{13, 11, 59}, + dictWord{ 
+ 13, + 11, + 60, + }, + dictWord{13, 11, 145}, + dictWord{13, 11, 239}, + dictWord{13, 11, 250}, + dictWord{13, 11, 273}, + dictWord{13, 11, 329}, + dictWord{13, 11, 344}, + dictWord{13, 11, 365}, + dictWord{13, 11, 372}, + dictWord{13, 11, 387}, + dictWord{13, 11, 403}, + dictWord{13, 11, 414}, + dictWord{13, 11, 456}, + dictWord{ + 13, + 11, + 478, + }, + dictWord{13, 11, 483}, + dictWord{13, 11, 489}, + dictWord{14, 11, 55}, + dictWord{14, 11, 57}, + dictWord{14, 11, 81}, + dictWord{14, 11, 90}, + dictWord{ + 14, + 11, + 148, + }, + dictWord{14, 11, 239}, + dictWord{14, 11, 266}, + dictWord{14, 11, 321}, + dictWord{14, 11, 326}, + dictWord{14, 11, 327}, + dictWord{14, 11, 330}, + dictWord{ + 14, + 11, + 347, + }, + dictWord{14, 11, 355}, + dictWord{14, 11, 401}, + dictWord{14, 11, 411}, + dictWord{14, 11, 414}, + dictWord{14, 11, 416}, + dictWord{14, 11, 420}, + dictWord{15, 11, 61}, + dictWord{15, 11, 74}, + dictWord{15, 11, 87}, + dictWord{15, 11, 88}, + dictWord{15, 11, 94}, + dictWord{15, 11, 96}, + dictWord{15, 11, 116}, + dictWord{15, 11, 149}, + dictWord{15, 11, 154}, + dictWord{16, 11, 50}, + dictWord{16, 11, 63}, + dictWord{16, 11, 73}, + dictWord{17, 11, 2}, + dictWord{17, 11, 66}, + dictWord{ + 17, + 11, + 92, + }, + dictWord{17, 11, 103}, + dictWord{17, 11, 112}, + dictWord{18, 11, 50}, + dictWord{18, 11, 54}, + dictWord{18, 11, 82}, + dictWord{18, 11, 86}, + dictWord{ + 18, + 11, + 90, + }, + dictWord{18, 11, 111}, + dictWord{18, 11, 115}, + dictWord{18, 11, 156}, + dictWord{19, 11, 40}, + dictWord{19, 11, 79}, + dictWord{20, 11, 78}, + dictWord{ + 149, + 11, + 22, + }, + dictWord{137, 11, 170}, + dictWord{134, 0, 1433}, + dictWord{135, 11, 1307}, + dictWord{139, 11, 411}, + dictWord{5, 0, 189}, + dictWord{7, 0, 442}, + dictWord{7, 0, 443}, + dictWord{8, 0, 281}, + dictWord{12, 0, 174}, + dictWord{141, 0, 261}, + dictWord{6, 10, 216}, + dictWord{7, 10, 901}, + dictWord{7, 10, 1343}, + dictWord{136, 10, 493}, + dictWord{5, 11, 397}, + dictWord{6, 
11, 154}, + dictWord{7, 10, 341}, + dictWord{7, 11, 676}, + dictWord{8, 11, 443}, + dictWord{8, 11, 609}, + dictWord{ + 9, + 11, + 24, + }, + dictWord{9, 11, 325}, + dictWord{10, 11, 35}, + dictWord{11, 10, 219}, + dictWord{11, 11, 535}, + dictWord{11, 11, 672}, + dictWord{11, 11, 1018}, + dictWord{12, 11, 637}, + dictWord{144, 11, 30}, + dictWord{6, 0, 2}, + dictWord{7, 0, 191}, + dictWord{7, 0, 446}, + dictWord{7, 0, 1262}, + dictWord{7, 0, 1737}, + dictWord{8, 0, 22}, + dictWord{8, 0, 270}, + dictWord{8, 0, 612}, + dictWord{9, 0, 4}, + dictWord{9, 0, 312}, + dictWord{9, 0, 436}, + dictWord{9, 0, 626}, + dictWord{10, 0, 216}, + dictWord{10, 0, 311}, + dictWord{10, 0, 521}, + dictWord{10, 0, 623}, + dictWord{11, 0, 72}, + dictWord{11, 0, 330}, + dictWord{11, 0, 455}, + dictWord{12, 0, 321}, + dictWord{12, 0, 504}, + dictWord{12, 0, 530}, + dictWord{12, 0, 543}, + dictWord{13, 0, 17}, + dictWord{13, 0, 156}, + dictWord{13, 0, 334}, + dictWord{14, 0, 131}, + dictWord{17, 0, 60}, + dictWord{ + 148, + 0, + 64, + }, + dictWord{7, 0, 354}, + dictWord{10, 0, 410}, + dictWord{139, 0, 815}, + dictWord{139, 10, 130}, + dictWord{7, 10, 1734}, + dictWord{137, 11, 631}, + dictWord{ + 12, + 0, + 425, + }, + dictWord{15, 0, 112}, + dictWord{10, 10, 115}, + dictWord{11, 10, 420}, + dictWord{13, 10, 404}, + dictWord{14, 10, 346}, + dictWord{143, 10, 54}, + dictWord{ + 6, + 0, + 60, + }, + dictWord{6, 0, 166}, + dictWord{7, 0, 374}, + dictWord{7, 0, 670}, + dictWord{7, 0, 1327}, + dictWord{8, 0, 411}, + dictWord{8, 0, 435}, + dictWord{9, 0, 653}, + dictWord{ + 9, + 0, + 740, + }, + dictWord{10, 0, 385}, + dictWord{11, 0, 222}, + dictWord{11, 0, 324}, + dictWord{11, 0, 829}, + dictWord{140, 0, 611}, + dictWord{7, 0, 1611}, + dictWord{ + 13, + 0, + 14, + }, + dictWord{15, 0, 44}, + dictWord{19, 0, 13}, + dictWord{148, 0, 76}, + dictWord{133, 11, 981}, + dictWord{4, 11, 56}, + dictWord{7, 11, 1791}, + dictWord{8, 11, 607}, + dictWord{8, 11, 651}, + dictWord{11, 11, 465}, + 
dictWord{11, 11, 835}, + dictWord{12, 11, 337}, + dictWord{141, 11, 480}, + dictWord{6, 0, 1478}, + dictWord{ + 5, + 10, + 1011, + }, + dictWord{136, 10, 701}, + dictWord{139, 0, 596}, + dictWord{5, 0, 206}, + dictWord{134, 0, 398}, + dictWord{4, 10, 54}, + dictWord{5, 10, 666}, + dictWord{ + 7, + 10, + 1039, + }, + dictWord{7, 10, 1130}, + dictWord{9, 10, 195}, + dictWord{138, 10, 302}, + dictWord{7, 0, 50}, + dictWord{9, 11, 158}, + dictWord{138, 11, 411}, + dictWord{ + 135, + 11, + 1120, + }, + dictWord{6, 0, 517}, + dictWord{7, 0, 1159}, + dictWord{10, 0, 621}, + dictWord{11, 0, 192}, + dictWord{134, 10, 1669}, + dictWord{4, 0, 592}, + dictWord{ + 6, + 0, + 600, + }, + dictWord{135, 0, 1653}, + dictWord{10, 0, 223}, + dictWord{139, 0, 645}, + dictWord{136, 11, 139}, + dictWord{7, 0, 64}, + dictWord{136, 0, 245}, + dictWord{ + 142, + 0, + 278, + }, + dictWord{6, 11, 622}, + dictWord{135, 11, 1030}, + dictWord{136, 0, 604}, + dictWord{134, 0, 1502}, + dictWord{138, 0, 265}, + dictWord{ + 141, + 11, + 168, + }, + dictWord{7, 0, 1763}, + dictWord{140, 0, 310}, + dictWord{7, 10, 798}, + dictWord{139, 11, 719}, + dictWord{7, 11, 160}, + dictWord{10, 11, 624}, + dictWord{ + 142, + 11, + 279, + }, + dictWord{132, 11, 363}, + dictWord{7, 10, 122}, + dictWord{9, 10, 259}, + dictWord{10, 10, 84}, + dictWord{11, 10, 470}, + dictWord{12, 10, 541}, + dictWord{141, 10, 379}, + dictWord{5, 0, 129}, + dictWord{6, 0, 61}, + dictWord{135, 0, 947}, + dictWord{134, 0, 1356}, + dictWord{135, 11, 1191}, + dictWord{13, 0, 505}, + dictWord{141, 0, 506}, + dictWord{11, 0, 1000}, + dictWord{5, 10, 82}, + dictWord{5, 10, 131}, + dictWord{7, 10, 1755}, + dictWord{8, 10, 31}, + dictWord{9, 10, 168}, + dictWord{9, 10, 764}, + dictWord{139, 10, 869}, + dictWord{134, 0, 966}, + dictWord{134, 10, 605}, + dictWord{134, 11, 292}, + dictWord{5, 11, 177}, + dictWord{ + 6, + 11, + 616, + }, + dictWord{7, 11, 827}, + dictWord{9, 11, 525}, + dictWord{138, 11, 656}, + dictWord{135, 11, 1486}, + 
dictWord{138, 11, 31}, + dictWord{5, 10, 278}, + dictWord{137, 10, 68}, + dictWord{4, 10, 163}, + dictWord{5, 10, 201}, + dictWord{5, 10, 307}, + dictWord{5, 10, 310}, + dictWord{6, 10, 335}, + dictWord{7, 10, 284}, + dictWord{136, 10, 165}, + dictWord{6, 0, 839}, + dictWord{135, 10, 1660}, + dictWord{136, 10, 781}, + dictWord{6, 10, 33}, + dictWord{135, 10, 1244}, + dictWord{ + 133, + 0, + 637, + }, + dictWord{4, 11, 161}, + dictWord{133, 11, 631}, + dictWord{137, 0, 590}, + dictWord{7, 10, 1953}, + dictWord{136, 10, 720}, + dictWord{5, 0, 280}, + dictWord{ + 7, + 0, + 1226, + }, + dictWord{138, 10, 203}, + dictWord{134, 0, 1386}, + dictWord{5, 0, 281}, + dictWord{6, 0, 1026}, + dictWord{6, 10, 326}, + dictWord{7, 10, 677}, + dictWord{ + 137, + 10, + 425, + }, + dictWord{7, 11, 1557}, + dictWord{135, 11, 1684}, + dictWord{135, 0, 1064}, + dictWord{9, 11, 469}, + dictWord{9, 11, 709}, + dictWord{12, 11, 512}, + dictWord{14, 11, 65}, + dictWord{145, 11, 12}, + dictWord{134, 0, 917}, + dictWord{10, 11, 229}, + dictWord{11, 11, 73}, + dictWord{11, 11, 376}, + dictWord{ + 139, + 11, + 433, + }, + dictWord{7, 0, 555}, + dictWord{9, 0, 192}, + dictWord{13, 0, 30}, + dictWord{13, 0, 49}, + dictWord{15, 0, 150}, + dictWord{16, 0, 76}, + dictWord{20, 0, 52}, + dictWord{ + 7, + 10, + 1316, + }, + dictWord{7, 10, 1412}, + dictWord{7, 10, 1839}, + dictWord{9, 10, 589}, + dictWord{11, 10, 241}, + dictWord{11, 10, 676}, + dictWord{11, 10, 811}, + dictWord{11, 10, 891}, + dictWord{12, 10, 140}, + dictWord{12, 10, 346}, + dictWord{12, 10, 479}, + dictWord{13, 10, 381}, + dictWord{14, 10, 188}, + dictWord{ + 146, + 10, + 30, + }, + dictWord{149, 0, 15}, + dictWord{6, 0, 1882}, + dictWord{6, 0, 1883}, + dictWord{6, 0, 1897}, + dictWord{9, 0, 945}, + dictWord{9, 0, 1014}, + dictWord{9, 0, 1020}, + dictWord{12, 0, 823}, + dictWord{12, 0, 842}, + dictWord{12, 0, 866}, + dictWord{12, 0, 934}, + dictWord{15, 0, 242}, + dictWord{146, 0, 208}, + dictWord{6, 0, 965}, + dictWord{134, 0, 
1499}, + dictWord{7, 0, 33}, + dictWord{7, 0, 120}, + dictWord{8, 0, 489}, + dictWord{9, 0, 319}, + dictWord{10, 0, 820}, + dictWord{11, 0, 1004}, + dictWord{ + 12, + 0, + 379, + }, + dictWord{12, 0, 679}, + dictWord{13, 0, 117}, + dictWord{13, 0, 412}, + dictWord{14, 0, 25}, + dictWord{15, 0, 52}, + dictWord{15, 0, 161}, + dictWord{16, 0, 47}, + dictWord{149, 0, 2}, + dictWord{6, 11, 558}, + dictWord{7, 11, 651}, + dictWord{8, 11, 421}, + dictWord{9, 11, 0}, + dictWord{138, 11, 34}, + dictWord{4, 0, 937}, + dictWord{ + 5, + 0, + 801, + }, + dictWord{7, 0, 473}, + dictWord{5, 10, 358}, + dictWord{7, 10, 1184}, + dictWord{10, 10, 662}, + dictWord{13, 10, 212}, + dictWord{13, 10, 304}, + dictWord{ + 13, + 10, + 333, + }, + dictWord{145, 10, 98}, + dictWord{132, 0, 877}, + dictWord{6, 0, 693}, + dictWord{134, 0, 824}, + dictWord{132, 0, 365}, + dictWord{7, 11, 1832}, + dictWord{ + 138, + 11, + 374, + }, + dictWord{5, 0, 7}, + dictWord{139, 0, 774}, + dictWord{4, 0, 734}, + dictWord{5, 0, 662}, + dictWord{134, 0, 430}, + dictWord{4, 0, 746}, + dictWord{ + 135, + 0, + 1090, + }, + dictWord{5, 0, 360}, + dictWord{8, 0, 237}, + dictWord{10, 0, 231}, + dictWord{147, 0, 124}, + dictWord{138, 11, 348}, + dictWord{6, 11, 6}, + dictWord{7, 11, 81}, + dictWord{7, 11, 771}, + dictWord{7, 11, 1731}, + dictWord{9, 11, 405}, + dictWord{138, 11, 421}, + dictWord{6, 0, 740}, + dictWord{137, 0, 822}, + dictWord{ + 133, + 10, + 946, + }, + dictWord{7, 0, 1485}, + dictWord{136, 0, 929}, + dictWord{7, 10, 411}, + dictWord{8, 10, 631}, + dictWord{9, 10, 323}, + dictWord{10, 10, 355}, + dictWord{ + 11, + 10, + 491, + }, + dictWord{12, 10, 143}, + dictWord{12, 10, 402}, + dictWord{13, 10, 73}, + dictWord{14, 10, 408}, + dictWord{15, 10, 107}, + dictWord{146, 10, 71}, + dictWord{ + 135, + 10, + 590, + }, + dictWord{5, 11, 881}, + dictWord{133, 11, 885}, + dictWord{150, 11, 25}, + dictWord{4, 0, 852}, + dictWord{5, 11, 142}, + dictWord{134, 11, 546}, + dictWord{7, 10, 1467}, + dictWord{8, 10, 
328}, + dictWord{10, 10, 544}, + dictWord{11, 10, 955}, + dictWord{13, 10, 320}, + dictWord{145, 10, 83}, + dictWord{9, 0, 17}, + dictWord{10, 0, 291}, + dictWord{11, 10, 511}, + dictWord{13, 10, 394}, + dictWord{14, 10, 298}, + dictWord{14, 10, 318}, + dictWord{146, 10, 103}, + dictWord{5, 11, 466}, + dictWord{11, 11, 571}, + dictWord{12, 11, 198}, + dictWord{13, 11, 283}, + dictWord{14, 11, 186}, + dictWord{15, 11, 21}, + dictWord{143, 11, 103}, + dictWord{ + 134, + 0, + 1001, + }, + dictWord{4, 11, 185}, + dictWord{5, 11, 257}, + dictWord{5, 11, 839}, + dictWord{5, 11, 936}, + dictWord{7, 11, 171}, + dictWord{9, 11, 399}, + dictWord{ + 10, + 11, + 258, + }, + dictWord{10, 11, 395}, + dictWord{10, 11, 734}, + dictWord{11, 11, 1014}, + dictWord{12, 11, 23}, + dictWord{13, 11, 350}, + dictWord{14, 11, 150}, + dictWord{147, 11, 6}, + dictWord{143, 0, 35}, + dictWord{132, 0, 831}, + dictWord{5, 10, 835}, + dictWord{134, 10, 483}, + dictWord{4, 0, 277}, + dictWord{5, 0, 608}, + dictWord{ + 6, + 0, + 493, + }, + dictWord{7, 0, 457}, + dictWord{12, 0, 384}, + dictWord{7, 11, 404}, + dictWord{7, 11, 1377}, + dictWord{7, 11, 1430}, + dictWord{7, 11, 2017}, + dictWord{ + 8, + 11, + 149, + }, + dictWord{8, 11, 239}, + dictWord{8, 11, 512}, + dictWord{8, 11, 793}, + dictWord{8, 11, 818}, + dictWord{9, 11, 474}, + dictWord{9, 11, 595}, + dictWord{ + 10, + 11, + 122, + }, + dictWord{10, 11, 565}, + dictWord{10, 11, 649}, + dictWord{10, 11, 783}, + dictWord{11, 11, 239}, + dictWord{11, 11, 295}, + dictWord{11, 11, 447}, + dictWord{ + 11, + 11, + 528, + }, + dictWord{11, 11, 639}, + dictWord{11, 11, 800}, + dictWord{11, 11, 936}, + dictWord{12, 11, 25}, + dictWord{12, 11, 73}, + dictWord{12, 11, 77}, + dictWord{12, 11, 157}, + dictWord{12, 11, 316}, + dictWord{12, 11, 390}, + dictWord{12, 11, 391}, + dictWord{12, 11, 394}, + dictWord{12, 11, 395}, + dictWord{ + 12, + 11, + 478, + }, + dictWord{12, 11, 503}, + dictWord{12, 11, 592}, + dictWord{12, 11, 680}, + dictWord{13, 11, 
50}, + dictWord{13, 11, 53}, + dictWord{13, 11, 132}, + dictWord{ + 13, + 11, + 198, + }, + dictWord{13, 11, 275}, + dictWord{13, 11, 322}, + dictWord{13, 11, 415}, + dictWord{14, 11, 71}, + dictWord{14, 11, 257}, + dictWord{14, 11, 395}, + dictWord{15, 11, 71}, + dictWord{15, 11, 136}, + dictWord{17, 11, 123}, + dictWord{18, 11, 93}, + dictWord{147, 11, 58}, + dictWord{134, 0, 1351}, + dictWord{7, 0, 27}, + dictWord{135, 0, 316}, + dictWord{136, 11, 712}, + dictWord{136, 0, 984}, + dictWord{133, 0, 552}, + dictWord{137, 0, 264}, + dictWord{132, 0, 401}, + dictWord{6, 0, 710}, + dictWord{6, 0, 1111}, + dictWord{134, 0, 1343}, + dictWord{134, 0, 1211}, + dictWord{9, 0, 543}, + dictWord{10, 0, 524}, + dictWord{11, 0, 108}, + dictWord{11, 0, 653}, + dictWord{12, 0, 524}, + dictWord{13, 0, 123}, + dictWord{14, 0, 252}, + dictWord{16, 0, 18}, + dictWord{19, 0, 38}, + dictWord{20, 0, 26}, + dictWord{20, 0, 65}, + dictWord{ + 21, + 0, + 3, + }, + dictWord{151, 0, 11}, + dictWord{4, 0, 205}, + dictWord{5, 0, 623}, + dictWord{7, 0, 104}, + dictWord{8, 0, 519}, + dictWord{137, 0, 716}, + dictWord{132, 10, 677}, + dictWord{4, 11, 377}, + dictWord{152, 11, 13}, + dictWord{135, 11, 1673}, + dictWord{7, 0, 579}, + dictWord{9, 0, 41}, + dictWord{9, 0, 244}, + dictWord{9, 0, 669}, + dictWord{ + 10, + 0, + 5, + }, + dictWord{11, 0, 861}, + dictWord{11, 0, 951}, + dictWord{139, 0, 980}, + dictWord{132, 0, 717}, + dictWord{136, 0, 1011}, + dictWord{132, 0, 805}, + dictWord{ + 4, + 11, + 180, + }, + dictWord{135, 11, 1906}, + dictWord{132, 10, 777}, + dictWord{132, 10, 331}, + dictWord{132, 0, 489}, + dictWord{6, 0, 1024}, + dictWord{4, 11, 491}, + dictWord{133, 10, 747}, + dictWord{135, 11, 1182}, + dictWord{4, 11, 171}, + dictWord{138, 11, 234}, + dictWord{4, 11, 586}, + dictWord{7, 11, 1186}, + dictWord{ + 138, + 11, + 631, + }, + dictWord{135, 0, 892}, + dictWord{135, 11, 336}, + dictWord{9, 11, 931}, + dictWord{10, 11, 334}, + dictWord{148, 11, 71}, + dictWord{137, 0, 473}, + 
dictWord{6, 0, 864}, + dictWord{12, 0, 659}, + dictWord{139, 11, 926}, + dictWord{7, 0, 819}, + dictWord{9, 0, 26}, + dictWord{9, 0, 392}, + dictWord{10, 0, 152}, + dictWord{ + 10, + 0, + 226, + }, + dictWord{11, 0, 19}, + dictWord{12, 0, 276}, + dictWord{12, 0, 426}, + dictWord{12, 0, 589}, + dictWord{13, 0, 460}, + dictWord{15, 0, 97}, + dictWord{19, 0, 48}, + dictWord{148, 0, 104}, + dictWord{135, 0, 51}, + dictWord{133, 10, 326}, + dictWord{4, 10, 691}, + dictWord{146, 10, 16}, + dictWord{9, 0, 130}, + dictWord{11, 0, 765}, + dictWord{10, 10, 680}, + dictWord{10, 10, 793}, + dictWord{141, 10, 357}, + dictWord{133, 11, 765}, + dictWord{8, 0, 229}, + dictWord{6, 10, 32}, + dictWord{7, 10, 385}, + dictWord{7, 10, 757}, + dictWord{7, 10, 1916}, + dictWord{8, 10, 94}, + dictWord{8, 10, 711}, + dictWord{9, 10, 541}, + dictWord{10, 10, 162}, + dictWord{10, 10, 795}, + dictWord{11, 10, 989}, + dictWord{11, 10, 1010}, + dictWord{12, 10, 14}, + dictWord{142, 10, 308}, + dictWord{7, 11, 474}, + dictWord{137, 11, 578}, + dictWord{ + 132, + 0, + 674, + }, + dictWord{132, 0, 770}, + dictWord{5, 0, 79}, + dictWord{7, 0, 1027}, + dictWord{7, 0, 1477}, + dictWord{139, 0, 52}, + dictWord{133, 11, 424}, + dictWord{ + 134, + 0, + 1666, + }, + dictWord{6, 0, 409}, + dictWord{6, 10, 349}, + dictWord{6, 10, 1682}, + dictWord{7, 10, 1252}, + dictWord{8, 10, 112}, + dictWord{8, 11, 714}, + dictWord{ + 9, + 10, + 435, + }, + dictWord{9, 10, 668}, + dictWord{10, 10, 290}, + dictWord{10, 10, 319}, + dictWord{10, 10, 815}, + dictWord{11, 10, 180}, + dictWord{11, 10, 837}, + dictWord{ + 12, + 10, + 240, + }, + dictWord{13, 10, 152}, + dictWord{13, 10, 219}, + dictWord{142, 10, 158}, + dictWord{5, 0, 789}, + dictWord{134, 0, 195}, + dictWord{4, 0, 251}, + dictWord{ + 4, + 0, + 688, + }, + dictWord{7, 0, 513}, + dictWord{135, 0, 1284}, + dictWord{132, 10, 581}, + dictWord{9, 11, 420}, + dictWord{10, 11, 269}, + dictWord{10, 11, 285}, + dictWord{10, 11, 576}, + dictWord{11, 11, 397}, + 
dictWord{13, 11, 175}, + dictWord{145, 11, 90}, + dictWord{6, 10, 126}, + dictWord{7, 10, 573}, + dictWord{8, 10, 397}, + dictWord{142, 10, 44}, + dictWord{132, 11, 429}, + dictWord{133, 0, 889}, + dictWord{4, 0, 160}, + dictWord{5, 0, 330}, + dictWord{7, 0, 1434}, + dictWord{136, 0, 174}, + dictWord{7, 11, 18}, + dictWord{7, 11, 699}, + dictWord{7, 11, 1966}, + dictWord{8, 11, 752}, + dictWord{9, 11, 273}, + dictWord{9, 11, 412}, + dictWord{9, 11, 703}, + dictWord{ + 10, + 11, + 71, + }, + dictWord{10, 11, 427}, + dictWord{10, 11, 508}, + dictWord{146, 11, 97}, + dictWord{6, 0, 872}, + dictWord{134, 0, 899}, + dictWord{133, 10, 926}, + dictWord{134, 0, 1126}, + dictWord{134, 0, 918}, + dictWord{4, 11, 53}, + dictWord{5, 11, 186}, + dictWord{135, 11, 752}, + dictWord{7, 0, 268}, + dictWord{136, 0, 569}, + dictWord{134, 0, 1224}, + dictWord{6, 0, 1361}, + dictWord{7, 10, 1232}, + dictWord{137, 10, 531}, + dictWord{8, 11, 575}, + dictWord{10, 11, 289}, + dictWord{ + 139, + 11, + 319, + }, + dictWord{133, 10, 670}, + dictWord{132, 11, 675}, + dictWord{133, 0, 374}, + dictWord{135, 10, 1957}, + dictWord{133, 0, 731}, + dictWord{11, 0, 190}, + dictWord{15, 0, 49}, + dictWord{11, 11, 190}, + dictWord{143, 11, 49}, + dictWord{4, 0, 626}, + dictWord{5, 0, 506}, + dictWord{5, 0, 642}, + dictWord{6, 0, 425}, + dictWord{ + 10, + 0, + 202, + }, + dictWord{139, 0, 141}, + dictWord{137, 0, 444}, + dictWord{7, 10, 242}, + dictWord{135, 10, 1942}, + dictWord{6, 11, 209}, + dictWord{8, 11, 468}, + dictWord{ + 9, + 11, + 210, + }, + dictWord{11, 11, 36}, + dictWord{12, 11, 28}, + dictWord{12, 11, 630}, + dictWord{13, 11, 21}, + dictWord{13, 11, 349}, + dictWord{14, 11, 7}, + dictWord{ + 145, + 11, + 13, + }, + dictWord{4, 11, 342}, + dictWord{135, 11, 1179}, + dictWord{5, 10, 834}, + dictWord{7, 10, 1202}, + dictWord{8, 10, 14}, + dictWord{9, 10, 481}, + dictWord{ + 137, + 10, + 880, + }, + dictWord{4, 11, 928}, + dictWord{133, 11, 910}, + dictWord{4, 11, 318}, + dictWord{4, 11, 
496}, + dictWord{7, 11, 856}, + dictWord{139, 11, 654}, + dictWord{136, 0, 835}, + dictWord{7, 0, 1526}, + dictWord{138, 10, 465}, + dictWord{151, 0, 17}, + dictWord{135, 0, 477}, + dictWord{4, 10, 357}, + dictWord{6, 10, 172}, + dictWord{7, 10, 143}, + dictWord{137, 10, 413}, + dictWord{6, 0, 1374}, + dictWord{138, 0, 994}, + dictWord{18, 0, 76}, + dictWord{132, 10, 590}, + dictWord{7, 0, 287}, + dictWord{8, 0, 355}, + dictWord{9, 0, 293}, + dictWord{137, 0, 743}, + dictWord{134, 0, 1389}, + dictWord{7, 11, 915}, + dictWord{8, 11, 247}, + dictWord{147, 11, 0}, + dictWord{ + 4, + 11, + 202, + }, + dictWord{5, 11, 382}, + dictWord{6, 11, 454}, + dictWord{7, 11, 936}, + dictWord{7, 11, 1803}, + dictWord{8, 11, 758}, + dictWord{9, 11, 375}, + dictWord{ + 9, + 11, + 895, + }, + dictWord{10, 11, 743}, + dictWord{10, 11, 792}, + dictWord{11, 11, 978}, + dictWord{11, 11, 1012}, + dictWord{142, 11, 109}, + dictWord{5, 0, 384}, + dictWord{8, 0, 455}, + dictWord{140, 0, 48}, + dictWord{132, 11, 390}, + dictWord{5, 10, 169}, + dictWord{7, 10, 333}, + dictWord{136, 10, 45}, + dictWord{5, 0, 264}, + dictWord{134, 0, 184}, + dictWord{138, 11, 791}, + dictWord{133, 11, 717}, + dictWord{132, 10, 198}, + dictWord{6, 11, 445}, + dictWord{7, 11, 332}, + dictWord{ + 137, + 11, + 909, + }, + dictWord{136, 0, 1001}, + dictWord{4, 10, 24}, + dictWord{5, 10, 140}, + dictWord{5, 10, 185}, + dictWord{7, 10, 1500}, + dictWord{11, 10, 565}, + dictWord{ + 139, + 10, + 838, + }, + dictWord{134, 11, 578}, + dictWord{5, 0, 633}, + dictWord{6, 0, 28}, + dictWord{135, 0, 1323}, + dictWord{132, 0, 851}, + dictWord{136, 11, 267}, + dictWord{ + 7, + 0, + 359, + }, + dictWord{8, 0, 243}, + dictWord{140, 0, 175}, + dictWord{4, 10, 334}, + dictWord{133, 10, 593}, + dictWord{141, 11, 87}, + dictWord{136, 11, 766}, + dictWord{10, 0, 287}, + dictWord{12, 0, 138}, + dictWord{10, 11, 287}, + dictWord{140, 11, 138}, + dictWord{4, 0, 105}, + dictWord{132, 0, 740}, + dictWord{140, 10, 116}, + dictWord{134, 0, 
857}, + dictWord{135, 11, 1841}, + dictWord{6, 0, 1402}, + dictWord{137, 0, 819}, + dictWord{132, 11, 584}, + dictWord{132, 10, 709}, + dictWord{ + 133, + 10, + 897, + }, + dictWord{5, 0, 224}, + dictWord{13, 0, 174}, + dictWord{146, 0, 52}, + dictWord{135, 10, 1840}, + dictWord{4, 10, 608}, + dictWord{133, 10, 497}, + dictWord{139, 11, 60}, + dictWord{4, 0, 758}, + dictWord{135, 0, 1649}, + dictWord{4, 11, 226}, + dictWord{4, 11, 326}, + dictWord{135, 11, 1770}, + dictWord{5, 11, 426}, + dictWord{8, 11, 30}, + dictWord{9, 11, 2}, + dictWord{11, 11, 549}, + dictWord{147, 11, 122}, + dictWord{135, 10, 2039}, + dictWord{6, 10, 540}, + dictWord{ + 136, + 10, + 136, + }, + dictWord{4, 0, 573}, + dictWord{8, 0, 655}, + dictWord{4, 10, 897}, + dictWord{133, 10, 786}, + dictWord{7, 0, 351}, + dictWord{139, 0, 128}, + dictWord{ + 133, + 10, + 999, + }, + dictWord{4, 10, 299}, + dictWord{135, 10, 1004}, + dictWord{133, 0, 918}, + dictWord{132, 11, 345}, + dictWord{4, 11, 385}, + dictWord{7, 11, 265}, + dictWord{135, 11, 587}, + dictWord{133, 10, 456}, + dictWord{136, 10, 180}, + dictWord{6, 0, 687}, + dictWord{134, 0, 1537}, + dictWord{4, 11, 347}, + dictWord{ + 5, + 11, + 423, + }, + dictWord{5, 11, 996}, + dictWord{135, 11, 1329}, + dictWord{132, 10, 755}, + dictWord{7, 11, 1259}, + dictWord{9, 11, 125}, + dictWord{11, 11, 65}, + dictWord{140, 11, 285}, + dictWord{5, 11, 136}, + dictWord{6, 11, 136}, + dictWord{136, 11, 644}, + dictWord{134, 0, 1525}, + dictWord{4, 0, 1009}, + dictWord{ + 135, + 0, + 1139, + }, + dictWord{139, 10, 338}, + dictWord{132, 0, 340}, + dictWord{135, 10, 1464}, + dictWord{8, 0, 847}, + dictWord{10, 0, 861}, + dictWord{10, 0, 876}, + dictWord{ + 10, + 0, + 889, + }, + dictWord{10, 0, 922}, + dictWord{10, 0, 929}, + dictWord{10, 0, 933}, + dictWord{12, 0, 784}, + dictWord{140, 0, 791}, + dictWord{139, 0, 176}, + dictWord{ + 9, + 11, + 134, + }, + dictWord{10, 11, 2}, + dictWord{10, 11, 27}, + dictWord{10, 11, 333}, + dictWord{11, 11, 722}, + 
dictWord{143, 11, 1}, + dictWord{4, 11, 433}, + dictWord{ + 133, + 11, + 719, + }, + dictWord{5, 0, 985}, + dictWord{7, 0, 509}, + dictWord{7, 0, 529}, + dictWord{145, 0, 96}, + dictWord{132, 0, 615}, + dictWord{4, 10, 890}, + dictWord{ + 5, + 10, + 805, + }, + dictWord{5, 10, 819}, + dictWord{5, 10, 961}, + dictWord{6, 10, 396}, + dictWord{6, 10, 1631}, + dictWord{6, 10, 1678}, + dictWord{7, 10, 1967}, + dictWord{ + 7, + 10, + 2041, + }, + dictWord{9, 10, 630}, + dictWord{11, 10, 8}, + dictWord{11, 10, 1019}, + dictWord{12, 10, 176}, + dictWord{13, 10, 225}, + dictWord{14, 10, 292}, + dictWord{ + 149, + 10, + 24, + }, + dictWord{135, 0, 1919}, + dictWord{134, 0, 1131}, + dictWord{144, 11, 21}, + dictWord{144, 11, 51}, + dictWord{135, 10, 1815}, + dictWord{4, 0, 247}, + dictWord{7, 10, 1505}, + dictWord{10, 10, 190}, + dictWord{10, 10, 634}, + dictWord{11, 10, 792}, + dictWord{12, 10, 358}, + dictWord{140, 10, 447}, + dictWord{ + 5, + 10, + 0, + }, + dictWord{6, 10, 536}, + dictWord{7, 10, 604}, + dictWord{13, 10, 445}, + dictWord{145, 10, 126}, + dictWord{4, 0, 184}, + dictWord{5, 0, 390}, + dictWord{6, 0, 337}, + dictWord{7, 0, 23}, + dictWord{7, 0, 494}, + dictWord{7, 0, 618}, + dictWord{7, 0, 1456}, + dictWord{8, 0, 27}, + dictWord{8, 0, 599}, + dictWord{10, 0, 153}, + dictWord{ + 139, + 0, + 710, + }, + dictWord{6, 10, 232}, + dictWord{6, 10, 412}, + dictWord{7, 10, 1074}, + dictWord{8, 10, 9}, + dictWord{8, 10, 157}, + dictWord{8, 10, 786}, + dictWord{9, 10, 196}, + dictWord{9, 10, 352}, + dictWord{9, 10, 457}, + dictWord{10, 10, 337}, + dictWord{11, 10, 232}, + dictWord{11, 10, 877}, + dictWord{12, 10, 480}, + dictWord{ + 140, + 10, + 546, + }, + dictWord{13, 0, 38}, + dictWord{135, 10, 958}, + dictWord{4, 10, 382}, + dictWord{136, 10, 579}, + dictWord{4, 10, 212}, + dictWord{135, 10, 1206}, + dictWord{ + 4, + 11, + 555, + }, + dictWord{8, 11, 536}, + dictWord{138, 11, 288}, + dictWord{11, 11, 139}, + dictWord{139, 11, 171}, + dictWord{9, 11, 370}, + 
dictWord{138, 11, 90}, + dictWord{132, 0, 1015}, + dictWord{134, 0, 1088}, + dictWord{5, 10, 655}, + dictWord{135, 11, 977}, + dictWord{134, 0, 1585}, + dictWord{17, 10, 67}, + dictWord{ + 147, + 10, + 74, + }, + dictWord{10, 0, 227}, + dictWord{11, 0, 497}, + dictWord{11, 0, 709}, + dictWord{140, 0, 415}, + dictWord{6, 0, 360}, + dictWord{7, 0, 1664}, + dictWord{ + 136, + 0, + 478, + }, + dictWord{7, 0, 95}, + dictWord{6, 10, 231}, + dictWord{136, 10, 423}, + dictWord{140, 11, 65}, + dictWord{4, 11, 257}, + dictWord{135, 11, 2031}, + dictWord{ + 135, + 11, + 1768, + }, + dictWord{133, 10, 300}, + dictWord{139, 11, 211}, + dictWord{136, 0, 699}, + dictWord{6, 10, 237}, + dictWord{7, 10, 611}, + dictWord{8, 10, 100}, + dictWord{9, 10, 416}, + dictWord{11, 10, 335}, + dictWord{12, 10, 173}, + dictWord{146, 10, 101}, + dictWord{14, 0, 26}, + dictWord{146, 0, 150}, + dictWord{6, 0, 581}, + dictWord{135, 0, 1119}, + dictWord{135, 10, 1208}, + dictWord{132, 0, 739}, + dictWord{6, 11, 83}, + dictWord{6, 11, 1733}, + dictWord{135, 11, 1389}, + dictWord{ + 137, + 0, + 869, + }, + dictWord{4, 0, 67}, + dictWord{5, 0, 422}, + dictWord{7, 0, 1037}, + dictWord{7, 0, 1289}, + dictWord{7, 0, 1555}, + dictWord{9, 0, 741}, + dictWord{145, 0, 108}, + dictWord{133, 10, 199}, + dictWord{12, 10, 427}, + dictWord{146, 10, 38}, + dictWord{136, 0, 464}, + dictWord{142, 0, 42}, + dictWord{10, 0, 96}, + dictWord{8, 11, 501}, + dictWord{137, 11, 696}, + dictWord{134, 11, 592}, + dictWord{4, 0, 512}, + dictWord{4, 0, 966}, + dictWord{5, 0, 342}, + dictWord{6, 0, 1855}, + dictWord{8, 0, 869}, + dictWord{8, 0, 875}, + dictWord{8, 0, 901}, + dictWord{144, 0, 26}, + dictWord{8, 0, 203}, + dictWord{11, 0, 823}, + dictWord{11, 0, 846}, + dictWord{12, 0, 482}, + dictWord{ + 13, + 0, + 277, + }, + dictWord{13, 0, 302}, + dictWord{13, 0, 464}, + dictWord{14, 0, 205}, + dictWord{142, 0, 221}, + dictWord{4, 0, 449}, + dictWord{133, 0, 718}, + dictWord{ + 7, + 11, + 1718, + }, + dictWord{9, 11, 95}, + 
dictWord{9, 11, 274}, + dictWord{10, 11, 279}, + dictWord{10, 11, 317}, + dictWord{10, 11, 420}, + dictWord{11, 11, 303}, + dictWord{ + 11, + 11, + 808, + }, + dictWord{12, 11, 134}, + dictWord{12, 11, 367}, + dictWord{13, 11, 149}, + dictWord{13, 11, 347}, + dictWord{14, 11, 349}, + dictWord{14, 11, 406}, + dictWord{18, 11, 22}, + dictWord{18, 11, 89}, + dictWord{18, 11, 122}, + dictWord{147, 11, 47}, + dictWord{133, 11, 26}, + dictWord{4, 0, 355}, + dictWord{6, 0, 311}, + dictWord{ + 9, + 0, + 256, + }, + dictWord{138, 0, 404}, + dictWord{132, 11, 550}, + dictWord{10, 0, 758}, + dictWord{6, 10, 312}, + dictWord{6, 10, 1715}, + dictWord{10, 10, 584}, + dictWord{11, 10, 546}, + dictWord{11, 10, 692}, + dictWord{12, 10, 259}, + dictWord{12, 10, 295}, + dictWord{13, 10, 46}, + dictWord{141, 10, 154}, + dictWord{ + 136, + 11, + 822, + }, + dictWord{5, 0, 827}, + dictWord{4, 11, 902}, + dictWord{5, 11, 809}, + dictWord{6, 11, 122}, + dictWord{135, 11, 896}, + dictWord{5, 0, 64}, + dictWord{140, 0, 581}, + dictWord{4, 0, 442}, + dictWord{6, 0, 739}, + dictWord{7, 0, 1047}, + dictWord{7, 0, 1352}, + dictWord{7, 0, 1643}, + dictWord{7, 11, 1911}, + dictWord{9, 11, 449}, + dictWord{10, 11, 192}, + dictWord{138, 11, 740}, + dictWord{135, 11, 262}, + dictWord{132, 10, 588}, + dictWord{133, 11, 620}, + dictWord{5, 0, 977}, + dictWord{ + 6, + 0, + 288, + }, + dictWord{7, 0, 528}, + dictWord{4, 11, 34}, + dictWord{5, 11, 574}, + dictWord{7, 11, 279}, + dictWord{7, 11, 1624}, + dictWord{136, 11, 601}, + dictWord{ + 6, + 0, + 1375, + }, + dictWord{4, 10, 231}, + dictWord{5, 10, 61}, + dictWord{6, 10, 104}, + dictWord{7, 10, 729}, + dictWord{7, 10, 964}, + dictWord{7, 10, 1658}, + dictWord{ + 140, + 10, + 414, + }, + dictWord{6, 10, 263}, + dictWord{138, 10, 757}, + dictWord{132, 10, 320}, + dictWord{4, 0, 254}, + dictWord{7, 0, 1309}, + dictWord{5, 11, 332}, + dictWord{ + 135, + 11, + 1309, + }, + dictWord{6, 11, 261}, + dictWord{8, 11, 182}, + dictWord{139, 11, 943}, + 
dictWord{132, 10, 225}, + dictWord{6, 0, 12}, + dictWord{135, 0, 1219}, + dictWord{4, 0, 275}, + dictWord{12, 0, 376}, + dictWord{6, 11, 1721}, + dictWord{141, 11, 490}, + dictWord{4, 11, 933}, + dictWord{133, 11, 880}, + dictWord{6, 0, 951}, + dictWord{6, 0, 1109}, + dictWord{6, 0, 1181}, + dictWord{7, 0, 154}, + dictWord{4, 10, 405}, + dictWord{7, 10, 817}, + dictWord{14, 10, 58}, + dictWord{17, 10, 37}, + dictWord{ + 146, + 10, + 124, + }, + dictWord{6, 0, 1520}, + dictWord{133, 10, 974}, + dictWord{134, 0, 1753}, + dictWord{6, 0, 369}, + dictWord{6, 0, 502}, + dictWord{7, 0, 1036}, + dictWord{ + 8, + 0, + 348, + }, + dictWord{9, 0, 452}, + dictWord{10, 0, 26}, + dictWord{11, 0, 224}, + dictWord{11, 0, 387}, + dictWord{11, 0, 772}, + dictWord{12, 0, 95}, + dictWord{12, 0, 629}, + dictWord{13, 0, 195}, + dictWord{13, 0, 207}, + dictWord{13, 0, 241}, + dictWord{14, 0, 260}, + dictWord{14, 0, 270}, + dictWord{143, 0, 140}, + dictWord{132, 0, 269}, + dictWord{5, 0, 480}, + dictWord{7, 0, 532}, + dictWord{7, 0, 1197}, + dictWord{7, 0, 1358}, + dictWord{8, 0, 291}, + dictWord{11, 0, 349}, + dictWord{142, 0, 396}, + dictWord{ + 5, + 10, + 235, + }, + dictWord{7, 10, 1239}, + dictWord{11, 10, 131}, + dictWord{140, 10, 370}, + dictWord{7, 10, 956}, + dictWord{7, 10, 1157}, + dictWord{7, 10, 1506}, + dictWord{ + 7, + 10, + 1606, + }, + dictWord{7, 10, 1615}, + dictWord{7, 10, 1619}, + dictWord{7, 10, 1736}, + dictWord{7, 10, 1775}, + dictWord{8, 10, 590}, + dictWord{9, 10, 324}, + dictWord{9, 10, 736}, + dictWord{9, 10, 774}, + dictWord{9, 10, 776}, + dictWord{9, 10, 784}, + dictWord{10, 10, 567}, + dictWord{10, 10, 708}, + dictWord{11, 10, 518}, + dictWord{11, 10, 613}, + dictWord{11, 10, 695}, + dictWord{11, 10, 716}, + dictWord{11, 10, 739}, + dictWord{11, 10, 770}, + dictWord{11, 10, 771}, + dictWord{ + 11, + 10, + 848, + }, + dictWord{11, 10, 857}, + dictWord{11, 10, 931}, + dictWord{11, 10, 947}, + dictWord{12, 10, 326}, + dictWord{12, 10, 387}, + dictWord{12, 10, 
484}, + dictWord{ + 12, + 10, + 528, + }, + dictWord{12, 10, 552}, + dictWord{12, 10, 613}, + dictWord{13, 10, 189}, + dictWord{13, 10, 256}, + dictWord{13, 10, 340}, + dictWord{13, 10, 432}, + dictWord{13, 10, 436}, + dictWord{13, 10, 440}, + dictWord{13, 10, 454}, + dictWord{14, 10, 174}, + dictWord{14, 10, 220}, + dictWord{14, 10, 284}, + dictWord{ + 14, + 10, + 390, + }, + dictWord{145, 10, 121}, + dictWord{8, 11, 598}, + dictWord{9, 11, 664}, + dictWord{138, 11, 441}, + dictWord{9, 10, 137}, + dictWord{138, 10, 221}, + dictWord{133, 11, 812}, + dictWord{148, 0, 15}, + dictWord{134, 0, 1341}, + dictWord{6, 0, 1017}, + dictWord{4, 11, 137}, + dictWord{7, 11, 1178}, + dictWord{ + 135, + 11, + 1520, + }, + dictWord{7, 10, 390}, + dictWord{138, 10, 140}, + dictWord{7, 11, 1260}, + dictWord{135, 11, 1790}, + dictWord{137, 11, 191}, + dictWord{ + 135, + 10, + 1144, + }, + dictWord{6, 0, 1810}, + dictWord{7, 0, 657}, + dictWord{8, 0, 886}, + dictWord{10, 0, 857}, + dictWord{14, 0, 440}, + dictWord{144, 0, 96}, + dictWord{8, 0, 533}, + dictWord{6, 11, 1661}, + dictWord{7, 11, 1975}, + dictWord{7, 11, 2009}, + dictWord{135, 11, 2011}, + dictWord{6, 0, 1453}, + dictWord{134, 10, 464}, + dictWord{ + 132, + 11, + 715, + }, + dictWord{5, 10, 407}, + dictWord{11, 10, 204}, + dictWord{11, 10, 243}, + dictWord{11, 10, 489}, + dictWord{12, 10, 293}, + dictWord{19, 10, 37}, + dictWord{20, 10, 73}, + dictWord{150, 10, 38}, + dictWord{133, 11, 703}, + dictWord{4, 0, 211}, + dictWord{7, 0, 1483}, + dictWord{5, 10, 325}, + dictWord{8, 10, 5}, + dictWord{ + 8, + 10, + 227, + }, + dictWord{9, 10, 105}, + dictWord{10, 10, 585}, + dictWord{140, 10, 614}, + dictWord{4, 0, 332}, + dictWord{5, 0, 335}, + dictWord{6, 0, 238}, + dictWord{ + 7, + 0, + 269, + }, + dictWord{7, 0, 811}, + dictWord{7, 0, 1797}, + dictWord{8, 0, 836}, + dictWord{9, 0, 507}, + dictWord{141, 0, 242}, + dictWord{5, 11, 89}, + dictWord{7, 11, 1915}, + dictWord{9, 11, 185}, + dictWord{9, 11, 235}, + dictWord{9, 11, 
496}, + dictWord{10, 11, 64}, + dictWord{10, 11, 270}, + dictWord{10, 11, 403}, + dictWord{10, 11, 469}, + dictWord{10, 11, 529}, + dictWord{10, 11, 590}, + dictWord{11, 11, 140}, + dictWord{11, 11, 860}, + dictWord{13, 11, 1}, + dictWord{13, 11, 422}, + dictWord{14, 11, 341}, + dictWord{14, 11, 364}, + dictWord{17, 11, 93}, + dictWord{18, 11, 113}, + dictWord{19, 11, 97}, + dictWord{147, 11, 113}, + dictWord{133, 11, 695}, + dictWord{ + 16, + 0, + 19, + }, + dictWord{5, 11, 6}, + dictWord{6, 11, 183}, + dictWord{6, 10, 621}, + dictWord{7, 11, 680}, + dictWord{7, 11, 978}, + dictWord{7, 11, 1013}, + dictWord{7, 11, 1055}, + dictWord{12, 11, 230}, + dictWord{13, 11, 172}, + dictWord{13, 10, 504}, + dictWord{146, 11, 29}, + dictWord{136, 0, 156}, + dictWord{133, 0, 1009}, + dictWord{ + 6, + 11, + 29, + }, + dictWord{139, 11, 63}, + dictWord{134, 0, 820}, + dictWord{134, 10, 218}, + dictWord{7, 10, 454}, + dictWord{7, 10, 782}, + dictWord{8, 10, 768}, + dictWord{ + 140, + 10, + 686, + }, + dictWord{5, 0, 228}, + dictWord{6, 0, 203}, + dictWord{7, 0, 156}, + dictWord{8, 0, 347}, + dictWord{9, 0, 265}, + dictWord{18, 0, 39}, + dictWord{20, 0, 54}, + dictWord{21, 0, 31}, + dictWord{22, 0, 3}, + dictWord{23, 0, 0}, + dictWord{15, 11, 8}, + dictWord{18, 11, 39}, + dictWord{20, 11, 54}, + dictWord{21, 11, 31}, + dictWord{22, 11, 3}, + dictWord{151, 11, 0}, + dictWord{7, 0, 1131}, + dictWord{135, 0, 1468}, + dictWord{144, 10, 0}, + dictWord{134, 0, 1276}, + dictWord{10, 10, 676}, + dictWord{ + 140, + 10, + 462, + }, + dictWord{132, 11, 311}, + dictWord{134, 11, 1740}, + dictWord{7, 11, 170}, + dictWord{8, 11, 90}, + dictWord{8, 11, 177}, + dictWord{8, 11, 415}, + dictWord{ + 11, + 11, + 714, + }, + dictWord{142, 11, 281}, + dictWord{134, 10, 164}, + dictWord{6, 0, 1792}, + dictWord{138, 0, 849}, + dictWord{150, 10, 50}, + dictWord{5, 0, 291}, + dictWord{5, 0, 318}, + dictWord{7, 0, 765}, + dictWord{9, 0, 389}, + dictWord{12, 0, 548}, + dictWord{8, 11, 522}, + dictWord{142, 
11, 328}, + dictWord{11, 11, 91}, + dictWord{ + 13, + 11, + 129, + }, + dictWord{15, 11, 101}, + dictWord{145, 11, 125}, + dictWord{4, 11, 494}, + dictWord{6, 11, 74}, + dictWord{7, 11, 44}, + dictWord{7, 11, 407}, + dictWord{ + 8, + 11, + 551, + }, + dictWord{12, 11, 17}, + dictWord{15, 11, 5}, + dictWord{148, 11, 11}, + dictWord{4, 11, 276}, + dictWord{133, 11, 296}, + dictWord{6, 10, 343}, + dictWord{ + 7, + 10, + 195, + }, + dictWord{7, 11, 1777}, + dictWord{9, 10, 226}, + dictWord{10, 10, 197}, + dictWord{10, 10, 575}, + dictWord{11, 10, 502}, + dictWord{139, 10, 899}, + dictWord{ + 10, + 0, + 525, + }, + dictWord{139, 0, 82}, + dictWord{14, 0, 453}, + dictWord{4, 11, 7}, + dictWord{5, 11, 90}, + dictWord{5, 11, 158}, + dictWord{6, 11, 542}, + dictWord{7, 11, 221}, + dictWord{7, 11, 1574}, + dictWord{9, 11, 490}, + dictWord{10, 11, 540}, + dictWord{11, 11, 443}, + dictWord{139, 11, 757}, + dictWord{135, 0, 666}, + dictWord{ + 22, + 10, + 29, + }, + dictWord{150, 11, 29}, + dictWord{4, 0, 422}, + dictWord{147, 10, 8}, + dictWord{5, 0, 355}, + dictWord{145, 0, 0}, + dictWord{6, 0, 1873}, + dictWord{9, 0, 918}, + dictWord{7, 11, 588}, + dictWord{9, 11, 175}, + dictWord{138, 11, 530}, + dictWord{143, 11, 31}, + dictWord{11, 0, 165}, + dictWord{7, 10, 1125}, + dictWord{9, 10, 143}, + dictWord{14, 10, 405}, + dictWord{150, 10, 21}, + dictWord{9, 0, 260}, + dictWord{137, 0, 905}, + dictWord{5, 11, 872}, + dictWord{6, 11, 57}, + dictWord{6, 11, 479}, + dictWord{ + 6, + 11, + 562, + }, + dictWord{7, 11, 471}, + dictWord{7, 11, 1060}, + dictWord{9, 11, 447}, + dictWord{9, 11, 454}, + dictWord{141, 11, 6}, + dictWord{138, 11, 704}, + dictWord{133, 0, 865}, + dictWord{5, 0, 914}, + dictWord{134, 0, 1625}, + dictWord{133, 0, 234}, + dictWord{7, 0, 1383}, + dictWord{5, 11, 31}, + dictWord{6, 11, 614}, + dictWord{145, 11, 61}, + dictWord{7, 11, 1200}, + dictWord{138, 11, 460}, + dictWord{6, 11, 424}, + dictWord{135, 11, 1866}, + dictWord{136, 0, 306}, + dictWord{ + 5, + 10, 
+ 959, + }, + dictWord{12, 11, 30}, + dictWord{13, 11, 148}, + dictWord{14, 11, 87}, + dictWord{14, 11, 182}, + dictWord{16, 11, 42}, + dictWord{18, 11, 92}, + dictWord{ + 148, + 11, + 70, + }, + dictWord{6, 0, 1919}, + dictWord{6, 0, 1921}, + dictWord{9, 0, 923}, + dictWord{9, 0, 930}, + dictWord{9, 0, 941}, + dictWord{9, 0, 949}, + dictWord{9, 0, 987}, + dictWord{ + 9, + 0, + 988, + }, + dictWord{9, 0, 992}, + dictWord{12, 0, 802}, + dictWord{12, 0, 815}, + dictWord{12, 0, 856}, + dictWord{12, 0, 885}, + dictWord{12, 0, 893}, + dictWord{ + 12, + 0, + 898, + }, + dictWord{12, 0, 919}, + dictWord{12, 0, 920}, + dictWord{12, 0, 941}, + dictWord{12, 0, 947}, + dictWord{15, 0, 183}, + dictWord{15, 0, 185}, + dictWord{15, 0, 189}, + dictWord{15, 0, 197}, + dictWord{15, 0, 202}, + dictWord{15, 0, 233}, + dictWord{18, 0, 218}, + dictWord{18, 0, 219}, + dictWord{18, 0, 233}, + dictWord{143, 11, 156}, + dictWord{135, 10, 1759}, + dictWord{136, 10, 173}, + dictWord{13, 0, 163}, + dictWord{13, 0, 180}, + dictWord{18, 0, 78}, + dictWord{20, 0, 35}, + dictWord{5, 11, 13}, + dictWord{134, 11, 142}, + dictWord{134, 10, 266}, + dictWord{6, 11, 97}, + dictWord{7, 11, 116}, + dictWord{8, 11, 322}, + dictWord{8, 11, 755}, + dictWord{9, 11, 548}, + dictWord{10, 11, 714}, + dictWord{11, 11, 884}, + dictWord{141, 11, 324}, + dictWord{135, 0, 1312}, + dictWord{9, 0, 814}, + dictWord{137, 11, 676}, + dictWord{ + 133, + 0, + 707, + }, + dictWord{135, 0, 1493}, + dictWord{6, 0, 421}, + dictWord{7, 0, 61}, + dictWord{7, 0, 1540}, + dictWord{10, 0, 11}, + dictWord{138, 0, 501}, + dictWord{12, 0, 733}, + dictWord{12, 0, 766}, + dictWord{7, 11, 866}, + dictWord{135, 11, 1163}, + dictWord{137, 0, 341}, + dictWord{142, 0, 98}, + dictWord{145, 11, 115}, + dictWord{ + 135, + 11, + 1111, + }, + dictWord{136, 10, 300}, + dictWord{136, 0, 1014}, + dictWord{8, 11, 1}, + dictWord{9, 11, 112}, + dictWord{138, 11, 326}, + dictWord{132, 11, 730}, + dictWord{5, 11, 488}, + dictWord{6, 11, 527}, + 
dictWord{7, 11, 489}, + dictWord{7, 11, 1636}, + dictWord{8, 11, 121}, + dictWord{8, 11, 144}, + dictWord{8, 11, 359}, + dictWord{ + 9, + 11, + 193, + }, + dictWord{9, 11, 241}, + dictWord{9, 11, 336}, + dictWord{9, 11, 882}, + dictWord{11, 11, 266}, + dictWord{11, 11, 372}, + dictWord{11, 11, 944}, + dictWord{ + 12, + 11, + 401, + }, + dictWord{140, 11, 641}, + dictWord{6, 0, 971}, + dictWord{134, 0, 1121}, + dictWord{6, 0, 102}, + dictWord{7, 0, 72}, + dictWord{15, 0, 142}, + dictWord{ + 147, + 0, + 67, + }, + dictWord{151, 0, 30}, + dictWord{135, 0, 823}, + dictWord{134, 0, 1045}, + dictWord{5, 10, 427}, + dictWord{5, 10, 734}, + dictWord{7, 10, 478}, + dictWord{ + 136, + 10, + 52, + }, + dictWord{7, 0, 1930}, + dictWord{11, 10, 217}, + dictWord{142, 10, 165}, + dictWord{6, 0, 1512}, + dictWord{135, 0, 1870}, + dictWord{9, 11, 31}, + dictWord{ + 10, + 11, + 244, + }, + dictWord{10, 11, 699}, + dictWord{12, 11, 149}, + dictWord{141, 11, 497}, + dictWord{133, 11, 377}, + dictWord{145, 11, 101}, + dictWord{ + 10, + 11, + 158, + }, + dictWord{13, 11, 13}, + dictWord{13, 11, 137}, + dictWord{13, 11, 258}, + dictWord{14, 11, 111}, + dictWord{14, 11, 225}, + dictWord{14, 11, 253}, + dictWord{ + 14, + 11, + 304, + }, + dictWord{14, 11, 339}, + dictWord{14, 11, 417}, + dictWord{146, 11, 33}, + dictWord{6, 0, 87}, + dictWord{6, 10, 1734}, + dictWord{7, 10, 20}, + dictWord{ + 7, + 10, + 1056, + }, + dictWord{8, 10, 732}, + dictWord{9, 10, 406}, + dictWord{9, 10, 911}, + dictWord{138, 10, 694}, + dictWord{134, 0, 1243}, + dictWord{137, 0, 245}, + dictWord{ + 7, + 0, + 68, + }, + dictWord{8, 0, 48}, + dictWord{8, 0, 88}, + dictWord{8, 0, 582}, + dictWord{8, 0, 681}, + dictWord{9, 0, 373}, + dictWord{9, 0, 864}, + dictWord{11, 0, 157}, + dictWord{ + 11, + 0, + 336, + }, + dictWord{11, 0, 843}, + dictWord{148, 0, 27}, + dictWord{8, 11, 663}, + dictWord{144, 11, 8}, + dictWord{133, 10, 613}, + dictWord{4, 0, 88}, + dictWord{ + 5, + 0, + 137, + }, + dictWord{5, 0, 174}, + 
dictWord{5, 0, 777}, + dictWord{6, 0, 1664}, + dictWord{6, 0, 1725}, + dictWord{7, 0, 77}, + dictWord{7, 0, 426}, + dictWord{7, 0, 1317}, + dictWord{ + 7, + 0, + 1355, + }, + dictWord{8, 0, 126}, + dictWord{8, 0, 563}, + dictWord{9, 0, 523}, + dictWord{9, 0, 750}, + dictWord{10, 0, 310}, + dictWord{10, 0, 836}, + dictWord{11, 0, 42}, + dictWord{11, 0, 318}, + dictWord{11, 0, 731}, + dictWord{12, 0, 68}, + dictWord{12, 0, 92}, + dictWord{12, 0, 507}, + dictWord{12, 0, 692}, + dictWord{13, 0, 81}, + dictWord{ + 13, + 0, + 238, + }, + dictWord{13, 0, 374}, + dictWord{14, 0, 436}, + dictWord{18, 0, 138}, + dictWord{19, 0, 78}, + dictWord{19, 0, 111}, + dictWord{20, 0, 55}, + dictWord{20, 0, 77}, + dictWord{148, 0, 92}, + dictWord{141, 0, 418}, + dictWord{4, 0, 938}, + dictWord{137, 0, 625}, + dictWord{138, 0, 351}, + dictWord{5, 11, 843}, + dictWord{7, 10, 32}, + dictWord{ + 7, + 10, + 984, + }, + dictWord{8, 10, 85}, + dictWord{8, 10, 709}, + dictWord{9, 10, 579}, + dictWord{9, 10, 847}, + dictWord{9, 10, 856}, + dictWord{10, 10, 799}, + dictWord{ + 11, + 10, + 258, + }, + dictWord{11, 10, 1007}, + dictWord{12, 10, 331}, + dictWord{12, 10, 615}, + dictWord{13, 10, 188}, + dictWord{13, 10, 435}, + dictWord{14, 10, 8}, + dictWord{ + 15, + 10, + 165, + }, + dictWord{16, 10, 27}, + dictWord{148, 10, 40}, + dictWord{6, 0, 1668}, + dictWord{7, 0, 1499}, + dictWord{8, 0, 117}, + dictWord{9, 0, 314}, + dictWord{ + 138, + 0, + 174, + }, + dictWord{135, 0, 707}, + dictWord{132, 11, 554}, + dictWord{133, 11, 536}, + dictWord{5, 0, 403}, + dictWord{5, 11, 207}, + dictWord{9, 11, 79}, + dictWord{ + 11, + 11, + 625, + }, + dictWord{145, 11, 7}, + dictWord{132, 11, 424}, + dictWord{136, 11, 785}, + dictWord{4, 10, 167}, + dictWord{135, 10, 82}, + dictWord{9, 0, 7}, + dictWord{ + 23, + 0, + 6, + }, + dictWord{9, 11, 7}, + dictWord{151, 11, 6}, + dictWord{6, 0, 282}, + dictWord{5, 10, 62}, + dictWord{6, 10, 534}, + dictWord{7, 10, 74}, + dictWord{7, 10, 678}, + dictWord{ + 7, + 10, + 
684, + }, + dictWord{7, 10, 1043}, + dictWord{7, 10, 1072}, + dictWord{8, 10, 280}, + dictWord{8, 10, 541}, + dictWord{8, 10, 686}, + dictWord{9, 10, 258}, + dictWord{ + 10, + 10, + 519, + }, + dictWord{11, 10, 252}, + dictWord{140, 10, 282}, + dictWord{138, 10, 33}, + dictWord{132, 10, 359}, + dictWord{4, 0, 44}, + dictWord{5, 0, 311}, + dictWord{ + 6, + 0, + 156, + }, + dictWord{7, 0, 639}, + dictWord{7, 0, 762}, + dictWord{7, 0, 1827}, + dictWord{9, 0, 8}, + dictWord{9, 0, 462}, + dictWord{148, 0, 83}, + dictWord{7, 11, 769}, + dictWord{ + 9, + 11, + 18, + }, + dictWord{138, 11, 358}, + dictWord{4, 0, 346}, + dictWord{7, 0, 115}, + dictWord{9, 0, 180}, + dictWord{9, 0, 456}, + dictWord{10, 0, 363}, + dictWord{ + 4, + 11, + 896, + }, + dictWord{134, 11, 1777}, + dictWord{133, 10, 211}, + dictWord{7, 0, 761}, + dictWord{7, 0, 1051}, + dictWord{137, 0, 545}, + dictWord{6, 10, 145}, + dictWord{ + 141, + 10, + 336, + }, + dictWord{7, 11, 750}, + dictWord{9, 11, 223}, + dictWord{11, 11, 27}, + dictWord{11, 11, 466}, + dictWord{12, 11, 624}, + dictWord{14, 11, 265}, + dictWord{146, 11, 61}, + dictWord{6, 0, 752}, + dictWord{6, 0, 768}, + dictWord{6, 0, 1195}, + dictWord{6, 0, 1254}, + dictWord{6, 0, 1619}, + dictWord{137, 0, 835}, + dictWord{ + 6, + 0, + 1936, + }, + dictWord{8, 0, 930}, + dictWord{136, 0, 960}, + dictWord{132, 10, 263}, + dictWord{132, 11, 249}, + dictWord{12, 0, 653}, + dictWord{132, 10, 916}, + dictWord{4, 11, 603}, + dictWord{133, 11, 661}, + dictWord{8, 0, 344}, + dictWord{4, 11, 11}, + dictWord{6, 11, 128}, + dictWord{7, 11, 231}, + dictWord{7, 11, 1533}, + dictWord{138, 11, 725}, + dictWord{134, 0, 1483}, + dictWord{134, 0, 875}, + dictWord{6, 0, 185}, + dictWord{7, 0, 1899}, + dictWord{9, 0, 875}, + dictWord{139, 0, 673}, + dictWord{15, 10, 155}, + dictWord{144, 10, 79}, + dictWord{7, 0, 93}, + dictWord{7, 0, 210}, + dictWord{7, 0, 1223}, + dictWord{8, 0, 451}, + dictWord{8, 0, 460}, + dictWord{ + 11, + 0, + 353, + }, + dictWord{11, 0, 475}, + 
dictWord{4, 10, 599}, + dictWord{6, 10, 1634}, + dictWord{7, 10, 67}, + dictWord{7, 10, 691}, + dictWord{7, 10, 979}, + dictWord{ + 7, + 10, + 1697, + }, + dictWord{8, 10, 207}, + dictWord{8, 10, 214}, + dictWord{8, 10, 231}, + dictWord{8, 10, 294}, + dictWord{8, 10, 336}, + dictWord{8, 10, 428}, + dictWord{ + 8, + 10, + 471, + }, + dictWord{8, 10, 622}, + dictWord{8, 10, 626}, + dictWord{8, 10, 679}, + dictWord{8, 10, 759}, + dictWord{8, 10, 829}, + dictWord{9, 10, 11}, + dictWord{9, 10, 246}, + dictWord{9, 10, 484}, + dictWord{9, 10, 573}, + dictWord{9, 10, 706}, + dictWord{9, 10, 762}, + dictWord{9, 10, 798}, + dictWord{9, 10, 855}, + dictWord{9, 10, 870}, + dictWord{ + 9, + 10, + 912, + }, + dictWord{10, 10, 303}, + dictWord{10, 10, 335}, + dictWord{10, 10, 424}, + dictWord{10, 10, 461}, + dictWord{10, 10, 543}, + dictWord{10, 10, 759}, + dictWord{10, 10, 814}, + dictWord{11, 10, 59}, + dictWord{11, 10, 235}, + dictWord{11, 10, 590}, + dictWord{11, 10, 929}, + dictWord{11, 10, 963}, + dictWord{ + 11, + 10, + 987, + }, + dictWord{12, 10, 114}, + dictWord{12, 10, 182}, + dictWord{12, 10, 226}, + dictWord{12, 10, 332}, + dictWord{12, 10, 439}, + dictWord{12, 10, 575}, + dictWord{ + 12, + 10, + 598, + }, + dictWord{12, 10, 675}, + dictWord{13, 10, 8}, + dictWord{13, 10, 125}, + dictWord{13, 10, 194}, + dictWord{13, 10, 287}, + dictWord{14, 10, 197}, + dictWord{14, 10, 383}, + dictWord{15, 10, 53}, + dictWord{17, 10, 63}, + dictWord{19, 10, 46}, + dictWord{19, 10, 98}, + dictWord{19, 10, 106}, + dictWord{148, 10, 85}, + dictWord{132, 11, 476}, + dictWord{4, 0, 327}, + dictWord{5, 0, 478}, + dictWord{7, 0, 1332}, + dictWord{136, 0, 753}, + dictWord{5, 0, 1020}, + dictWord{133, 0, 1022}, + dictWord{135, 11, 1807}, + dictWord{4, 0, 103}, + dictWord{133, 0, 401}, + dictWord{4, 0, 499}, + dictWord{135, 0, 1421}, + dictWord{10, 0, 207}, + dictWord{13, 0, 164}, + dictWord{147, 10, 126}, + dictWord{9, 11, 20}, + dictWord{10, 11, 324}, + dictWord{139, 11, 488}, + 
dictWord{132, 0, 96}, + dictWord{9, 11, 280}, + dictWord{ + 138, + 11, + 134, + }, + dictWord{135, 0, 968}, + dictWord{133, 10, 187}, + dictWord{135, 10, 1286}, + dictWord{5, 11, 112}, + dictWord{6, 11, 103}, + dictWord{134, 11, 150}, + dictWord{8, 0, 914}, + dictWord{10, 0, 3}, + dictWord{4, 10, 215}, + dictWord{9, 10, 38}, + dictWord{11, 10, 23}, + dictWord{11, 10, 127}, + dictWord{139, 10, 796}, + dictWord{ + 135, + 0, + 399, + }, + dictWord{6, 0, 563}, + dictWord{137, 0, 224}, + dictWord{6, 0, 704}, + dictWord{134, 0, 1214}, + dictWord{4, 11, 708}, + dictWord{8, 11, 15}, + dictWord{ + 9, + 11, + 50, + }, + dictWord{9, 11, 386}, + dictWord{11, 11, 18}, + dictWord{11, 11, 529}, + dictWord{140, 11, 228}, + dictWord{4, 11, 563}, + dictWord{7, 11, 109}, + dictWord{ + 7, + 11, + 592, + }, + dictWord{7, 11, 637}, + dictWord{7, 11, 770}, + dictWord{7, 11, 1701}, + dictWord{8, 11, 436}, + dictWord{8, 11, 463}, + dictWord{9, 11, 60}, + dictWord{9, 11, 335}, + dictWord{9, 11, 904}, + dictWord{10, 11, 73}, + dictWord{11, 11, 434}, + dictWord{12, 11, 585}, + dictWord{13, 11, 331}, + dictWord{18, 11, 110}, + dictWord{ + 148, + 11, + 60, + }, + dictWord{134, 0, 1559}, + dictWord{132, 11, 502}, + dictWord{6, 11, 347}, + dictWord{138, 11, 161}, + dictWord{4, 11, 33}, + dictWord{5, 11, 102}, + dictWord{ + 5, + 11, + 500, + }, + dictWord{6, 11, 284}, + dictWord{7, 11, 1079}, + dictWord{7, 11, 1423}, + dictWord{7, 11, 1702}, + dictWord{8, 11, 470}, + dictWord{9, 11, 554}, + dictWord{ + 9, + 11, + 723, + }, + dictWord{139, 11, 333}, + dictWord{7, 11, 246}, + dictWord{135, 11, 840}, + dictWord{6, 11, 10}, + dictWord{8, 11, 571}, + dictWord{9, 11, 739}, + dictWord{ + 143, + 11, + 91, + }, + dictWord{8, 0, 861}, + dictWord{10, 0, 905}, + dictWord{12, 0, 730}, + dictWord{12, 0, 789}, + dictWord{133, 11, 626}, + dictWord{134, 0, 946}, + dictWord{ + 5, + 0, + 746, + }, + dictWord{12, 0, 333}, + dictWord{14, 0, 332}, + dictWord{12, 11, 333}, + dictWord{142, 11, 332}, + dictWord{5, 11, 
18}, + dictWord{6, 11, 526}, + dictWord{ + 13, + 11, + 24, + }, + dictWord{13, 11, 110}, + dictWord{19, 11, 5}, + dictWord{147, 11, 44}, + dictWord{4, 0, 910}, + dictWord{5, 0, 832}, + dictWord{135, 10, 2002}, + dictWord{ + 10, + 11, + 768, + }, + dictWord{139, 11, 787}, + dictWord{4, 11, 309}, + dictWord{5, 11, 462}, + dictWord{7, 11, 970}, + dictWord{135, 11, 1097}, + dictWord{4, 10, 28}, + dictWord{ + 5, + 10, + 440, + }, + dictWord{7, 10, 248}, + dictWord{11, 10, 833}, + dictWord{140, 10, 344}, + dictWord{134, 10, 1654}, + dictWord{6, 0, 632}, + dictWord{6, 0, 652}, + dictWord{ + 6, + 0, + 1272, + }, + dictWord{6, 0, 1384}, + dictWord{134, 0, 1560}, + dictWord{134, 11, 1704}, + dictWord{6, 0, 1393}, + dictWord{133, 10, 853}, + dictWord{6, 10, 249}, + dictWord{7, 10, 1234}, + dictWord{139, 10, 573}, + dictWord{5, 11, 86}, + dictWord{7, 11, 743}, + dictWord{9, 11, 85}, + dictWord{10, 11, 281}, + dictWord{10, 11, 432}, + dictWord{11, 11, 490}, + dictWord{12, 11, 251}, + dictWord{13, 11, 118}, + dictWord{14, 11, 378}, + dictWord{146, 11, 143}, + dictWord{5, 11, 524}, + dictWord{ + 133, + 11, + 744, + }, + dictWord{134, 0, 1514}, + dictWord{10, 0, 201}, + dictWord{142, 0, 319}, + dictWord{7, 0, 717}, + dictWord{10, 0, 510}, + dictWord{7, 10, 392}, + dictWord{ + 8, + 10, + 20, + }, + dictWord{8, 10, 172}, + dictWord{8, 10, 690}, + dictWord{9, 10, 383}, + dictWord{9, 10, 845}, + dictWord{11, 10, 293}, + dictWord{11, 10, 832}, + dictWord{ + 11, + 10, + 920, + }, + dictWord{11, 10, 984}, + dictWord{141, 10, 221}, + dictWord{134, 0, 1381}, + dictWord{5, 10, 858}, + dictWord{133, 10, 992}, + dictWord{8, 0, 528}, + dictWord{137, 0, 348}, + dictWord{10, 11, 107}, + dictWord{140, 11, 436}, + dictWord{4, 0, 20}, + dictWord{133, 0, 616}, + dictWord{134, 0, 1251}, + dictWord{ + 132, + 11, + 927, + }, + dictWord{10, 11, 123}, + dictWord{12, 11, 670}, + dictWord{13, 11, 371}, + dictWord{14, 11, 142}, + dictWord{146, 11, 94}, + dictWord{134, 0, 1163}, + dictWord{ + 7, + 11, + 
1149, + }, + dictWord{137, 11, 156}, + dictWord{134, 0, 307}, + dictWord{133, 11, 778}, + dictWord{7, 0, 1091}, + dictWord{135, 0, 1765}, + dictWord{ + 5, + 11, + 502, + }, + dictWord{6, 10, 268}, + dictWord{137, 10, 62}, + dictWord{8, 11, 196}, + dictWord{10, 11, 283}, + dictWord{139, 11, 406}, + dictWord{4, 0, 26}, + dictWord{ + 5, + 0, + 429, + }, + dictWord{6, 0, 245}, + dictWord{7, 0, 704}, + dictWord{7, 0, 1379}, + dictWord{135, 0, 1474}, + dictWord{133, 11, 855}, + dictWord{132, 0, 881}, + dictWord{ + 4, + 0, + 621, + }, + dictWord{135, 11, 1596}, + dictWord{7, 11, 1400}, + dictWord{9, 11, 446}, + dictWord{138, 11, 45}, + dictWord{6, 0, 736}, + dictWord{138, 10, 106}, + dictWord{133, 0, 542}, + dictWord{134, 0, 348}, + dictWord{133, 0, 868}, + dictWord{136, 0, 433}, + dictWord{135, 0, 1495}, + dictWord{138, 0, 771}, + dictWord{ + 6, + 10, + 613, + }, + dictWord{136, 10, 223}, + dictWord{138, 0, 215}, + dictWord{141, 0, 124}, + dictWord{136, 11, 391}, + dictWord{135, 11, 172}, + dictWord{132, 10, 670}, + dictWord{140, 0, 55}, + dictWord{9, 10, 40}, + dictWord{139, 10, 136}, + dictWord{7, 0, 62}, + dictWord{147, 0, 112}, + dictWord{132, 0, 856}, + dictWord{132, 11, 568}, + dictWord{12, 0, 270}, + dictWord{139, 10, 259}, + dictWord{8, 0, 572}, + dictWord{137, 0, 698}, + dictWord{4, 11, 732}, + dictWord{9, 10, 310}, + dictWord{137, 10, 682}, + dictWord{142, 10, 296}, + dictWord{134, 0, 939}, + dictWord{136, 11, 733}, + dictWord{135, 11, 1435}, + dictWord{7, 10, 1401}, + dictWord{135, 10, 1476}, + dictWord{6, 0, 352}, + dictWord{4, 10, 296}, + dictWord{7, 10, 401}, + dictWord{7, 10, 1410}, + dictWord{7, 10, 1594}, + dictWord{7, 10, 1674}, + dictWord{8, 10, 63}, + dictWord{ + 8, + 10, + 660, + }, + dictWord{137, 10, 74}, + dictWord{4, 11, 428}, + dictWord{133, 11, 668}, + dictWord{4, 10, 139}, + dictWord{4, 10, 388}, + dictWord{140, 10, 188}, + dictWord{7, 11, 2015}, + dictWord{140, 11, 665}, + dictWord{132, 0, 647}, + dictWord{146, 0, 10}, + dictWord{138, 0, 
220}, + dictWord{142, 0, 464}, + dictWord{ + 132, + 0, + 109, + }, + dictWord{134, 0, 1746}, + dictWord{6, 0, 515}, + dictWord{4, 10, 747}, + dictWord{6, 11, 1623}, + dictWord{6, 11, 1681}, + dictWord{7, 10, 649}, + dictWord{ + 7, + 10, + 1479, + }, + dictWord{135, 10, 1583}, + dictWord{133, 10, 232}, + dictWord{135, 0, 566}, + dictWord{137, 10, 887}, + dictWord{4, 0, 40}, + dictWord{10, 0, 67}, + dictWord{ + 11, + 0, + 117, + }, + dictWord{11, 0, 768}, + dictWord{139, 0, 935}, + dictWord{132, 0, 801}, + dictWord{7, 0, 992}, + dictWord{8, 0, 301}, + dictWord{9, 0, 722}, + dictWord{ + 12, + 0, + 63, + }, + dictWord{13, 0, 29}, + dictWord{14, 0, 161}, + dictWord{143, 0, 18}, + dictWord{139, 0, 923}, + dictWord{6, 11, 1748}, + dictWord{8, 11, 715}, + dictWord{9, 11, 802}, + dictWord{10, 11, 46}, + dictWord{10, 11, 819}, + dictWord{13, 11, 308}, + dictWord{14, 11, 351}, + dictWord{14, 11, 363}, + dictWord{146, 11, 67}, + dictWord{ + 137, + 11, + 745, + }, + dictWord{7, 0, 1145}, + dictWord{4, 10, 14}, + dictWord{7, 10, 1801}, + dictWord{10, 10, 748}, + dictWord{141, 10, 458}, + dictWord{4, 11, 63}, + dictWord{ + 5, + 11, + 347, + }, + dictWord{134, 11, 474}, + dictWord{135, 0, 568}, + dictWord{4, 10, 425}, + dictWord{7, 11, 577}, + dictWord{7, 11, 1432}, + dictWord{9, 11, 475}, + dictWord{ + 9, + 11, + 505, + }, + dictWord{9, 11, 526}, + dictWord{9, 11, 609}, + dictWord{9, 11, 689}, + dictWord{9, 11, 726}, + dictWord{9, 11, 735}, + dictWord{9, 11, 738}, + dictWord{ + 10, + 11, + 556, + }, + dictWord{10, 11, 674}, + dictWord{10, 11, 684}, + dictWord{11, 11, 89}, + dictWord{11, 11, 202}, + dictWord{11, 11, 272}, + dictWord{11, 11, 380}, + dictWord{ + 11, + 11, + 415, + }, + dictWord{11, 11, 505}, + dictWord{11, 11, 537}, + dictWord{11, 11, 550}, + dictWord{11, 11, 562}, + dictWord{11, 11, 640}, + dictWord{11, 11, 667}, + dictWord{11, 11, 688}, + dictWord{11, 11, 847}, + dictWord{11, 11, 927}, + dictWord{11, 11, 930}, + dictWord{11, 11, 940}, + dictWord{12, 11, 144}, + 
dictWord{ + 12, + 11, + 325, + }, + dictWord{12, 11, 329}, + dictWord{12, 11, 389}, + dictWord{12, 11, 403}, + dictWord{12, 11, 451}, + dictWord{12, 11, 515}, + dictWord{12, 11, 604}, + dictWord{ + 12, + 11, + 616, + }, + dictWord{12, 11, 626}, + dictWord{13, 11, 66}, + dictWord{13, 11, 131}, + dictWord{13, 11, 167}, + dictWord{13, 11, 236}, + dictWord{13, 11, 368}, + dictWord{13, 11, 411}, + dictWord{13, 11, 434}, + dictWord{13, 11, 453}, + dictWord{13, 11, 461}, + dictWord{13, 11, 474}, + dictWord{14, 11, 59}, + dictWord{14, 11, 60}, + dictWord{14, 11, 139}, + dictWord{14, 11, 152}, + dictWord{14, 11, 276}, + dictWord{14, 11, 353}, + dictWord{14, 11, 402}, + dictWord{15, 11, 28}, + dictWord{ + 15, + 11, + 81, + }, + dictWord{15, 11, 123}, + dictWord{15, 11, 152}, + dictWord{18, 11, 136}, + dictWord{148, 11, 88}, + dictWord{137, 0, 247}, + dictWord{135, 11, 1622}, + dictWord{ + 9, + 11, + 544, + }, + dictWord{11, 11, 413}, + dictWord{144, 11, 25}, + dictWord{4, 0, 645}, + dictWord{7, 0, 825}, + dictWord{6, 10, 1768}, + dictWord{135, 11, 89}, + dictWord{140, 0, 328}, + dictWord{5, 10, 943}, + dictWord{134, 10, 1779}, + dictWord{134, 0, 1363}, + dictWord{5, 10, 245}, + dictWord{6, 10, 576}, + dictWord{7, 10, 582}, + dictWord{136, 10, 225}, + dictWord{134, 0, 1280}, + dictWord{5, 11, 824}, + dictWord{133, 11, 941}, + dictWord{7, 11, 440}, + dictWord{8, 11, 230}, + dictWord{ + 139, + 11, + 106, + }, + dictWord{5, 0, 28}, + dictWord{6, 0, 204}, + dictWord{10, 0, 320}, + dictWord{10, 0, 583}, + dictWord{13, 0, 502}, + dictWord{14, 0, 72}, + dictWord{14, 0, 274}, + dictWord{14, 0, 312}, + dictWord{14, 0, 344}, + dictWord{15, 0, 159}, + dictWord{16, 0, 62}, + dictWord{16, 0, 69}, + dictWord{17, 0, 30}, + dictWord{18, 0, 42}, + dictWord{ + 18, + 0, + 53, + }, + dictWord{18, 0, 84}, + dictWord{18, 0, 140}, + dictWord{19, 0, 68}, + dictWord{19, 0, 85}, + dictWord{20, 0, 5}, + dictWord{20, 0, 45}, + dictWord{20, 0, 101}, + dictWord{ + 22, + 0, + 7, + }, + dictWord{150, 0, 
20}, + dictWord{4, 0, 558}, + dictWord{6, 0, 390}, + dictWord{7, 0, 162}, + dictWord{7, 0, 689}, + dictWord{9, 0, 360}, + dictWord{138, 0, 653}, + dictWord{134, 0, 764}, + dictWord{6, 0, 862}, + dictWord{137, 0, 833}, + dictWord{5, 0, 856}, + dictWord{6, 0, 1672}, + dictWord{6, 0, 1757}, + dictWord{134, 0, 1781}, + dictWord{ + 5, + 0, + 92, + }, + dictWord{10, 0, 736}, + dictWord{140, 0, 102}, + dictWord{6, 0, 1927}, + dictWord{6, 0, 1944}, + dictWord{8, 0, 924}, + dictWord{8, 0, 948}, + dictWord{ + 10, + 0, + 967, + }, + dictWord{138, 0, 978}, + dictWord{134, 0, 1479}, + dictWord{5, 0, 590}, + dictWord{8, 0, 360}, + dictWord{9, 0, 213}, + dictWord{138, 0, 63}, + dictWord{ + 134, + 0, + 1521, + }, + dictWord{6, 0, 709}, + dictWord{134, 0, 891}, + dictWord{132, 10, 443}, + dictWord{13, 0, 477}, + dictWord{14, 0, 120}, + dictWord{148, 0, 61}, + dictWord{ + 4, + 11, + 914, + }, + dictWord{5, 11, 800}, + dictWord{133, 11, 852}, + dictWord{10, 11, 54}, + dictWord{141, 11, 115}, + dictWord{4, 11, 918}, + dictWord{133, 11, 876}, + dictWord{139, 11, 152}, + dictWord{4, 11, 92}, + dictWord{133, 11, 274}, + dictWord{135, 11, 1901}, + dictWord{9, 11, 800}, + dictWord{10, 11, 693}, + dictWord{ + 11, + 11, + 482, + }, + dictWord{11, 11, 734}, + dictWord{139, 11, 789}, + dictWord{9, 0, 483}, + dictWord{132, 10, 298}, + dictWord{6, 0, 1213}, + dictWord{141, 11, 498}, + dictWord{135, 11, 1451}, + dictWord{133, 11, 743}, + dictWord{4, 0, 1022}, + dictWord{10, 0, 1000}, + dictWord{12, 0, 957}, + dictWord{12, 0, 980}, + dictWord{ + 12, + 0, + 1013, + }, + dictWord{14, 0, 481}, + dictWord{144, 0, 116}, + dictWord{8, 0, 503}, + dictWord{17, 0, 29}, + dictWord{4, 11, 49}, + dictWord{7, 11, 280}, + dictWord{ + 135, + 11, + 1633, + }, + dictWord{135, 0, 1712}, + dictWord{134, 0, 466}, + dictWord{136, 11, 47}, + dictWord{5, 10, 164}, + dictWord{7, 10, 121}, + dictWord{142, 10, 189}, + dictWord{ + 7, + 10, + 812, + }, + dictWord{7, 10, 1261}, + dictWord{7, 10, 1360}, + dictWord{9, 10, 632}, 
+ dictWord{140, 10, 352}, + dictWord{139, 10, 556}, + dictWord{132, 0, 731}, + dictWord{5, 11, 272}, + dictWord{5, 11, 908}, + dictWord{5, 11, 942}, + dictWord{7, 11, 1008}, + dictWord{7, 11, 1560}, + dictWord{8, 11, 197}, + dictWord{9, 11, 47}, + dictWord{11, 11, 538}, + dictWord{139, 11, 742}, + dictWord{4, 10, 172}, + dictWord{9, 10, 611}, + dictWord{10, 10, 436}, + dictWord{12, 10, 673}, + dictWord{ + 141, + 10, + 255, + }, + dictWord{133, 10, 844}, + dictWord{10, 0, 484}, + dictWord{11, 0, 754}, + dictWord{12, 0, 457}, + dictWord{14, 0, 171}, + dictWord{14, 0, 389}, + dictWord{ + 146, + 0, + 153, + }, + dictWord{9, 10, 263}, + dictWord{10, 10, 147}, + dictWord{138, 10, 492}, + dictWord{137, 11, 891}, + dictWord{138, 0, 241}, + dictWord{133, 10, 537}, + dictWord{6, 0, 2005}, + dictWord{136, 0, 964}, + dictWord{137, 10, 842}, + dictWord{151, 11, 8}, + dictWord{4, 11, 407}, + dictWord{132, 11, 560}, + dictWord{ + 135, + 11, + 1884, + }, + dictWord{6, 0, 1100}, + dictWord{134, 0, 1242}, + dictWord{135, 0, 954}, + dictWord{5, 10, 230}, + dictWord{5, 10, 392}, + dictWord{6, 10, 420}, + dictWord{ + 9, + 10, + 568, + }, + dictWord{140, 10, 612}, + dictWord{4, 11, 475}, + dictWord{11, 11, 35}, + dictWord{11, 11, 90}, + dictWord{13, 11, 7}, + dictWord{13, 11, 71}, + dictWord{ + 13, + 11, + 177, + }, + dictWord{142, 11, 422}, + dictWord{136, 11, 332}, + dictWord{135, 0, 1958}, + dictWord{6, 0, 549}, + dictWord{8, 0, 34}, + dictWord{8, 0, 283}, + dictWord{ + 9, + 0, + 165, + }, + dictWord{138, 0, 475}, + dictWord{10, 0, 952}, + dictWord{12, 0, 966}, + dictWord{140, 0, 994}, + dictWord{5, 0, 652}, + dictWord{5, 0, 701}, + dictWord{ + 135, + 0, + 449, + }, + dictWord{4, 0, 655}, + dictWord{7, 0, 850}, + dictWord{17, 0, 75}, + dictWord{146, 0, 137}, + dictWord{4, 0, 146}, + dictWord{7, 0, 1618}, + dictWord{8, 0, 670}, + dictWord{ + 5, + 10, + 41, + }, + dictWord{7, 10, 1459}, + dictWord{7, 10, 1469}, + dictWord{7, 10, 1859}, + dictWord{9, 10, 549}, + dictWord{139, 10, 905}, 
+ dictWord{133, 10, 696}, + dictWord{6, 0, 159}, + dictWord{6, 0, 364}, + dictWord{7, 0, 516}, + dictWord{137, 0, 518}, + dictWord{135, 0, 1439}, + dictWord{6, 11, 222}, + dictWord{7, 11, 636}, + dictWord{ + 7, + 11, + 1620, + }, + dictWord{8, 11, 409}, + dictWord{9, 11, 693}, + dictWord{139, 11, 77}, + dictWord{13, 0, 151}, + dictWord{141, 11, 45}, + dictWord{6, 0, 1027}, + dictWord{ + 4, + 11, + 336, + }, + dictWord{132, 10, 771}, + dictWord{139, 11, 392}, + dictWord{10, 11, 121}, + dictWord{11, 11, 175}, + dictWord{149, 11, 16}, + dictWord{8, 0, 950}, + dictWord{138, 0, 983}, + dictWord{133, 10, 921}, + dictWord{135, 0, 993}, + dictWord{6, 10, 180}, + dictWord{7, 10, 1137}, + dictWord{8, 10, 751}, + dictWord{ + 139, + 10, + 805, + }, + dictWord{7, 0, 501}, + dictWord{9, 0, 111}, + dictWord{10, 0, 141}, + dictWord{11, 0, 332}, + dictWord{13, 0, 43}, + dictWord{13, 0, 429}, + dictWord{14, 0, 130}, + dictWord{14, 0, 415}, + dictWord{145, 0, 102}, + dictWord{4, 10, 183}, + dictWord{5, 11, 882}, + dictWord{7, 10, 271}, + dictWord{11, 10, 824}, + dictWord{11, 10, 952}, + dictWord{13, 10, 278}, + dictWord{13, 10, 339}, + dictWord{13, 10, 482}, + dictWord{14, 10, 424}, + dictWord{148, 10, 99}, + dictWord{4, 10, 19}, + dictWord{5, 10, 477}, + dictWord{5, 10, 596}, + dictWord{6, 10, 505}, + dictWord{7, 10, 1221}, + dictWord{11, 10, 907}, + dictWord{12, 10, 209}, + dictWord{141, 10, 214}, + dictWord{ + 135, + 10, + 1215, + }, + dictWord{133, 0, 452}, + dictWord{132, 11, 426}, + dictWord{5, 0, 149}, + dictWord{136, 0, 233}, + dictWord{133, 0, 935}, + dictWord{6, 11, 58}, + dictWord{ + 7, + 11, + 654, + }, + dictWord{7, 11, 745}, + dictWord{7, 11, 1969}, + dictWord{8, 11, 240}, + dictWord{8, 11, 675}, + dictWord{9, 11, 479}, + dictWord{9, 11, 731}, + dictWord{ + 10, + 11, + 330, + }, + dictWord{10, 11, 593}, + dictWord{10, 11, 817}, + dictWord{11, 11, 32}, + dictWord{11, 11, 133}, + dictWord{11, 11, 221}, + dictWord{145, 11, 68}, + dictWord{ + 12, + 0, + 582, + }, + 
dictWord{18, 0, 131}, + dictWord{7, 11, 102}, + dictWord{137, 11, 538}, + dictWord{136, 0, 801}, + dictWord{134, 10, 1645}, + dictWord{132, 0, 70}, + dictWord{6, 10, 92}, + dictWord{6, 10, 188}, + dictWord{7, 10, 1269}, + dictWord{7, 10, 1524}, + dictWord{7, 10, 1876}, + dictWord{10, 10, 228}, + dictWord{139, 10, 1020}, + dictWord{4, 10, 459}, + dictWord{133, 10, 966}, + dictWord{138, 0, 369}, + dictWord{16, 0, 36}, + dictWord{140, 10, 330}, + dictWord{141, 11, 366}, + dictWord{ + 7, + 0, + 721, + }, + dictWord{10, 0, 236}, + dictWord{12, 0, 204}, + dictWord{6, 10, 18}, + dictWord{7, 10, 932}, + dictWord{8, 10, 757}, + dictWord{9, 10, 54}, + dictWord{9, 10, 65}, + dictWord{9, 10, 844}, + dictWord{10, 10, 113}, + dictWord{10, 10, 315}, + dictWord{10, 10, 798}, + dictWord{11, 10, 153}, + dictWord{12, 10, 151}, + dictWord{12, 10, 392}, + dictWord{12, 10, 666}, + dictWord{142, 10, 248}, + dictWord{7, 0, 241}, + dictWord{10, 0, 430}, + dictWord{8, 10, 548}, + dictWord{9, 10, 532}, + dictWord{10, 10, 117}, + dictWord{11, 10, 351}, + dictWord{11, 10, 375}, + dictWord{143, 10, 23}, + dictWord{134, 10, 1742}, + dictWord{133, 10, 965}, + dictWord{133, 11, 566}, + dictWord{ + 6, + 11, + 48, + }, + dictWord{135, 11, 63}, + dictWord{134, 10, 182}, + dictWord{10, 10, 65}, + dictWord{10, 10, 488}, + dictWord{138, 10, 497}, + dictWord{6, 11, 114}, + dictWord{7, 11, 1224}, + dictWord{7, 11, 1556}, + dictWord{136, 11, 3}, + dictWord{134, 0, 1817}, + dictWord{8, 11, 576}, + dictWord{137, 11, 267}, + dictWord{ + 6, + 0, + 1078, + }, + dictWord{144, 0, 16}, + dictWord{9, 10, 588}, + dictWord{138, 10, 260}, + dictWord{138, 0, 1021}, + dictWord{5, 0, 406}, + dictWord{134, 0, 2022}, + dictWord{133, 11, 933}, + dictWord{6, 0, 69}, + dictWord{135, 0, 117}, + dictWord{7, 0, 1830}, + dictWord{136, 11, 427}, + dictWord{4, 0, 432}, + dictWord{135, 0, 824}, + dictWord{134, 10, 1786}, + dictWord{133, 0, 826}, + dictWord{139, 11, 67}, + dictWord{133, 11, 759}, + dictWord{135, 10, 308}, + 
dictWord{137, 0, 816}, + dictWord{ + 133, + 0, + 1000, + }, + dictWord{4, 0, 297}, + dictWord{6, 0, 529}, + dictWord{7, 0, 152}, + dictWord{7, 0, 713}, + dictWord{7, 0, 1845}, + dictWord{8, 0, 710}, + dictWord{8, 0, 717}, + dictWord{12, 0, 639}, + dictWord{140, 0, 685}, + dictWord{7, 0, 423}, + dictWord{136, 10, 588}, + dictWord{136, 10, 287}, + dictWord{136, 0, 510}, + dictWord{ + 134, + 0, + 1048, + }, + dictWord{6, 0, 618}, + dictWord{7, 11, 56}, + dictWord{7, 11, 1989}, + dictWord{8, 11, 337}, + dictWord{8, 11, 738}, + dictWord{9, 11, 600}, + dictWord{ + 10, + 11, + 483, + }, + dictWord{12, 11, 37}, + dictWord{13, 11, 447}, + dictWord{142, 11, 92}, + dictWord{4, 0, 520}, + dictWord{135, 0, 575}, + dictWord{8, 0, 990}, + dictWord{ + 138, + 0, + 977, + }, + dictWord{135, 11, 774}, + dictWord{9, 11, 347}, + dictWord{11, 11, 24}, + dictWord{140, 11, 170}, + dictWord{136, 11, 379}, + dictWord{140, 10, 290}, + dictWord{132, 11, 328}, + dictWord{4, 0, 321}, + dictWord{134, 0, 569}, + dictWord{4, 11, 101}, + dictWord{135, 11, 1171}, + dictWord{7, 0, 723}, + dictWord{7, 0, 1135}, + dictWord{5, 11, 833}, + dictWord{136, 11, 744}, + dictWord{7, 10, 719}, + dictWord{8, 10, 809}, + dictWord{136, 10, 834}, + dictWord{8, 0, 921}, + dictWord{136, 10, 796}, + dictWord{5, 10, 210}, + dictWord{6, 10, 213}, + dictWord{7, 10, 60}, + dictWord{10, 10, 364}, + dictWord{139, 10, 135}, + dictWord{5, 0, 397}, + dictWord{6, 0, 154}, + dictWord{7, 0, 676}, + dictWord{8, 0, 443}, + dictWord{8, 0, 609}, + dictWord{9, 0, 24}, + dictWord{9, 0, 325}, + dictWord{10, 0, 35}, + dictWord{11, 0, 535}, + dictWord{11, 0, 672}, + dictWord{11, 0, 1018}, + dictWord{12, 0, 637}, + dictWord{16, 0, 30}, + dictWord{5, 10, 607}, + dictWord{8, 10, 326}, + dictWord{136, 10, 490}, + dictWord{4, 10, 701}, + dictWord{5, 10, 472}, + dictWord{6, 11, 9}, + dictWord{6, 11, 397}, + dictWord{7, 11, 53}, + dictWord{7, 11, 1742}, + dictWord{9, 10, 758}, + dictWord{10, 11, 632}, + dictWord{ + 11, + 11, + 828, + }, + 
dictWord{140, 11, 146}, + dictWord{135, 10, 380}, + dictWord{135, 10, 1947}, + dictWord{148, 11, 109}, + dictWord{10, 10, 278}, + dictWord{ + 138, + 11, + 278, + }, + dictWord{134, 0, 856}, + dictWord{7, 0, 139}, + dictWord{4, 10, 386}, + dictWord{8, 10, 405}, + dictWord{8, 10, 728}, + dictWord{9, 10, 497}, + dictWord{ + 11, + 10, + 110, + }, + dictWord{11, 10, 360}, + dictWord{15, 10, 37}, + dictWord{144, 10, 84}, + dictWord{141, 0, 282}, + dictWord{133, 0, 981}, + dictWord{5, 0, 288}, + dictWord{ + 7, + 10, + 1452, + }, + dictWord{7, 10, 1480}, + dictWord{8, 10, 634}, + dictWord{140, 10, 472}, + dictWord{7, 0, 1890}, + dictWord{8, 11, 367}, + dictWord{10, 11, 760}, + dictWord{ + 14, + 11, + 79, + }, + dictWord{20, 11, 17}, + dictWord{152, 11, 0}, + dictWord{4, 10, 524}, + dictWord{136, 10, 810}, + dictWord{4, 0, 56}, + dictWord{7, 0, 1791}, + dictWord{ + 8, + 0, + 607, + }, + dictWord{8, 0, 651}, + dictWord{11, 0, 465}, + dictWord{11, 0, 835}, + dictWord{12, 0, 337}, + dictWord{141, 0, 480}, + dictWord{10, 10, 238}, + dictWord{ + 141, + 10, + 33, + }, + dictWord{11, 11, 417}, + dictWord{12, 11, 223}, + dictWord{140, 11, 265}, + dictWord{9, 0, 158}, + dictWord{10, 0, 411}, + dictWord{140, 0, 261}, + dictWord{ + 133, + 10, + 532, + }, + dictWord{133, 10, 997}, + dictWord{12, 11, 186}, + dictWord{12, 11, 292}, + dictWord{14, 11, 100}, + dictWord{146, 11, 70}, + dictWord{6, 0, 1403}, + dictWord{136, 0, 617}, + dictWord{134, 0, 1205}, + dictWord{139, 0, 563}, + dictWord{4, 0, 242}, + dictWord{134, 0, 333}, + dictWord{4, 11, 186}, + dictWord{5, 11, 157}, + dictWord{8, 11, 168}, + dictWord{138, 11, 6}, + dictWord{132, 0, 369}, + dictWord{133, 11, 875}, + dictWord{5, 10, 782}, + dictWord{5, 10, 829}, + dictWord{ + 134, + 10, + 1738, + }, + dictWord{134, 0, 622}, + dictWord{135, 11, 1272}, + dictWord{6, 0, 1407}, + dictWord{7, 11, 111}, + dictWord{136, 11, 581}, + dictWord{7, 10, 1823}, + dictWord{139, 10, 693}, + dictWord{7, 0, 160}, + dictWord{10, 0, 624}, + 
dictWord{142, 0, 279}, + dictWord{132, 0, 363}, + dictWord{10, 11, 589}, + dictWord{12, 11, 111}, + dictWord{13, 11, 260}, + dictWord{14, 11, 82}, + dictWord{18, 11, 63}, + dictWord{147, 11, 45}, + dictWord{7, 11, 1364}, + dictWord{7, 11, 1907}, + dictWord{ + 141, + 11, + 158, + }, + dictWord{4, 11, 404}, + dictWord{4, 11, 659}, + dictWord{135, 11, 675}, + dictWord{13, 11, 211}, + dictWord{14, 11, 133}, + dictWord{14, 11, 204}, + dictWord{ + 15, + 11, + 64, + }, + dictWord{15, 11, 69}, + dictWord{15, 11, 114}, + dictWord{16, 11, 10}, + dictWord{19, 11, 23}, + dictWord{19, 11, 35}, + dictWord{19, 11, 39}, + dictWord{ + 19, + 11, + 51, + }, + dictWord{19, 11, 71}, + dictWord{19, 11, 75}, + dictWord{152, 11, 15}, + dictWord{4, 10, 78}, + dictWord{5, 10, 96}, + dictWord{5, 10, 182}, + dictWord{7, 10, 1724}, + dictWord{7, 10, 1825}, + dictWord{10, 10, 394}, + dictWord{10, 10, 471}, + dictWord{11, 10, 532}, + dictWord{14, 10, 340}, + dictWord{145, 10, 88}, + dictWord{ + 135, + 10, + 1964, + }, + dictWord{133, 11, 391}, + dictWord{11, 11, 887}, + dictWord{14, 11, 365}, + dictWord{142, 11, 375}, + dictWord{5, 11, 540}, + dictWord{6, 11, 1697}, + dictWord{7, 11, 222}, + dictWord{136, 11, 341}, + dictWord{134, 11, 78}, + dictWord{9, 0, 601}, + dictWord{9, 0, 619}, + dictWord{10, 0, 505}, + dictWord{10, 0, 732}, + dictWord{11, 0, 355}, + dictWord{140, 0, 139}, + dictWord{134, 0, 292}, + dictWord{139, 0, 174}, + dictWord{5, 0, 177}, + dictWord{6, 0, 616}, + dictWord{7, 0, 827}, + dictWord{ + 9, + 0, + 525, + }, + dictWord{138, 0, 656}, + dictWord{10, 0, 31}, + dictWord{6, 10, 215}, + dictWord{7, 10, 1028}, + dictWord{7, 10, 1473}, + dictWord{7, 10, 1721}, + dictWord{ + 9, + 10, + 424, + }, + dictWord{138, 10, 779}, + dictWord{135, 10, 584}, + dictWord{136, 11, 293}, + dictWord{134, 0, 685}, + dictWord{135, 11, 1868}, + dictWord{ + 133, + 11, + 460, + }, + dictWord{7, 0, 647}, + dictWord{6, 10, 67}, + dictWord{7, 10, 1630}, + dictWord{9, 10, 354}, + dictWord{9, 10, 675}, + 
dictWord{10, 10, 830}, + dictWord{ + 14, + 10, + 80, + }, + dictWord{145, 10, 80}, + dictWord{4, 0, 161}, + dictWord{133, 0, 631}, + dictWord{6, 10, 141}, + dictWord{7, 10, 225}, + dictWord{9, 10, 59}, + dictWord{9, 10, 607}, + dictWord{10, 10, 312}, + dictWord{11, 10, 687}, + dictWord{12, 10, 555}, + dictWord{13, 10, 373}, + dictWord{13, 10, 494}, + dictWord{148, 10, 58}, + dictWord{ + 7, + 11, + 965, + }, + dictWord{7, 11, 1460}, + dictWord{135, 11, 1604}, + dictWord{136, 10, 783}, + dictWord{134, 11, 388}, + dictWord{6, 0, 722}, + dictWord{6, 0, 1267}, + dictWord{ + 4, + 11, + 511, + }, + dictWord{9, 11, 333}, + dictWord{9, 11, 379}, + dictWord{10, 11, 602}, + dictWord{11, 11, 441}, + dictWord{11, 11, 723}, + dictWord{11, 11, 976}, + dictWord{140, 11, 357}, + dictWord{134, 0, 1797}, + dictWord{135, 0, 1684}, + dictWord{9, 0, 469}, + dictWord{9, 0, 709}, + dictWord{12, 0, 512}, + dictWord{14, 0, 65}, + dictWord{17, 0, 12}, + dictWord{5, 11, 938}, + dictWord{136, 11, 707}, + dictWord{7, 0, 1230}, + dictWord{136, 0, 531}, + dictWord{10, 0, 229}, + dictWord{11, 0, 73}, + dictWord{ + 11, + 0, + 376, + }, + dictWord{139, 0, 433}, + dictWord{12, 0, 268}, + dictWord{12, 0, 640}, + dictWord{142, 0, 119}, + dictWord{7, 10, 430}, + dictWord{139, 10, 46}, + dictWord{ + 6, + 0, + 558, + }, + dictWord{7, 0, 651}, + dictWord{8, 0, 421}, + dictWord{9, 0, 0}, + dictWord{10, 0, 34}, + dictWord{139, 0, 1008}, + dictWord{6, 0, 106}, + dictWord{7, 0, 1786}, + dictWord{7, 0, 1821}, + dictWord{9, 0, 102}, + dictWord{9, 0, 763}, + dictWord{5, 10, 602}, + dictWord{7, 10, 2018}, + dictWord{137, 10, 418}, + dictWord{5, 0, 65}, + dictWord{ + 6, + 0, + 416, + }, + dictWord{7, 0, 1720}, + dictWord{7, 0, 1924}, + dictWord{10, 0, 109}, + dictWord{11, 0, 14}, + dictWord{11, 0, 70}, + dictWord{11, 0, 569}, + dictWord{11, 0, 735}, + dictWord{15, 0, 153}, + dictWord{20, 0, 80}, + dictWord{136, 10, 677}, + dictWord{135, 11, 1625}, + dictWord{137, 11, 772}, + dictWord{136, 0, 595}, + dictWord{ + 6, 
+ 11, + 469, + }, + dictWord{7, 11, 1709}, + dictWord{138, 11, 515}, + dictWord{7, 0, 1832}, + dictWord{138, 0, 374}, + dictWord{9, 0, 106}, + dictWord{9, 0, 163}, + dictWord{ + 9, + 0, + 296, + }, + dictWord{10, 0, 167}, + dictWord{10, 0, 172}, + dictWord{10, 0, 777}, + dictWord{139, 0, 16}, + dictWord{6, 0, 6}, + dictWord{7, 0, 81}, + dictWord{7, 0, 771}, + dictWord{ + 7, + 0, + 1731, + }, + dictWord{9, 0, 405}, + dictWord{138, 0, 421}, + dictWord{4, 11, 500}, + dictWord{135, 11, 938}, + dictWord{5, 11, 68}, + dictWord{134, 11, 383}, + dictWord{ + 5, + 0, + 881, + }, + dictWord{133, 0, 885}, + dictWord{6, 0, 854}, + dictWord{6, 0, 1132}, + dictWord{6, 0, 1495}, + dictWord{6, 0, 1526}, + dictWord{6, 0, 1533}, + dictWord{ + 134, + 0, + 1577, + }, + dictWord{4, 11, 337}, + dictWord{6, 11, 353}, + dictWord{7, 11, 1934}, + dictWord{8, 11, 488}, + dictWord{137, 11, 429}, + dictWord{7, 11, 236}, + dictWord{ + 7, + 11, + 1795, + }, + dictWord{8, 11, 259}, + dictWord{9, 11, 135}, + dictWord{9, 11, 177}, + dictWord{10, 11, 825}, + dictWord{11, 11, 115}, + dictWord{11, 11, 370}, + dictWord{ + 11, + 11, + 405, + }, + dictWord{11, 11, 604}, + dictWord{12, 11, 10}, + dictWord{12, 11, 667}, + dictWord{12, 11, 669}, + dictWord{13, 11, 76}, + dictWord{14, 11, 310}, + dictWord{15, 11, 76}, + dictWord{15, 11, 147}, + dictWord{148, 11, 23}, + dictWord{5, 0, 142}, + dictWord{134, 0, 546}, + dictWord{4, 11, 15}, + dictWord{5, 11, 22}, + dictWord{ + 6, + 11, + 244, + }, + dictWord{7, 11, 40}, + dictWord{7, 11, 200}, + dictWord{7, 11, 906}, + dictWord{7, 11, 1199}, + dictWord{9, 11, 616}, + dictWord{10, 11, 716}, + dictWord{ + 11, + 11, + 635, + }, + dictWord{11, 11, 801}, + dictWord{140, 11, 458}, + dictWord{5, 0, 466}, + dictWord{11, 0, 571}, + dictWord{12, 0, 198}, + dictWord{13, 0, 283}, + dictWord{ + 14, + 0, + 186, + }, + dictWord{15, 0, 21}, + dictWord{15, 0, 103}, + dictWord{135, 10, 329}, + dictWord{4, 0, 185}, + dictWord{5, 0, 257}, + dictWord{5, 0, 839}, + dictWord{5, 0, 
936}, + dictWord{9, 0, 399}, + dictWord{10, 0, 258}, + dictWord{10, 0, 395}, + dictWord{10, 0, 734}, + dictWord{11, 0, 1014}, + dictWord{12, 0, 23}, + dictWord{13, 0, 350}, + dictWord{ + 14, + 0, + 150, + }, + dictWord{19, 0, 6}, + dictWord{135, 11, 1735}, + dictWord{12, 11, 36}, + dictWord{141, 11, 337}, + dictWord{5, 11, 598}, + dictWord{7, 11, 791}, + dictWord{ + 8, + 11, + 108, + }, + dictWord{137, 11, 123}, + dictWord{132, 10, 469}, + dictWord{7, 0, 404}, + dictWord{7, 0, 1377}, + dictWord{7, 0, 1430}, + dictWord{7, 0, 2017}, + dictWord{ + 8, + 0, + 149, + }, + dictWord{8, 0, 239}, + dictWord{8, 0, 512}, + dictWord{8, 0, 793}, + dictWord{8, 0, 818}, + dictWord{9, 0, 474}, + dictWord{9, 0, 595}, + dictWord{10, 0, 122}, + dictWord{10, 0, 565}, + dictWord{10, 0, 649}, + dictWord{10, 0, 783}, + dictWord{11, 0, 239}, + dictWord{11, 0, 295}, + dictWord{11, 0, 447}, + dictWord{11, 0, 528}, + dictWord{ + 11, + 0, + 639, + }, + dictWord{11, 0, 800}, + dictWord{12, 0, 25}, + dictWord{12, 0, 77}, + dictWord{12, 0, 157}, + dictWord{12, 0, 256}, + dictWord{12, 0, 316}, + dictWord{12, 0, 390}, + dictWord{12, 0, 391}, + dictWord{12, 0, 395}, + dictWord{12, 0, 478}, + dictWord{12, 0, 503}, + dictWord{12, 0, 592}, + dictWord{12, 0, 680}, + dictWord{13, 0, 50}, + dictWord{13, 0, 53}, + dictWord{13, 0, 132}, + dictWord{13, 0, 198}, + dictWord{13, 0, 322}, + dictWord{13, 0, 415}, + dictWord{13, 0, 511}, + dictWord{14, 0, 71}, + dictWord{ + 14, + 0, + 395, + }, + dictWord{15, 0, 71}, + dictWord{15, 0, 136}, + dictWord{17, 0, 123}, + dictWord{18, 0, 93}, + dictWord{147, 0, 58}, + dictWord{136, 0, 712}, + dictWord{ + 134, + 10, + 1743, + }, + dictWord{5, 10, 929}, + dictWord{6, 10, 340}, + dictWord{8, 10, 376}, + dictWord{136, 10, 807}, + dictWord{6, 0, 1848}, + dictWord{8, 0, 860}, + dictWord{ + 10, + 0, + 856, + }, + dictWord{10, 0, 859}, + dictWord{10, 0, 925}, + dictWord{10, 0, 941}, + dictWord{140, 0, 762}, + dictWord{6, 0, 629}, + dictWord{6, 0, 906}, + dictWord{9, 0, 810}, + 
dictWord{140, 0, 652}, + dictWord{5, 10, 218}, + dictWord{7, 10, 1610}, + dictWord{138, 10, 83}, + dictWord{7, 10, 1512}, + dictWord{135, 10, 1794}, + dictWord{ + 4, + 0, + 377, + }, + dictWord{24, 0, 13}, + dictWord{4, 11, 155}, + dictWord{7, 11, 1689}, + dictWord{11, 10, 0}, + dictWord{144, 10, 78}, + dictWord{4, 11, 164}, + dictWord{5, 11, 151}, + dictWord{5, 11, 730}, + dictWord{5, 11, 741}, + dictWord{7, 11, 498}, + dictWord{7, 11, 870}, + dictWord{7, 11, 1542}, + dictWord{12, 11, 213}, + dictWord{14, 11, 36}, + dictWord{14, 11, 391}, + dictWord{17, 11, 111}, + dictWord{18, 11, 6}, + dictWord{18, 11, 46}, + dictWord{18, 11, 151}, + dictWord{19, 11, 36}, + dictWord{20, 11, 32}, + dictWord{20, 11, 56}, + dictWord{20, 11, 69}, + dictWord{20, 11, 102}, + dictWord{21, 11, 4}, + dictWord{22, 11, 8}, + dictWord{22, 11, 10}, + dictWord{22, 11, 14}, + dictWord{ + 150, + 11, + 31, + }, + dictWord{7, 0, 1842}, + dictWord{133, 10, 571}, + dictWord{4, 10, 455}, + dictWord{4, 11, 624}, + dictWord{135, 11, 1752}, + dictWord{134, 0, 1501}, + dictWord{4, 11, 492}, + dictWord{5, 11, 451}, + dictWord{6, 10, 161}, + dictWord{7, 10, 372}, + dictWord{137, 10, 597}, + dictWord{132, 10, 349}, + dictWord{4, 0, 180}, + dictWord{135, 0, 1906}, + dictWord{135, 11, 835}, + dictWord{141, 11, 70}, + dictWord{132, 0, 491}, + dictWord{137, 10, 751}, + dictWord{6, 10, 432}, + dictWord{ + 139, + 10, + 322, + }, + dictWord{4, 0, 171}, + dictWord{138, 0, 234}, + dictWord{6, 11, 113}, + dictWord{135, 11, 436}, + dictWord{4, 0, 586}, + dictWord{7, 0, 1186}, + dictWord{ + 138, + 0, + 631, + }, + dictWord{5, 10, 468}, + dictWord{10, 10, 325}, + dictWord{11, 10, 856}, + dictWord{12, 10, 345}, + dictWord{143, 10, 104}, + dictWord{5, 10, 223}, + dictWord{10, 11, 592}, + dictWord{10, 11, 753}, + dictWord{12, 11, 317}, + dictWord{12, 11, 355}, + dictWord{12, 11, 465}, + dictWord{12, 11, 469}, + dictWord{ + 12, + 11, + 560, + }, + dictWord{12, 11, 578}, + dictWord{141, 11, 243}, + dictWord{132, 10, 566}, + 
dictWord{135, 11, 520}, + dictWord{4, 10, 59}, + dictWord{135, 10, 1394}, + dictWord{6, 10, 436}, + dictWord{139, 10, 481}, + dictWord{9, 0, 931}, + dictWord{10, 0, 334}, + dictWord{20, 0, 71}, + dictWord{4, 10, 48}, + dictWord{5, 10, 271}, + dictWord{ + 7, + 10, + 953, + }, + dictWord{135, 11, 1878}, + dictWord{11, 0, 170}, + dictWord{5, 10, 610}, + dictWord{136, 10, 457}, + dictWord{133, 10, 755}, + dictWord{6, 0, 1587}, + dictWord{135, 10, 1217}, + dictWord{4, 10, 197}, + dictWord{149, 11, 26}, + dictWord{133, 11, 585}, + dictWord{137, 11, 521}, + dictWord{133, 0, 765}, + dictWord{ + 133, + 10, + 217, + }, + dictWord{139, 11, 586}, + dictWord{133, 0, 424}, + dictWord{9, 11, 752}, + dictWord{12, 11, 610}, + dictWord{13, 11, 431}, + dictWord{16, 11, 59}, + dictWord{146, 11, 109}, + dictWord{136, 0, 714}, + dictWord{7, 0, 685}, + dictWord{132, 11, 307}, + dictWord{9, 0, 420}, + dictWord{10, 0, 269}, + dictWord{10, 0, 285}, + dictWord{10, 0, 576}, + dictWord{11, 0, 397}, + dictWord{13, 0, 175}, + dictWord{145, 0, 90}, + dictWord{132, 0, 429}, + dictWord{133, 11, 964}, + dictWord{9, 11, 463}, + dictWord{138, 11, 595}, + dictWord{7, 0, 18}, + dictWord{7, 0, 699}, + dictWord{7, 0, 1966}, + dictWord{8, 0, 752}, + dictWord{9, 0, 273}, + dictWord{9, 0, 412}, + dictWord{ + 9, + 0, + 703, + }, + dictWord{10, 0, 71}, + dictWord{10, 0, 427}, + dictWord{138, 0, 508}, + dictWord{4, 10, 165}, + dictWord{7, 10, 1398}, + dictWord{135, 10, 1829}, + dictWord{ + 4, + 0, + 53, + }, + dictWord{5, 0, 186}, + dictWord{7, 0, 752}, + dictWord{7, 0, 828}, + dictWord{142, 0, 116}, + dictWord{8, 0, 575}, + dictWord{10, 0, 289}, + dictWord{139, 0, 319}, + dictWord{132, 0, 675}, + dictWord{134, 0, 1424}, + dictWord{4, 11, 75}, + dictWord{5, 11, 180}, + dictWord{6, 11, 500}, + dictWord{7, 11, 58}, + dictWord{7, 11, 710}, + dictWord{138, 11, 645}, + dictWord{133, 11, 649}, + dictWord{6, 11, 276}, + dictWord{7, 11, 282}, + dictWord{7, 11, 879}, + dictWord{7, 11, 924}, + dictWord{8, 11, 459}, + 
dictWord{9, 11, 599}, + dictWord{9, 11, 754}, + dictWord{11, 11, 574}, + dictWord{12, 11, 128}, + dictWord{12, 11, 494}, + dictWord{13, 11, 52}, + dictWord{13, 11, 301}, + dictWord{15, 11, 30}, + dictWord{143, 11, 132}, + dictWord{6, 0, 647}, + dictWord{134, 0, 1095}, + dictWord{5, 10, 9}, + dictWord{7, 10, 297}, + dictWord{7, 10, 966}, + dictWord{140, 10, 306}, + dictWord{132, 11, 200}, + dictWord{134, 0, 1334}, + dictWord{5, 10, 146}, + dictWord{6, 10, 411}, + dictWord{138, 10, 721}, + dictWord{ + 6, + 0, + 209, + }, + dictWord{6, 0, 1141}, + dictWord{6, 0, 1288}, + dictWord{8, 0, 468}, + dictWord{9, 0, 210}, + dictWord{11, 0, 36}, + dictWord{12, 0, 28}, + dictWord{12, 0, 630}, + dictWord{13, 0, 21}, + dictWord{13, 0, 349}, + dictWord{14, 0, 7}, + dictWord{145, 0, 13}, + dictWord{6, 10, 177}, + dictWord{135, 10, 467}, + dictWord{4, 0, 342}, + dictWord{ + 135, + 0, + 1179, + }, + dictWord{10, 11, 454}, + dictWord{140, 11, 324}, + dictWord{4, 0, 928}, + dictWord{133, 0, 910}, + dictWord{7, 0, 1838}, + dictWord{6, 11, 225}, + dictWord{ + 137, + 11, + 211, + }, + dictWord{16, 0, 101}, + dictWord{20, 0, 115}, + dictWord{20, 0, 118}, + dictWord{148, 0, 122}, + dictWord{4, 0, 496}, + dictWord{135, 0, 856}, + dictWord{ + 4, + 0, + 318, + }, + dictWord{11, 0, 654}, + dictWord{7, 11, 718}, + dictWord{139, 11, 102}, + dictWord{8, 11, 58}, + dictWord{9, 11, 724}, + dictWord{11, 11, 809}, + dictWord{ + 13, + 11, + 113, + }, + dictWord{145, 11, 72}, + dictWord{5, 10, 200}, + dictWord{6, 11, 345}, + dictWord{135, 11, 1247}, + dictWord{8, 11, 767}, + dictWord{8, 11, 803}, + dictWord{ + 9, + 11, + 301, + }, + dictWord{137, 11, 903}, + dictWord{7, 0, 915}, + dictWord{8, 0, 247}, + dictWord{19, 0, 0}, + dictWord{7, 11, 1949}, + dictWord{136, 11, 674}, + dictWord{ + 4, + 0, + 202, + }, + dictWord{5, 0, 382}, + dictWord{6, 0, 454}, + dictWord{7, 0, 936}, + dictWord{7, 0, 1803}, + dictWord{8, 0, 758}, + dictWord{9, 0, 375}, + dictWord{9, 0, 895}, + dictWord{ + 10, + 0, + 743, + }, + 
dictWord{10, 0, 792}, + dictWord{11, 0, 978}, + dictWord{11, 0, 1012}, + dictWord{142, 0, 109}, + dictWord{7, 0, 1150}, + dictWord{7, 0, 1425}, + dictWord{ + 7, + 0, + 1453, + }, + dictWord{140, 0, 513}, + dictWord{134, 11, 259}, + dictWord{138, 0, 791}, + dictWord{11, 0, 821}, + dictWord{12, 0, 110}, + dictWord{12, 0, 153}, + dictWord{ + 18, + 0, + 41, + }, + dictWord{150, 0, 19}, + dictWord{134, 10, 481}, + dictWord{132, 0, 796}, + dictWord{6, 0, 445}, + dictWord{9, 0, 909}, + dictWord{136, 11, 254}, + dictWord{ + 10, + 0, + 776, + }, + dictWord{13, 0, 345}, + dictWord{142, 0, 425}, + dictWord{4, 10, 84}, + dictWord{7, 10, 1482}, + dictWord{10, 10, 76}, + dictWord{138, 10, 142}, + dictWord{ + 135, + 11, + 742, + }, + dictWord{6, 0, 578}, + dictWord{133, 10, 1015}, + dictWord{6, 0, 1387}, + dictWord{4, 10, 315}, + dictWord{5, 10, 507}, + dictWord{135, 10, 1370}, + dictWord{4, 0, 438}, + dictWord{133, 0, 555}, + dictWord{136, 0, 766}, + dictWord{133, 11, 248}, + dictWord{134, 10, 1722}, + dictWord{4, 11, 116}, + dictWord{5, 11, 95}, + dictWord{5, 11, 445}, + dictWord{7, 11, 1688}, + dictWord{8, 11, 29}, + dictWord{9, 11, 272}, + dictWord{11, 11, 509}, + dictWord{139, 11, 915}, + dictWord{135, 0, 541}, + dictWord{133, 11, 543}, + dictWord{8, 10, 222}, + dictWord{8, 10, 476}, + dictWord{9, 10, 238}, + dictWord{11, 10, 516}, + dictWord{11, 10, 575}, + dictWord{ + 15, + 10, + 109, + }, + dictWord{146, 10, 100}, + dictWord{6, 0, 880}, + dictWord{134, 0, 1191}, + dictWord{5, 11, 181}, + dictWord{136, 11, 41}, + dictWord{134, 0, 1506}, + dictWord{132, 11, 681}, + dictWord{7, 11, 25}, + dictWord{8, 11, 202}, + dictWord{138, 11, 536}, + dictWord{139, 0, 983}, + dictWord{137, 0, 768}, + dictWord{132, 0, 584}, + dictWord{9, 11, 423}, + dictWord{140, 11, 89}, + dictWord{8, 11, 113}, + dictWord{9, 11, 877}, + dictWord{10, 11, 554}, + dictWord{11, 11, 83}, + dictWord{12, 11, 136}, + dictWord{147, 11, 109}, + dictWord{7, 10, 706}, + dictWord{7, 10, 1058}, + dictWord{138, 10, 
538}, + dictWord{133, 11, 976}, + dictWord{4, 11, 206}, + dictWord{ + 135, + 11, + 746, + }, + dictWord{136, 11, 526}, + dictWord{140, 0, 737}, + dictWord{11, 10, 92}, + dictWord{11, 10, 196}, + dictWord{11, 10, 409}, + dictWord{11, 10, 450}, + dictWord{11, 10, 666}, + dictWord{11, 10, 777}, + dictWord{12, 10, 262}, + dictWord{13, 10, 385}, + dictWord{13, 10, 393}, + dictWord{15, 10, 115}, + dictWord{ + 16, + 10, + 45, + }, + dictWord{145, 10, 82}, + dictWord{4, 0, 226}, + dictWord{4, 0, 326}, + dictWord{7, 0, 1770}, + dictWord{4, 11, 319}, + dictWord{5, 11, 699}, + dictWord{138, 11, 673}, + dictWord{6, 10, 40}, + dictWord{135, 10, 1781}, + dictWord{5, 0, 426}, + dictWord{8, 0, 30}, + dictWord{9, 0, 2}, + dictWord{11, 0, 549}, + dictWord{147, 0, 122}, + dictWord{ + 6, + 0, + 1161, + }, + dictWord{134, 0, 1329}, + dictWord{138, 10, 97}, + dictWord{6, 10, 423}, + dictWord{7, 10, 665}, + dictWord{135, 10, 1210}, + dictWord{7, 11, 13}, + dictWord{ + 8, + 11, + 226, + }, + dictWord{10, 11, 537}, + dictWord{11, 11, 570}, + dictWord{11, 11, 605}, + dictWord{11, 11, 799}, + dictWord{11, 11, 804}, + dictWord{12, 11, 85}, + dictWord{12, 11, 516}, + dictWord{12, 11, 623}, + dictWord{13, 11, 112}, + dictWord{13, 11, 361}, + dictWord{14, 11, 77}, + dictWord{14, 11, 78}, + dictWord{17, 11, 28}, + dictWord{147, 11, 110}, + dictWord{132, 11, 769}, + dictWord{132, 11, 551}, + dictWord{132, 11, 728}, + dictWord{147, 0, 117}, + dictWord{9, 11, 57}, + dictWord{ + 9, + 11, + 459, + }, + dictWord{10, 11, 425}, + dictWord{11, 11, 119}, + dictWord{12, 11, 184}, + dictWord{12, 11, 371}, + dictWord{13, 11, 358}, + dictWord{145, 11, 51}, + dictWord{ + 5, + 11, + 188, + }, + dictWord{5, 11, 814}, + dictWord{8, 11, 10}, + dictWord{9, 11, 421}, + dictWord{9, 11, 729}, + dictWord{10, 11, 609}, + dictWord{139, 11, 689}, + dictWord{134, 11, 624}, + dictWord{135, 11, 298}, + dictWord{135, 0, 462}, + dictWord{4, 0, 345}, + dictWord{139, 10, 624}, + dictWord{136, 10, 574}, + dictWord{ + 4, + 0, + 
385, + }, + dictWord{7, 0, 265}, + dictWord{135, 0, 587}, + dictWord{6, 0, 808}, + dictWord{132, 11, 528}, + dictWord{133, 0, 398}, + dictWord{132, 10, 354}, + dictWord{ + 4, + 0, + 347, + }, + dictWord{5, 0, 423}, + dictWord{5, 0, 996}, + dictWord{135, 0, 1329}, + dictWord{135, 10, 1558}, + dictWord{7, 0, 1259}, + dictWord{9, 0, 125}, + dictWord{ + 139, + 0, + 65, + }, + dictWord{5, 0, 136}, + dictWord{6, 0, 136}, + dictWord{136, 0, 644}, + dictWord{5, 11, 104}, + dictWord{6, 11, 173}, + dictWord{135, 11, 1631}, + dictWord{ + 135, + 0, + 469, + }, + dictWord{133, 10, 830}, + dictWord{4, 0, 278}, + dictWord{5, 0, 465}, + dictWord{135, 0, 1367}, + dictWord{7, 11, 810}, + dictWord{8, 11, 138}, + dictWord{ + 8, + 11, + 342, + }, + dictWord{9, 11, 84}, + dictWord{10, 11, 193}, + dictWord{11, 11, 883}, + dictWord{140, 11, 359}, + dictWord{5, 10, 496}, + dictWord{135, 10, 203}, + dictWord{ + 4, + 0, + 433, + }, + dictWord{133, 0, 719}, + dictWord{6, 11, 95}, + dictWord{134, 10, 547}, + dictWord{5, 10, 88}, + dictWord{137, 10, 239}, + dictWord{6, 11, 406}, + dictWord{ + 10, + 11, + 409, + }, + dictWord{10, 11, 447}, + dictWord{11, 11, 44}, + dictWord{140, 11, 100}, + dictWord{134, 0, 1423}, + dictWord{7, 10, 650}, + dictWord{135, 10, 1310}, + dictWord{134, 0, 749}, + dictWord{135, 11, 1243}, + dictWord{135, 0, 1363}, + dictWord{6, 0, 381}, + dictWord{7, 0, 645}, + dictWord{7, 0, 694}, + dictWord{8, 0, 546}, + dictWord{7, 10, 1076}, + dictWord{9, 10, 80}, + dictWord{11, 10, 78}, + dictWord{11, 10, 421}, + dictWord{11, 10, 534}, + dictWord{140, 10, 545}, + dictWord{ + 134, + 11, + 1636, + }, + dictWord{135, 11, 1344}, + dictWord{12, 0, 277}, + dictWord{7, 10, 274}, + dictWord{11, 10, 479}, + dictWord{139, 10, 507}, + dictWord{6, 0, 705}, + dictWord{ + 6, + 0, + 783, + }, + dictWord{6, 0, 1275}, + dictWord{6, 0, 1481}, + dictWord{4, 11, 282}, + dictWord{7, 11, 1034}, + dictWord{11, 11, 398}, + dictWord{11, 11, 634}, + dictWord{ + 12, + 11, + 1, + }, + dictWord{12, 11, 79}, + 
dictWord{12, 11, 544}, + dictWord{14, 11, 237}, + dictWord{17, 11, 10}, + dictWord{146, 11, 20}, + dictWord{134, 0, 453}, + dictWord{ + 4, + 0, + 555, + }, + dictWord{8, 0, 536}, + dictWord{10, 0, 288}, + dictWord{11, 0, 1005}, + dictWord{4, 10, 497}, + dictWord{135, 10, 1584}, + dictWord{5, 11, 118}, + dictWord{ + 5, + 11, + 499, + }, + dictWord{6, 11, 476}, + dictWord{7, 11, 600}, + dictWord{7, 11, 888}, + dictWord{135, 11, 1096}, + dictWord{138, 0, 987}, + dictWord{7, 0, 1107}, + dictWord{ + 7, + 10, + 261, + }, + dictWord{7, 10, 1115}, + dictWord{7, 10, 1354}, + dictWord{7, 10, 1588}, + dictWord{7, 10, 1705}, + dictWord{7, 10, 1902}, + dictWord{9, 10, 465}, + dictWord{10, 10, 248}, + dictWord{10, 10, 349}, + dictWord{10, 10, 647}, + dictWord{11, 10, 527}, + dictWord{11, 10, 660}, + dictWord{11, 10, 669}, + dictWord{ + 12, + 10, + 529, + }, + dictWord{141, 10, 305}, + dictWord{7, 11, 296}, + dictWord{7, 11, 596}, + dictWord{8, 11, 560}, + dictWord{8, 11, 586}, + dictWord{9, 11, 612}, + dictWord{ + 11, + 11, + 100, + }, + dictWord{11, 11, 304}, + dictWord{12, 11, 46}, + dictWord{13, 11, 89}, + dictWord{14, 11, 112}, + dictWord{145, 11, 122}, + dictWord{9, 0, 370}, + dictWord{ + 138, + 0, + 90, + }, + dictWord{136, 10, 13}, + dictWord{132, 0, 860}, + dictWord{7, 10, 642}, + dictWord{8, 10, 250}, + dictWord{11, 10, 123}, + dictWord{11, 10, 137}, + dictWord{ + 13, + 10, + 48, + }, + dictWord{142, 10, 95}, + dictWord{135, 10, 1429}, + dictWord{137, 11, 321}, + dictWord{132, 0, 257}, + dictWord{135, 0, 2031}, + dictWord{7, 0, 1768}, + dictWord{7, 11, 1599}, + dictWord{7, 11, 1723}, + dictWord{8, 11, 79}, + dictWord{8, 11, 106}, + dictWord{8, 11, 190}, + dictWord{8, 11, 302}, + dictWord{8, 11, 383}, + dictWord{9, 11, 119}, + dictWord{9, 11, 233}, + dictWord{9, 11, 298}, + dictWord{9, 11, 419}, + dictWord{9, 11, 471}, + dictWord{10, 11, 181}, + dictWord{10, 11, 406}, + dictWord{11, 11, 57}, + dictWord{11, 11, 85}, + dictWord{11, 11, 120}, + dictWord{11, 11, 177}, + 
dictWord{11, 11, 296}, + dictWord{11, 11, 382}, + dictWord{11, 11, 454}, + dictWord{11, 11, 758}, + dictWord{11, 11, 999}, + dictWord{12, 11, 27}, + dictWord{12, 11, 98}, + dictWord{12, 11, 131}, + dictWord{12, 11, 245}, + dictWord{ + 12, + 11, + 312, + }, + dictWord{12, 11, 446}, + dictWord{12, 11, 454}, + dictWord{13, 11, 25}, + dictWord{13, 11, 98}, + dictWord{13, 11, 426}, + dictWord{13, 11, 508}, + dictWord{ + 14, + 11, + 6, + }, + dictWord{14, 11, 163}, + dictWord{14, 11, 272}, + dictWord{14, 11, 277}, + dictWord{14, 11, 370}, + dictWord{15, 11, 95}, + dictWord{15, 11, 138}, + dictWord{ + 15, + 11, + 167, + }, + dictWord{17, 11, 18}, + dictWord{17, 11, 38}, + dictWord{20, 11, 96}, + dictWord{149, 11, 32}, + dictWord{5, 11, 722}, + dictWord{134, 11, 1759}, + dictWord{145, 11, 16}, + dictWord{6, 0, 1071}, + dictWord{134, 0, 1561}, + dictWord{10, 10, 545}, + dictWord{140, 10, 301}, + dictWord{6, 0, 83}, + dictWord{6, 0, 1733}, + dictWord{135, 0, 1389}, + dictWord{4, 0, 835}, + dictWord{135, 0, 1818}, + dictWord{133, 11, 258}, + dictWord{4, 10, 904}, + dictWord{133, 10, 794}, + dictWord{ + 134, + 0, + 2006, + }, + dictWord{5, 11, 30}, + dictWord{7, 11, 495}, + dictWord{8, 11, 134}, + dictWord{9, 11, 788}, + dictWord{140, 11, 438}, + dictWord{135, 11, 2004}, + dictWord{ + 137, + 0, + 696, + }, + dictWord{5, 11, 50}, + dictWord{6, 11, 439}, + dictWord{7, 11, 780}, + dictWord{135, 11, 1040}, + dictWord{7, 11, 772}, + dictWord{7, 11, 1104}, + dictWord{ + 7, + 11, + 1647, + }, + dictWord{11, 11, 269}, + dictWord{11, 11, 539}, + dictWord{11, 11, 607}, + dictWord{11, 11, 627}, + dictWord{11, 11, 706}, + dictWord{11, 11, 975}, + dictWord{12, 11, 248}, + dictWord{12, 11, 311}, + dictWord{12, 11, 434}, + dictWord{12, 11, 600}, + dictWord{12, 11, 622}, + dictWord{13, 11, 297}, + dictWord{ + 13, + 11, + 367, + }, + dictWord{13, 11, 485}, + dictWord{14, 11, 69}, + dictWord{14, 11, 409}, + dictWord{143, 11, 108}, + dictWord{5, 11, 1}, + dictWord{6, 11, 81}, + dictWord{ + 138, 
+ 11, + 520, + }, + dictWord{7, 0, 1718}, + dictWord{9, 0, 95}, + dictWord{9, 0, 274}, + dictWord{10, 0, 279}, + dictWord{10, 0, 317}, + dictWord{10, 0, 420}, + dictWord{11, 0, 303}, + dictWord{11, 0, 808}, + dictWord{12, 0, 134}, + dictWord{12, 0, 367}, + dictWord{13, 0, 149}, + dictWord{13, 0, 347}, + dictWord{14, 0, 349}, + dictWord{14, 0, 406}, + dictWord{ + 18, + 0, + 22, + }, + dictWord{18, 0, 89}, + dictWord{18, 0, 122}, + dictWord{147, 0, 47}, + dictWord{5, 11, 482}, + dictWord{8, 11, 98}, + dictWord{9, 11, 172}, + dictWord{10, 11, 222}, + dictWord{10, 11, 700}, + dictWord{10, 11, 822}, + dictWord{11, 11, 302}, + dictWord{11, 11, 778}, + dictWord{12, 11, 50}, + dictWord{12, 11, 127}, + dictWord{ + 12, + 11, + 396, + }, + dictWord{13, 11, 62}, + dictWord{13, 11, 328}, + dictWord{14, 11, 122}, + dictWord{147, 11, 72}, + dictWord{7, 10, 386}, + dictWord{138, 10, 713}, + dictWord{ + 6, + 10, + 7, + }, + dictWord{6, 10, 35}, + dictWord{7, 10, 147}, + dictWord{7, 10, 1069}, + dictWord{7, 10, 1568}, + dictWord{7, 10, 1575}, + dictWord{7, 10, 1917}, + dictWord{ + 8, + 10, + 43, + }, + dictWord{8, 10, 208}, + dictWord{9, 10, 128}, + dictWord{9, 10, 866}, + dictWord{10, 10, 20}, + dictWord{11, 10, 981}, + dictWord{147, 10, 33}, + dictWord{ + 133, + 0, + 26, + }, + dictWord{132, 0, 550}, + dictWord{5, 11, 2}, + dictWord{7, 11, 1494}, + dictWord{136, 11, 589}, + dictWord{6, 11, 512}, + dictWord{7, 11, 797}, + dictWord{ + 8, + 11, + 253, + }, + dictWord{9, 11, 77}, + dictWord{10, 11, 1}, + dictWord{10, 11, 129}, + dictWord{10, 11, 225}, + dictWord{11, 11, 118}, + dictWord{11, 11, 226}, + dictWord{ + 11, + 11, + 251, + }, + dictWord{11, 11, 430}, + dictWord{11, 11, 701}, + dictWord{11, 11, 974}, + dictWord{11, 11, 982}, + dictWord{12, 11, 64}, + dictWord{12, 11, 260}, + dictWord{ + 12, + 11, + 488, + }, + dictWord{140, 11, 690}, + dictWord{7, 10, 893}, + dictWord{141, 10, 424}, + dictWord{134, 0, 901}, + dictWord{136, 0, 822}, + dictWord{4, 0, 902}, + dictWord{5, 0, 
809}, + dictWord{134, 0, 122}, + dictWord{6, 0, 807}, + dictWord{134, 0, 1366}, + dictWord{7, 0, 262}, + dictWord{5, 11, 748}, + dictWord{134, 11, 553}, + dictWord{133, 0, 620}, + dictWord{4, 0, 34}, + dictWord{5, 0, 574}, + dictWord{7, 0, 279}, + dictWord{7, 0, 1624}, + dictWord{136, 0, 601}, + dictWord{9, 0, 170}, + dictWord{ + 6, + 10, + 322, + }, + dictWord{9, 10, 552}, + dictWord{11, 10, 274}, + dictWord{13, 10, 209}, + dictWord{13, 10, 499}, + dictWord{14, 10, 85}, + dictWord{15, 10, 126}, + dictWord{ + 145, + 10, + 70, + }, + dictWord{132, 0, 537}, + dictWord{4, 11, 12}, + dictWord{7, 11, 420}, + dictWord{7, 11, 522}, + dictWord{7, 11, 809}, + dictWord{8, 11, 797}, + dictWord{ + 141, + 11, + 88, + }, + dictWord{133, 0, 332}, + dictWord{8, 10, 83}, + dictWord{8, 10, 742}, + dictWord{8, 10, 817}, + dictWord{9, 10, 28}, + dictWord{9, 10, 29}, + dictWord{9, 10, 885}, + dictWord{10, 10, 387}, + dictWord{11, 10, 633}, + dictWord{11, 10, 740}, + dictWord{13, 10, 235}, + dictWord{13, 10, 254}, + dictWord{15, 10, 143}, + dictWord{ + 143, + 10, + 146, + }, + dictWord{6, 0, 1909}, + dictWord{9, 0, 964}, + dictWord{12, 0, 822}, + dictWord{12, 0, 854}, + dictWord{12, 0, 865}, + dictWord{12, 0, 910}, + dictWord{12, 0, 938}, + dictWord{15, 0, 169}, + dictWord{15, 0, 208}, + dictWord{15, 0, 211}, + dictWord{18, 0, 205}, + dictWord{18, 0, 206}, + dictWord{18, 0, 220}, + dictWord{18, 0, 223}, + dictWord{152, 0, 24}, + dictWord{140, 10, 49}, + dictWord{5, 11, 528}, + dictWord{135, 11, 1580}, + dictWord{6, 0, 261}, + dictWord{8, 0, 182}, + dictWord{139, 0, 943}, + dictWord{134, 0, 1721}, + dictWord{4, 0, 933}, + dictWord{133, 0, 880}, + dictWord{136, 11, 321}, + dictWord{5, 11, 266}, + dictWord{9, 11, 290}, + dictWord{9, 11, 364}, + dictWord{10, 11, 293}, + dictWord{11, 11, 606}, + dictWord{142, 11, 45}, + dictWord{6, 0, 1609}, + dictWord{4, 11, 50}, + dictWord{6, 11, 510}, + dictWord{6, 11, 594}, + dictWord{9, 11, 121}, + dictWord{10, 11, 49}, + dictWord{10, 11, 412}, + 
dictWord{139, 11, 834}, + dictWord{7, 0, 895}, + dictWord{136, 11, 748}, + dictWord{132, 11, 466}, + dictWord{4, 10, 110}, + dictWord{10, 10, 415}, + dictWord{10, 10, 597}, + dictWord{142, 10, 206}, + dictWord{133, 0, 812}, + dictWord{135, 11, 281}, + dictWord{ + 6, + 0, + 1890, + }, + dictWord{6, 0, 1902}, + dictWord{6, 0, 1916}, + dictWord{9, 0, 929}, + dictWord{9, 0, 942}, + dictWord{9, 0, 975}, + dictWord{9, 0, 984}, + dictWord{9, 0, 986}, + dictWord{ + 9, + 0, + 1011, + }, + dictWord{9, 0, 1019}, + dictWord{12, 0, 804}, + dictWord{12, 0, 851}, + dictWord{12, 0, 867}, + dictWord{12, 0, 916}, + dictWord{12, 0, 923}, + dictWord{ + 15, + 0, + 194, + }, + dictWord{15, 0, 204}, + dictWord{15, 0, 210}, + dictWord{15, 0, 222}, + dictWord{15, 0, 223}, + dictWord{15, 0, 229}, + dictWord{15, 0, 250}, + dictWord{ + 18, + 0, + 179, + }, + dictWord{18, 0, 186}, + dictWord{18, 0, 192}, + dictWord{7, 10, 205}, + dictWord{135, 10, 2000}, + dictWord{132, 11, 667}, + dictWord{135, 0, 778}, + dictWord{ + 4, + 0, + 137, + }, + dictWord{7, 0, 1178}, + dictWord{135, 0, 1520}, + dictWord{134, 0, 1314}, + dictWord{4, 11, 242}, + dictWord{134, 11, 333}, + dictWord{6, 0, 1661}, + dictWord{7, 0, 1975}, + dictWord{7, 0, 2009}, + dictWord{135, 0, 2011}, + dictWord{134, 0, 1591}, + dictWord{4, 10, 283}, + dictWord{135, 10, 1194}, + dictWord{ + 11, + 0, + 820, + }, + dictWord{150, 0, 51}, + dictWord{4, 11, 39}, + dictWord{5, 11, 36}, + dictWord{7, 11, 1843}, + dictWord{8, 11, 407}, + dictWord{11, 11, 144}, + dictWord{ + 140, + 11, + 523, + }, + dictWord{134, 10, 1720}, + dictWord{4, 11, 510}, + dictWord{7, 11, 29}, + dictWord{7, 11, 66}, + dictWord{7, 11, 1980}, + dictWord{10, 11, 487}, + dictWord{ + 10, + 11, + 809, + }, + dictWord{146, 11, 9}, + dictWord{5, 0, 89}, + dictWord{7, 0, 1915}, + dictWord{9, 0, 185}, + dictWord{9, 0, 235}, + dictWord{10, 0, 64}, + dictWord{10, 0, 270}, + dictWord{10, 0, 403}, + dictWord{10, 0, 469}, + dictWord{10, 0, 529}, + dictWord{10, 0, 590}, + dictWord{11, 
0, 140}, + dictWord{11, 0, 860}, + dictWord{13, 0, 1}, + dictWord{ + 13, + 0, + 422, + }, + dictWord{14, 0, 341}, + dictWord{14, 0, 364}, + dictWord{17, 0, 93}, + dictWord{18, 0, 113}, + dictWord{19, 0, 97}, + dictWord{147, 0, 113}, + dictWord{133, 0, 695}, + dictWord{6, 0, 987}, + dictWord{134, 0, 1160}, + dictWord{5, 0, 6}, + dictWord{6, 0, 183}, + dictWord{7, 0, 680}, + dictWord{7, 0, 978}, + dictWord{7, 0, 1013}, + dictWord{ + 7, + 0, + 1055, + }, + dictWord{12, 0, 230}, + dictWord{13, 0, 172}, + dictWord{146, 0, 29}, + dictWord{134, 11, 570}, + dictWord{132, 11, 787}, + dictWord{134, 11, 518}, + dictWord{ + 6, + 0, + 29, + }, + dictWord{139, 0, 63}, + dictWord{132, 11, 516}, + dictWord{136, 11, 821}, + dictWord{132, 0, 311}, + dictWord{134, 0, 1740}, + dictWord{7, 0, 170}, + dictWord{8, 0, 90}, + dictWord{8, 0, 177}, + dictWord{8, 0, 415}, + dictWord{11, 0, 714}, + dictWord{14, 0, 281}, + dictWord{136, 10, 735}, + dictWord{134, 0, 1961}, + dictWord{ + 135, + 11, + 1405, + }, + dictWord{4, 11, 10}, + dictWord{7, 11, 917}, + dictWord{139, 11, 786}, + dictWord{5, 10, 132}, + dictWord{9, 10, 486}, + dictWord{9, 10, 715}, + dictWord{ + 10, + 10, + 458, + }, + dictWord{11, 10, 373}, + dictWord{11, 10, 668}, + dictWord{11, 10, 795}, + dictWord{11, 10, 897}, + dictWord{12, 10, 272}, + dictWord{12, 10, 424}, + dictWord{12, 10, 539}, + dictWord{12, 10, 558}, + dictWord{14, 10, 245}, + dictWord{14, 10, 263}, + dictWord{14, 10, 264}, + dictWord{14, 10, 393}, + dictWord{ + 142, + 10, + 403, + }, + dictWord{11, 0, 91}, + dictWord{13, 0, 129}, + dictWord{15, 0, 101}, + dictWord{145, 0, 125}, + dictWord{135, 0, 1132}, + dictWord{4, 0, 494}, + dictWord{6, 0, 74}, + dictWord{7, 0, 44}, + dictWord{7, 0, 407}, + dictWord{12, 0, 17}, + dictWord{15, 0, 5}, + dictWord{148, 0, 11}, + dictWord{133, 10, 379}, + dictWord{5, 0, 270}, + dictWord{ + 5, + 11, + 684, + }, + dictWord{6, 10, 89}, + dictWord{6, 10, 400}, + dictWord{7, 10, 1569}, + dictWord{7, 10, 1623}, + dictWord{7, 10, 1850}, 
+ dictWord{8, 10, 218}, + dictWord{ + 8, + 10, + 422, + }, + dictWord{9, 10, 570}, + dictWord{138, 10, 626}, + dictWord{4, 0, 276}, + dictWord{133, 0, 296}, + dictWord{6, 0, 1523}, + dictWord{134, 11, 27}, + dictWord{ + 6, + 10, + 387, + }, + dictWord{7, 10, 882}, + dictWord{141, 10, 111}, + dictWord{6, 10, 224}, + dictWord{7, 10, 877}, + dictWord{137, 10, 647}, + dictWord{135, 10, 790}, + dictWord{ + 4, + 0, + 7, + }, + dictWord{5, 0, 90}, + dictWord{5, 0, 158}, + dictWord{6, 0, 542}, + dictWord{7, 0, 221}, + dictWord{7, 0, 1574}, + dictWord{9, 0, 490}, + dictWord{10, 0, 540}, + dictWord{ + 11, + 0, + 443, + }, + dictWord{139, 0, 757}, + dictWord{7, 0, 588}, + dictWord{9, 0, 175}, + dictWord{138, 0, 530}, + dictWord{135, 10, 394}, + dictWord{142, 11, 23}, + dictWord{ + 134, + 0, + 786, + }, + dictWord{135, 0, 580}, + dictWord{7, 0, 88}, + dictWord{136, 0, 627}, + dictWord{5, 0, 872}, + dictWord{6, 0, 57}, + dictWord{7, 0, 471}, + dictWord{9, 0, 447}, + dictWord{137, 0, 454}, + dictWord{6, 11, 342}, + dictWord{6, 11, 496}, + dictWord{8, 11, 275}, + dictWord{137, 11, 206}, + dictWord{4, 11, 909}, + dictWord{133, 11, 940}, + dictWord{6, 0, 735}, + dictWord{132, 11, 891}, + dictWord{8, 0, 845}, + dictWord{8, 0, 916}, + dictWord{135, 10, 1409}, + dictWord{5, 0, 31}, + dictWord{134, 0, 614}, + dictWord{11, 0, 458}, + dictWord{12, 0, 15}, + dictWord{140, 0, 432}, + dictWord{8, 0, 330}, + dictWord{140, 0, 477}, + dictWord{4, 0, 530}, + dictWord{5, 0, 521}, + dictWord{ + 7, + 0, + 1200, + }, + dictWord{10, 0, 460}, + dictWord{132, 11, 687}, + dictWord{6, 0, 424}, + dictWord{135, 0, 1866}, + dictWord{9, 0, 569}, + dictWord{12, 0, 12}, + dictWord{ + 12, + 0, + 81, + }, + dictWord{12, 0, 319}, + dictWord{13, 0, 69}, + dictWord{14, 0, 259}, + dictWord{16, 0, 87}, + dictWord{17, 0, 1}, + dictWord{17, 0, 21}, + dictWord{17, 0, 24}, + dictWord{ + 18, + 0, + 15, + }, + dictWord{18, 0, 56}, + dictWord{18, 0, 59}, + dictWord{18, 0, 127}, + dictWord{18, 0, 154}, + dictWord{19, 0, 
19}, + dictWord{148, 0, 31}, + dictWord{7, 0, 1302}, + dictWord{136, 10, 38}, + dictWord{134, 11, 253}, + dictWord{5, 10, 261}, + dictWord{7, 10, 78}, + dictWord{7, 10, 199}, + dictWord{8, 10, 815}, + dictWord{9, 10, 126}, + dictWord{138, 10, 342}, + dictWord{5, 0, 595}, + dictWord{135, 0, 1863}, + dictWord{6, 11, 41}, + dictWord{141, 11, 160}, + dictWord{5, 0, 13}, + dictWord{134, 0, 142}, + dictWord{6, 0, 97}, + dictWord{7, 0, 116}, + dictWord{8, 0, 322}, + dictWord{8, 0, 755}, + dictWord{9, 0, 548}, + dictWord{10, 0, 714}, + dictWord{11, 0, 884}, + dictWord{13, 0, 324}, + dictWord{7, 11, 1304}, + dictWord{138, 11, 477}, + dictWord{132, 10, 628}, + dictWord{134, 11, 1718}, + dictWord{7, 10, 266}, + dictWord{136, 10, 804}, + dictWord{135, 10, 208}, + dictWord{7, 0, 1021}, + dictWord{6, 10, 79}, + dictWord{135, 10, 1519}, + dictWord{7, 0, 1472}, + dictWord{135, 0, 1554}, + dictWord{6, 11, 362}, + dictWord{146, 11, 51}, + dictWord{7, 0, 1071}, + dictWord{7, 0, 1541}, + dictWord{7, 0, 1767}, + dictWord{7, 0, 1806}, + dictWord{11, 0, 162}, + dictWord{11, 0, 242}, + dictWord{11, 0, 452}, + dictWord{12, 0, 605}, + dictWord{15, 0, 26}, + dictWord{144, 0, 44}, + dictWord{136, 10, 741}, + dictWord{133, 11, 115}, + dictWord{145, 0, 115}, + dictWord{134, 10, 376}, + dictWord{6, 0, 1406}, + dictWord{134, 0, 1543}, + dictWord{5, 11, 193}, + dictWord{12, 11, 178}, + dictWord{13, 11, 130}, + dictWord{ + 145, + 11, + 84, + }, + dictWord{135, 0, 1111}, + dictWord{8, 0, 1}, + dictWord{9, 0, 650}, + dictWord{10, 0, 326}, + dictWord{5, 11, 705}, + dictWord{137, 11, 606}, + dictWord{5, 0, 488}, + dictWord{6, 0, 527}, + dictWord{7, 0, 489}, + dictWord{7, 0, 1636}, + dictWord{8, 0, 121}, + dictWord{8, 0, 144}, + dictWord{8, 0, 359}, + dictWord{9, 0, 193}, + dictWord{9, 0, 241}, + dictWord{9, 0, 336}, + dictWord{9, 0, 882}, + dictWord{11, 0, 266}, + dictWord{11, 0, 372}, + dictWord{11, 0, 944}, + dictWord{12, 0, 401}, + dictWord{140, 0, 641}, + dictWord{135, 11, 174}, + dictWord{6, 0, 
267}, + dictWord{7, 10, 244}, + dictWord{7, 10, 632}, + dictWord{7, 10, 1609}, + dictWord{8, 10, 178}, + dictWord{8, 10, 638}, + dictWord{141, 10, 58}, + dictWord{134, 0, 1983}, + dictWord{134, 0, 1155}, + dictWord{134, 0, 1575}, + dictWord{134, 0, 1438}, + dictWord{9, 0, 31}, + dictWord{ + 10, + 0, + 244, + }, + dictWord{10, 0, 699}, + dictWord{12, 0, 149}, + dictWord{141, 0, 497}, + dictWord{133, 0, 377}, + dictWord{4, 11, 122}, + dictWord{5, 11, 796}, + dictWord{ + 5, + 11, + 952, + }, + dictWord{6, 11, 1660}, + dictWord{6, 11, 1671}, + dictWord{8, 11, 567}, + dictWord{9, 11, 687}, + dictWord{9, 11, 742}, + dictWord{10, 11, 686}, + dictWord{ + 11, + 11, + 356, + }, + dictWord{11, 11, 682}, + dictWord{140, 11, 281}, + dictWord{145, 0, 101}, + dictWord{11, 11, 0}, + dictWord{144, 11, 78}, + dictWord{5, 11, 179}, + dictWord{ + 5, + 10, + 791, + }, + dictWord{7, 11, 1095}, + dictWord{135, 11, 1213}, + dictWord{8, 11, 372}, + dictWord{9, 11, 122}, + dictWord{138, 11, 175}, + dictWord{7, 10, 686}, + dictWord{8, 10, 33}, + dictWord{8, 10, 238}, + dictWord{10, 10, 616}, + dictWord{11, 10, 467}, + dictWord{11, 10, 881}, + dictWord{13, 10, 217}, + dictWord{13, 10, 253}, + dictWord{142, 10, 268}, + dictWord{9, 0, 476}, + dictWord{4, 11, 66}, + dictWord{7, 11, 722}, + dictWord{135, 11, 904}, + dictWord{7, 11, 352}, + dictWord{137, 11, 684}, + dictWord{135, 0, 2023}, + dictWord{135, 0, 1836}, + dictWord{132, 10, 447}, + dictWord{5, 0, 843}, + dictWord{144, 0, 35}, + dictWord{137, 11, 779}, + dictWord{ + 141, + 11, + 35, + }, + dictWord{4, 10, 128}, + dictWord{5, 10, 415}, + dictWord{6, 10, 462}, + dictWord{7, 10, 294}, + dictWord{7, 10, 578}, + dictWord{10, 10, 710}, + dictWord{ + 139, + 10, + 86, + }, + dictWord{132, 0, 554}, + dictWord{133, 0, 536}, + dictWord{136, 10, 587}, + dictWord{5, 0, 207}, + dictWord{9, 0, 79}, + dictWord{11, 0, 625}, + dictWord{ + 145, + 0, + 7, + }, + dictWord{7, 0, 1371}, + dictWord{6, 10, 427}, + dictWord{138, 10, 692}, + dictWord{4, 0, 424}, + 
dictWord{4, 10, 195}, + dictWord{135, 10, 802}, + dictWord{ + 8, + 0, + 785, + }, + dictWord{133, 11, 564}, + dictWord{135, 0, 336}, + dictWord{4, 0, 896}, + dictWord{6, 0, 1777}, + dictWord{134, 11, 556}, + dictWord{137, 11, 103}, + dictWord{134, 10, 1683}, + dictWord{7, 11, 544}, + dictWord{8, 11, 719}, + dictWord{138, 11, 61}, + dictWord{138, 10, 472}, + dictWord{4, 11, 5}, + dictWord{5, 11, 498}, + dictWord{136, 11, 637}, + dictWord{7, 0, 750}, + dictWord{9, 0, 223}, + dictWord{11, 0, 27}, + dictWord{11, 0, 466}, + dictWord{12, 0, 624}, + dictWord{14, 0, 265}, + dictWord{ + 146, + 0, + 61, + }, + dictWord{12, 0, 238}, + dictWord{18, 0, 155}, + dictWord{12, 11, 238}, + dictWord{146, 11, 155}, + dictWord{151, 10, 28}, + dictWord{133, 11, 927}, + dictWord{12, 0, 383}, + dictWord{5, 10, 3}, + dictWord{8, 10, 578}, + dictWord{9, 10, 118}, + dictWord{10, 10, 705}, + dictWord{141, 10, 279}, + dictWord{4, 11, 893}, + dictWord{ + 5, + 11, + 780, + }, + dictWord{133, 11, 893}, + dictWord{4, 0, 603}, + dictWord{133, 0, 661}, + dictWord{4, 0, 11}, + dictWord{6, 0, 128}, + dictWord{7, 0, 231}, + dictWord{ + 7, + 0, + 1533, + }, + dictWord{10, 0, 725}, + dictWord{5, 10, 229}, + dictWord{5, 11, 238}, + dictWord{135, 11, 1350}, + dictWord{8, 10, 102}, + dictWord{10, 10, 578}, + dictWord{ + 10, + 10, + 672, + }, + dictWord{12, 10, 496}, + dictWord{13, 10, 408}, + dictWord{14, 10, 121}, + dictWord{145, 10, 106}, + dictWord{132, 0, 476}, + dictWord{134, 0, 1552}, + dictWord{134, 11, 1729}, + dictWord{8, 10, 115}, + dictWord{8, 10, 350}, + dictWord{9, 10, 489}, + dictWord{10, 10, 128}, + dictWord{11, 10, 306}, + dictWord{ + 12, + 10, + 373, + }, + dictWord{14, 10, 30}, + dictWord{17, 10, 79}, + dictWord{19, 10, 80}, + dictWord{150, 10, 55}, + dictWord{135, 0, 1807}, + dictWord{4, 0, 680}, + dictWord{ + 4, + 11, + 60, + }, + dictWord{7, 11, 760}, + dictWord{7, 11, 1800}, + dictWord{8, 11, 314}, + dictWord{9, 11, 700}, + dictWord{139, 11, 487}, + dictWord{4, 10, 230}, + dictWord{ + 
5, + 10, + 702, + }, + dictWord{148, 11, 94}, + dictWord{132, 11, 228}, + dictWord{139, 0, 435}, + dictWord{9, 0, 20}, + dictWord{10, 0, 324}, + dictWord{10, 0, 807}, + dictWord{ + 139, + 0, + 488, + }, + dictWord{6, 10, 1728}, + dictWord{136, 11, 419}, + dictWord{4, 10, 484}, + dictWord{18, 10, 26}, + dictWord{19, 10, 42}, + dictWord{20, 10, 43}, + dictWord{ + 21, + 10, + 0, + }, + dictWord{23, 10, 27}, + dictWord{152, 10, 14}, + dictWord{135, 0, 1431}, + dictWord{133, 11, 828}, + dictWord{5, 0, 112}, + dictWord{6, 0, 103}, + dictWord{ + 6, + 0, + 150, + }, + dictWord{7, 0, 1303}, + dictWord{9, 0, 292}, + dictWord{10, 0, 481}, + dictWord{20, 0, 13}, + dictWord{7, 11, 176}, + dictWord{7, 11, 178}, + dictWord{7, 11, 1110}, + dictWord{10, 11, 481}, + dictWord{148, 11, 13}, + dictWord{138, 0, 356}, + dictWord{4, 11, 51}, + dictWord{5, 11, 39}, + dictWord{6, 11, 4}, + dictWord{7, 11, 591}, + dictWord{ + 7, + 11, + 849, + }, + dictWord{7, 11, 951}, + dictWord{7, 11, 1129}, + dictWord{7, 11, 1613}, + dictWord{7, 11, 1760}, + dictWord{7, 11, 1988}, + dictWord{9, 11, 434}, + dictWord{10, 11, 754}, + dictWord{11, 11, 25}, + dictWord{11, 11, 37}, + dictWord{139, 11, 414}, + dictWord{6, 0, 1963}, + dictWord{134, 0, 2000}, + dictWord{ + 132, + 10, + 633, + }, + dictWord{6, 0, 1244}, + dictWord{133, 11, 902}, + dictWord{135, 11, 928}, + dictWord{140, 0, 18}, + dictWord{138, 0, 204}, + dictWord{135, 11, 1173}, + dictWord{134, 0, 867}, + dictWord{4, 0, 708}, + dictWord{8, 0, 15}, + dictWord{9, 0, 50}, + dictWord{9, 0, 386}, + dictWord{11, 0, 18}, + dictWord{11, 0, 529}, + dictWord{140, 0, 228}, + dictWord{134, 11, 270}, + dictWord{4, 0, 563}, + dictWord{7, 0, 109}, + dictWord{7, 0, 592}, + dictWord{7, 0, 637}, + dictWord{7, 0, 770}, + dictWord{8, 0, 463}, + dictWord{ + 9, + 0, + 60, + }, + dictWord{9, 0, 335}, + dictWord{9, 0, 904}, + dictWord{10, 0, 73}, + dictWord{11, 0, 434}, + dictWord{12, 0, 585}, + dictWord{13, 0, 331}, + dictWord{18, 0, 110}, + dictWord{148, 0, 60}, + 
dictWord{132, 0, 502}, + dictWord{14, 11, 359}, + dictWord{19, 11, 52}, + dictWord{148, 11, 47}, + dictWord{6, 11, 377}, + dictWord{7, 11, 1025}, + dictWord{9, 11, 613}, + dictWord{145, 11, 104}, + dictWord{6, 0, 347}, + dictWord{10, 0, 161}, + dictWord{5, 10, 70}, + dictWord{5, 10, 622}, + dictWord{6, 10, 334}, + dictWord{ + 7, + 10, + 1032, + }, + dictWord{9, 10, 171}, + dictWord{11, 10, 26}, + dictWord{11, 10, 213}, + dictWord{11, 10, 637}, + dictWord{11, 10, 707}, + dictWord{12, 10, 202}, + dictWord{12, 10, 380}, + dictWord{13, 10, 226}, + dictWord{13, 10, 355}, + dictWord{14, 10, 222}, + dictWord{145, 10, 42}, + dictWord{132, 11, 416}, + dictWord{4, 0, 33}, + dictWord{5, 0, 102}, + dictWord{6, 0, 284}, + dictWord{7, 0, 1079}, + dictWord{7, 0, 1423}, + dictWord{7, 0, 1702}, + dictWord{8, 0, 470}, + dictWord{9, 0, 554}, + dictWord{ + 9, + 0, + 723, + }, + dictWord{11, 0, 333}, + dictWord{142, 11, 372}, + dictWord{5, 11, 152}, + dictWord{5, 11, 197}, + dictWord{7, 11, 340}, + dictWord{7, 11, 867}, + dictWord{ + 10, + 11, + 548, + }, + dictWord{10, 11, 581}, + dictWord{11, 11, 6}, + dictWord{12, 11, 3}, + dictWord{12, 11, 19}, + dictWord{14, 11, 110}, + dictWord{142, 11, 289}, + dictWord{ + 7, + 0, + 246, + }, + dictWord{135, 0, 840}, + dictWord{6, 0, 10}, + dictWord{8, 0, 571}, + dictWord{9, 0, 739}, + dictWord{143, 0, 91}, + dictWord{6, 0, 465}, + dictWord{7, 0, 1465}, + dictWord{ + 4, + 10, + 23, + }, + dictWord{4, 10, 141}, + dictWord{5, 10, 313}, + dictWord{5, 10, 1014}, + dictWord{6, 10, 50}, + dictWord{7, 10, 142}, + dictWord{7, 10, 559}, + dictWord{ + 8, + 10, + 640, + }, + dictWord{9, 10, 460}, + dictWord{9, 10, 783}, + dictWord{11, 10, 741}, + dictWord{12, 10, 183}, + dictWord{141, 10, 488}, + dictWord{133, 0, 626}, + dictWord{ + 136, + 0, + 614, + }, + dictWord{138, 0, 237}, + dictWord{7, 11, 34}, + dictWord{7, 11, 190}, + dictWord{8, 11, 28}, + dictWord{8, 11, 141}, + dictWord{8, 11, 444}, + dictWord{ + 8, + 11, + 811, + }, + dictWord{9, 11, 468}, + 
dictWord{11, 11, 334}, + dictWord{12, 11, 24}, + dictWord{12, 11, 386}, + dictWord{140, 11, 576}, + dictWord{133, 11, 757}, + dictWord{ + 5, + 0, + 18, + }, + dictWord{6, 0, 526}, + dictWord{13, 0, 24}, + dictWord{13, 0, 110}, + dictWord{19, 0, 5}, + dictWord{147, 0, 44}, + dictWord{6, 0, 506}, + dictWord{134, 11, 506}, + dictWord{135, 11, 1553}, + dictWord{4, 0, 309}, + dictWord{5, 0, 462}, + dictWord{7, 0, 970}, + dictWord{7, 0, 1097}, + dictWord{22, 0, 30}, + dictWord{22, 0, 33}, + dictWord{ + 7, + 11, + 1385, + }, + dictWord{11, 11, 582}, + dictWord{11, 11, 650}, + dictWord{11, 11, 901}, + dictWord{11, 11, 949}, + dictWord{12, 11, 232}, + dictWord{12, 11, 236}, + dictWord{13, 11, 413}, + dictWord{13, 11, 501}, + dictWord{146, 11, 116}, + dictWord{9, 0, 140}, + dictWord{5, 10, 222}, + dictWord{138, 10, 534}, + dictWord{6, 0, 1056}, + dictWord{137, 10, 906}, + dictWord{134, 0, 1704}, + dictWord{138, 10, 503}, + dictWord{134, 0, 1036}, + dictWord{5, 10, 154}, + dictWord{7, 10, 1491}, + dictWord{ + 10, + 10, + 379, + }, + dictWord{138, 10, 485}, + dictWord{4, 11, 383}, + dictWord{133, 10, 716}, + dictWord{134, 0, 1315}, + dictWord{5, 0, 86}, + dictWord{7, 0, 743}, + dictWord{ + 9, + 0, + 85, + }, + dictWord{10, 0, 281}, + dictWord{10, 0, 432}, + dictWord{11, 0, 825}, + dictWord{12, 0, 251}, + dictWord{13, 0, 118}, + dictWord{142, 0, 378}, + dictWord{ + 8, + 0, + 264, + }, + dictWord{4, 10, 91}, + dictWord{5, 10, 388}, + dictWord{5, 10, 845}, + dictWord{6, 10, 206}, + dictWord{6, 10, 252}, + dictWord{6, 10, 365}, + dictWord{7, 10, 136}, + dictWord{7, 10, 531}, + dictWord{136, 10, 621}, + dictWord{5, 0, 524}, + dictWord{133, 0, 744}, + dictWord{5, 11, 277}, + dictWord{141, 11, 247}, + dictWord{ + 132, + 11, + 435, + }, + dictWord{10, 0, 107}, + dictWord{140, 0, 436}, + dictWord{132, 0, 927}, + dictWord{10, 0, 123}, + dictWord{12, 0, 670}, + dictWord{146, 0, 94}, + dictWord{ + 7, + 0, + 1149, + }, + dictWord{9, 0, 156}, + dictWord{138, 0, 957}, + dictWord{5, 11, 265}, 
+ dictWord{6, 11, 212}, + dictWord{135, 11, 28}, + dictWord{133, 0, 778}, + dictWord{ + 133, + 0, + 502, + }, + dictWord{8, 0, 196}, + dictWord{10, 0, 283}, + dictWord{139, 0, 406}, + dictWord{135, 10, 576}, + dictWord{136, 11, 535}, + dictWord{134, 0, 1312}, + dictWord{ + 5, + 10, + 771, + }, + dictWord{5, 10, 863}, + dictWord{5, 10, 898}, + dictWord{6, 10, 1632}, + dictWord{6, 10, 1644}, + dictWord{134, 10, 1780}, + dictWord{5, 0, 855}, + dictWord{5, 10, 331}, + dictWord{135, 11, 1487}, + dictWord{132, 11, 702}, + dictWord{5, 11, 808}, + dictWord{135, 11, 2045}, + dictWord{7, 0, 1400}, + dictWord{ + 9, + 0, + 446, + }, + dictWord{138, 0, 45}, + dictWord{140, 10, 632}, + dictWord{132, 0, 1003}, + dictWord{5, 11, 166}, + dictWord{8, 11, 739}, + dictWord{140, 11, 511}, + dictWord{ + 5, + 10, + 107, + }, + dictWord{7, 10, 201}, + dictWord{136, 10, 518}, + dictWord{6, 10, 446}, + dictWord{135, 10, 1817}, + dictWord{134, 0, 1532}, + dictWord{ + 134, + 0, + 1097, + }, + dictWord{4, 11, 119}, + dictWord{5, 11, 170}, + dictWord{5, 11, 447}, + dictWord{7, 11, 1708}, + dictWord{7, 11, 1889}, + dictWord{9, 11, 357}, + dictWord{ + 9, + 11, + 719, + }, + dictWord{12, 11, 486}, + dictWord{140, 11, 596}, + dictWord{9, 10, 851}, + dictWord{141, 10, 510}, + dictWord{7, 0, 612}, + dictWord{8, 0, 545}, + dictWord{ + 8, + 0, + 568, + }, + dictWord{8, 0, 642}, + dictWord{9, 0, 717}, + dictWord{10, 0, 541}, + dictWord{10, 0, 763}, + dictWord{11, 0, 449}, + dictWord{12, 0, 489}, + dictWord{13, 0, 153}, + dictWord{13, 0, 296}, + dictWord{14, 0, 138}, + dictWord{14, 0, 392}, + dictWord{15, 0, 50}, + dictWord{16, 0, 6}, + dictWord{16, 0, 12}, + dictWord{20, 0, 9}, + dictWord{ + 132, + 10, + 504, + }, + dictWord{4, 11, 450}, + dictWord{135, 11, 1158}, + dictWord{11, 0, 54}, + dictWord{13, 0, 173}, + dictWord{13, 0, 294}, + dictWord{5, 10, 883}, + dictWord{ + 5, + 10, + 975, + }, + dictWord{8, 10, 392}, + dictWord{148, 10, 7}, + dictWord{13, 0, 455}, + dictWord{15, 0, 99}, + dictWord{15, 0, 
129}, + dictWord{144, 0, 68}, + dictWord{135, 0, 172}, + dictWord{132, 11, 754}, + dictWord{5, 10, 922}, + dictWord{134, 10, 1707}, + dictWord{134, 0, 1029}, + dictWord{17, 11, 39}, + dictWord{148, 11, 36}, + dictWord{ + 4, + 0, + 568, + }, + dictWord{5, 10, 993}, + dictWord{7, 10, 515}, + dictWord{137, 10, 91}, + dictWord{132, 0, 732}, + dictWord{10, 0, 617}, + dictWord{138, 11, 617}, + dictWord{ + 134, + 0, + 974, + }, + dictWord{7, 0, 989}, + dictWord{10, 0, 377}, + dictWord{12, 0, 363}, + dictWord{13, 0, 68}, + dictWord{13, 0, 94}, + dictWord{14, 0, 108}, + dictWord{ + 142, + 0, + 306, + }, + dictWord{136, 0, 733}, + dictWord{132, 0, 428}, + dictWord{7, 0, 1789}, + dictWord{135, 11, 1062}, + dictWord{7, 0, 2015}, + dictWord{140, 0, 665}, + dictWord{135, 10, 1433}, + dictWord{5, 0, 287}, + dictWord{7, 10, 921}, + dictWord{8, 10, 580}, + dictWord{8, 10, 593}, + dictWord{8, 10, 630}, + dictWord{138, 10, 28}, + dictWord{138, 0, 806}, + dictWord{4, 10, 911}, + dictWord{5, 10, 867}, + dictWord{5, 10, 1013}, + dictWord{7, 10, 2034}, + dictWord{8, 10, 798}, + dictWord{136, 10, 813}, + dictWord{134, 0, 1539}, + dictWord{8, 11, 523}, + dictWord{150, 11, 34}, + dictWord{135, 11, 740}, + dictWord{7, 11, 238}, + dictWord{7, 11, 2033}, + dictWord{ + 8, + 11, + 120, + }, + dictWord{8, 11, 188}, + dictWord{8, 11, 659}, + dictWord{9, 11, 598}, + dictWord{10, 11, 466}, + dictWord{12, 11, 342}, + dictWord{12, 11, 588}, + dictWord{ + 13, + 11, + 503, + }, + dictWord{14, 11, 246}, + dictWord{143, 11, 92}, + dictWord{7, 0, 1563}, + dictWord{141, 0, 182}, + dictWord{5, 10, 135}, + dictWord{6, 10, 519}, + dictWord{ + 7, + 10, + 1722, + }, + dictWord{10, 10, 271}, + dictWord{11, 10, 261}, + dictWord{145, 10, 54}, + dictWord{14, 10, 338}, + dictWord{148, 10, 81}, + dictWord{7, 0, 484}, + dictWord{ + 4, + 10, + 300, + }, + dictWord{133, 10, 436}, + dictWord{145, 11, 114}, + dictWord{6, 0, 1623}, + dictWord{134, 0, 1681}, + dictWord{133, 11, 640}, + dictWord{4, 11, 201}, + dictWord{7, 11, 
1744}, + dictWord{8, 11, 602}, + dictWord{11, 11, 247}, + dictWord{11, 11, 826}, + dictWord{145, 11, 65}, + dictWord{8, 11, 164}, + dictWord{ + 146, + 11, + 62, + }, + dictWord{6, 0, 1833}, + dictWord{6, 0, 1861}, + dictWord{136, 0, 878}, + dictWord{134, 0, 1569}, + dictWord{8, 10, 357}, + dictWord{10, 10, 745}, + dictWord{ + 14, + 10, + 426, + }, + dictWord{17, 10, 94}, + dictWord{147, 10, 57}, + dictWord{12, 0, 93}, + dictWord{12, 0, 501}, + dictWord{13, 0, 362}, + dictWord{14, 0, 151}, + dictWord{15, 0, 40}, + dictWord{15, 0, 59}, + dictWord{16, 0, 46}, + dictWord{17, 0, 25}, + dictWord{18, 0, 14}, + dictWord{18, 0, 134}, + dictWord{19, 0, 25}, + dictWord{19, 0, 69}, + dictWord{ + 20, + 0, + 16, + }, + dictWord{20, 0, 19}, + dictWord{20, 0, 66}, + dictWord{21, 0, 23}, + dictWord{21, 0, 25}, + dictWord{150, 0, 42}, + dictWord{6, 0, 1748}, + dictWord{8, 0, 715}, + dictWord{ + 9, + 0, + 802, + }, + dictWord{10, 0, 46}, + dictWord{10, 0, 819}, + dictWord{13, 0, 308}, + dictWord{14, 0, 351}, + dictWord{14, 0, 363}, + dictWord{146, 0, 67}, + dictWord{ + 132, + 0, + 994, + }, + dictWord{4, 0, 63}, + dictWord{133, 0, 347}, + dictWord{132, 0, 591}, + dictWord{133, 0, 749}, + dictWord{7, 11, 1577}, + dictWord{10, 11, 304}, + dictWord{ + 10, + 11, + 549, + }, + dictWord{11, 11, 424}, + dictWord{12, 11, 365}, + dictWord{13, 11, 220}, + dictWord{13, 11, 240}, + dictWord{142, 11, 33}, + dictWord{133, 0, 366}, + dictWord{ + 7, + 0, + 557, + }, + dictWord{12, 0, 547}, + dictWord{14, 0, 86}, + dictWord{133, 10, 387}, + dictWord{135, 0, 1747}, + dictWord{132, 11, 907}, + dictWord{5, 11, 100}, + dictWord{10, 11, 329}, + dictWord{12, 11, 416}, + dictWord{149, 11, 29}, + dictWord{4, 10, 6}, + dictWord{5, 10, 708}, + dictWord{136, 10, 75}, + dictWord{7, 10, 1351}, + dictWord{9, 10, 581}, + dictWord{10, 10, 639}, + dictWord{11, 10, 453}, + dictWord{140, 10, 584}, + dictWord{7, 0, 89}, + dictWord{132, 10, 303}, + dictWord{138, 10, 772}, + dictWord{132, 11, 176}, + dictWord{5, 11, 636}, 
+ dictWord{5, 11, 998}, + dictWord{8, 11, 26}, + dictWord{137, 11, 358}, + dictWord{7, 11, 9}, + dictWord{7, 11, 1508}, + dictWord{9, 11, 317}, + dictWord{10, 11, 210}, + dictWord{10, 11, 292}, + dictWord{10, 11, 533}, + dictWord{11, 11, 555}, + dictWord{12, 11, 526}, + dictWord{ + 12, + 11, + 607, + }, + dictWord{13, 11, 263}, + dictWord{13, 11, 459}, + dictWord{142, 11, 271}, + dictWord{134, 0, 1463}, + dictWord{6, 0, 772}, + dictWord{6, 0, 1137}, + dictWord{ + 139, + 11, + 595, + }, + dictWord{7, 0, 977}, + dictWord{139, 11, 66}, + dictWord{138, 0, 893}, + dictWord{20, 0, 48}, + dictWord{148, 11, 48}, + dictWord{5, 0, 824}, + dictWord{ + 133, + 0, + 941, + }, + dictWord{134, 11, 295}, + dictWord{7, 0, 1543}, + dictWord{7, 0, 1785}, + dictWord{10, 0, 690}, + dictWord{4, 10, 106}, + dictWord{139, 10, 717}, + dictWord{ + 7, + 0, + 440, + }, + dictWord{8, 0, 230}, + dictWord{139, 0, 106}, + dictWord{5, 10, 890}, + dictWord{133, 10, 988}, + dictWord{6, 10, 626}, + dictWord{142, 10, 431}, + dictWord{ + 10, + 11, + 127, + }, + dictWord{141, 11, 27}, + dictWord{17, 0, 32}, + dictWord{10, 10, 706}, + dictWord{150, 10, 44}, + dictWord{132, 0, 216}, + dictWord{137, 0, 332}, + dictWord{4, 10, 698}, + dictWord{136, 11, 119}, + dictWord{139, 11, 267}, + dictWord{138, 10, 17}, + dictWord{11, 11, 526}, + dictWord{11, 11, 939}, + dictWord{ + 141, + 11, + 290, + }, + dictWord{7, 11, 1167}, + dictWord{11, 11, 934}, + dictWord{13, 11, 391}, + dictWord{145, 11, 76}, + dictWord{139, 11, 39}, + dictWord{134, 10, 84}, + dictWord{ + 4, + 0, + 914, + }, + dictWord{5, 0, 800}, + dictWord{133, 0, 852}, + dictWord{10, 0, 416}, + dictWord{141, 0, 115}, + dictWord{7, 0, 564}, + dictWord{142, 0, 168}, + dictWord{ + 4, + 0, + 918, + }, + dictWord{133, 0, 876}, + dictWord{134, 0, 1764}, + dictWord{152, 0, 3}, + dictWord{4, 0, 92}, + dictWord{5, 0, 274}, + dictWord{7, 11, 126}, + dictWord{136, 11, 84}, + dictWord{140, 10, 498}, + dictWord{136, 11, 790}, + dictWord{8, 0, 501}, + dictWord{5, 10, 
986}, + dictWord{6, 10, 130}, + dictWord{7, 10, 1582}, + dictWord{ + 8, + 10, + 458, + }, + dictWord{10, 10, 101}, + dictWord{10, 10, 318}, + dictWord{138, 10, 823}, + dictWord{6, 11, 64}, + dictWord{12, 11, 377}, + dictWord{141, 11, 309}, + dictWord{ + 5, + 0, + 743, + }, + dictWord{138, 0, 851}, + dictWord{4, 0, 49}, + dictWord{7, 0, 280}, + dictWord{135, 0, 1633}, + dictWord{134, 0, 879}, + dictWord{136, 0, 47}, + dictWord{ + 7, + 10, + 1644, + }, + dictWord{137, 10, 129}, + dictWord{132, 0, 865}, + dictWord{134, 0, 1202}, + dictWord{9, 11, 34}, + dictWord{139, 11, 484}, + dictWord{135, 10, 997}, + dictWord{5, 0, 272}, + dictWord{5, 0, 908}, + dictWord{5, 0, 942}, + dictWord{8, 0, 197}, + dictWord{9, 0, 47}, + dictWord{11, 0, 538}, + dictWord{139, 0, 742}, + dictWord{ + 6, + 11, + 1700, + }, + dictWord{7, 11, 26}, + dictWord{7, 11, 293}, + dictWord{7, 11, 382}, + dictWord{7, 11, 1026}, + dictWord{7, 11, 1087}, + dictWord{7, 11, 2027}, + dictWord{ + 8, + 11, + 24, + }, + dictWord{8, 11, 114}, + dictWord{8, 11, 252}, + dictWord{8, 11, 727}, + dictWord{8, 11, 729}, + dictWord{9, 11, 30}, + dictWord{9, 11, 199}, + dictWord{9, 11, 231}, + dictWord{9, 11, 251}, + dictWord{9, 11, 334}, + dictWord{9, 11, 361}, + dictWord{9, 11, 488}, + dictWord{9, 11, 712}, + dictWord{10, 11, 55}, + dictWord{10, 11, 60}, + dictWord{ + 10, + 11, + 232, + }, + dictWord{10, 11, 332}, + dictWord{10, 11, 384}, + dictWord{10, 11, 396}, + dictWord{10, 11, 504}, + dictWord{10, 11, 542}, + dictWord{10, 11, 652}, + dictWord{11, 11, 20}, + dictWord{11, 11, 48}, + dictWord{11, 11, 207}, + dictWord{11, 11, 291}, + dictWord{11, 11, 298}, + dictWord{11, 11, 342}, + dictWord{ + 11, + 11, + 365, + }, + dictWord{11, 11, 394}, + dictWord{11, 11, 620}, + dictWord{11, 11, 705}, + dictWord{11, 11, 1017}, + dictWord{12, 11, 123}, + dictWord{12, 11, 340}, + dictWord{12, 11, 406}, + dictWord{12, 11, 643}, + dictWord{13, 11, 61}, + dictWord{13, 11, 269}, + dictWord{13, 11, 311}, + dictWord{13, 11, 319}, + 
dictWord{13, 11, 486}, + dictWord{14, 11, 234}, + dictWord{15, 11, 62}, + dictWord{15, 11, 85}, + dictWord{16, 11, 71}, + dictWord{18, 11, 119}, + dictWord{148, 11, 105}, + dictWord{ + 6, + 0, + 1455, + }, + dictWord{150, 11, 37}, + dictWord{135, 10, 1927}, + dictWord{135, 0, 1911}, + dictWord{137, 0, 891}, + dictWord{7, 10, 1756}, + dictWord{137, 10, 98}, + dictWord{7, 10, 1046}, + dictWord{139, 10, 160}, + dictWord{132, 0, 761}, + dictWord{6, 11, 379}, + dictWord{7, 11, 270}, + dictWord{7, 11, 1116}, + dictWord{ + 8, + 11, + 176, + }, + dictWord{8, 11, 183}, + dictWord{9, 11, 432}, + dictWord{9, 11, 661}, + dictWord{12, 11, 247}, + dictWord{12, 11, 617}, + dictWord{146, 11, 125}, + dictWord{ + 6, + 10, + 45, + }, + dictWord{7, 10, 433}, + dictWord{8, 10, 129}, + dictWord{9, 10, 21}, + dictWord{10, 10, 392}, + dictWord{11, 10, 79}, + dictWord{12, 10, 499}, + dictWord{ + 13, + 10, + 199, + }, + dictWord{141, 10, 451}, + dictWord{4, 0, 407}, + dictWord{5, 11, 792}, + dictWord{133, 11, 900}, + dictWord{132, 0, 560}, + dictWord{135, 0, 183}, + dictWord{ + 13, + 0, + 490, + }, + dictWord{7, 10, 558}, + dictWord{136, 10, 353}, + dictWord{4, 0, 475}, + dictWord{6, 0, 731}, + dictWord{11, 0, 35}, + dictWord{13, 0, 71}, + dictWord{13, 0, 177}, + dictWord{14, 0, 422}, + dictWord{133, 10, 785}, + dictWord{8, 10, 81}, + dictWord{9, 10, 189}, + dictWord{9, 10, 201}, + dictWord{11, 10, 478}, + dictWord{11, 10, 712}, + dictWord{141, 10, 338}, + dictWord{4, 0, 418}, + dictWord{4, 0, 819}, + dictWord{133, 10, 353}, + dictWord{151, 10, 26}, + dictWord{4, 11, 901}, + dictWord{ + 133, + 11, + 776, + }, + dictWord{132, 0, 575}, + dictWord{7, 0, 818}, + dictWord{16, 0, 92}, + dictWord{17, 0, 14}, + dictWord{17, 0, 45}, + dictWord{18, 0, 75}, + dictWord{148, 0, 18}, + dictWord{ + 6, + 0, + 222, + }, + dictWord{7, 0, 636}, + dictWord{7, 0, 1620}, + dictWord{8, 0, 409}, + dictWord{9, 0, 693}, + dictWord{139, 0, 77}, + dictWord{6, 10, 25}, + dictWord{7, 10, 855}, + dictWord{7, 10, 1258}, + 
dictWord{144, 10, 32}, + dictWord{6, 0, 1880}, + dictWord{6, 0, 1887}, + dictWord{6, 0, 1918}, + dictWord{6, 0, 1924}, + dictWord{9, 0, 967}, + dictWord{9, 0, 995}, + dictWord{9, 0, 1015}, + dictWord{12, 0, 826}, + dictWord{12, 0, 849}, + dictWord{12, 0, 857}, + dictWord{12, 0, 860}, + dictWord{12, 0, 886}, + dictWord{ + 12, + 0, + 932, + }, + dictWord{18, 0, 228}, + dictWord{18, 0, 231}, + dictWord{146, 0, 240}, + dictWord{134, 0, 633}, + dictWord{134, 0, 1308}, + dictWord{4, 11, 37}, + dictWord{ + 5, + 11, + 334, + }, + dictWord{135, 11, 1253}, + dictWord{10, 0, 86}, + dictWord{4, 10, 4}, + dictWord{7, 10, 1118}, + dictWord{7, 10, 1320}, + dictWord{7, 10, 1706}, + dictWord{ + 8, + 10, + 277, + }, + dictWord{9, 10, 622}, + dictWord{11, 10, 724}, + dictWord{12, 10, 350}, + dictWord{12, 10, 397}, + dictWord{13, 10, 28}, + dictWord{13, 10, 159}, + dictWord{ + 15, + 10, + 89, + }, + dictWord{18, 10, 5}, + dictWord{19, 10, 9}, + dictWord{20, 10, 34}, + dictWord{150, 10, 47}, + dictWord{132, 11, 508}, + dictWord{137, 11, 448}, + dictWord{ + 12, + 11, + 107, + }, + dictWord{146, 11, 31}, + dictWord{132, 0, 817}, + dictWord{134, 0, 663}, + dictWord{133, 0, 882}, + dictWord{134, 0, 914}, + dictWord{132, 11, 540}, + dictWord{132, 11, 533}, + dictWord{136, 11, 608}, + dictWord{8, 0, 885}, + dictWord{138, 0, 865}, + dictWord{132, 0, 426}, + dictWord{6, 0, 58}, + dictWord{7, 0, 745}, + dictWord{7, 0, 1969}, + dictWord{8, 0, 399}, + dictWord{8, 0, 675}, + dictWord{9, 0, 479}, + dictWord{9, 0, 731}, + dictWord{10, 0, 330}, + dictWord{10, 0, 593}, + dictWord{ + 10, + 0, + 817, + }, + dictWord{11, 0, 32}, + dictWord{11, 0, 133}, + dictWord{11, 0, 221}, + dictWord{145, 0, 68}, + dictWord{134, 10, 255}, + dictWord{7, 0, 102}, + dictWord{ + 137, + 0, + 538, + }, + dictWord{137, 10, 216}, + dictWord{7, 11, 253}, + dictWord{136, 11, 549}, + dictWord{135, 11, 912}, + dictWord{9, 10, 183}, + dictWord{139, 10, 286}, + dictWord{11, 10, 956}, + dictWord{151, 10, 3}, + dictWord{8, 11, 527}, 
+ dictWord{18, 11, 60}, + dictWord{147, 11, 24}, + dictWord{4, 10, 536}, + dictWord{7, 10, 1141}, + dictWord{10, 10, 723}, + dictWord{139, 10, 371}, + dictWord{133, 11, 920}, + dictWord{7, 0, 876}, + dictWord{135, 10, 285}, + dictWord{135, 10, 560}, + dictWord{ + 132, + 10, + 690, + }, + dictWord{142, 11, 126}, + dictWord{11, 10, 33}, + dictWord{12, 10, 571}, + dictWord{149, 10, 1}, + dictWord{133, 0, 566}, + dictWord{9, 0, 139}, + dictWord{ + 10, + 0, + 399, + }, + dictWord{11, 0, 469}, + dictWord{12, 0, 634}, + dictWord{13, 0, 223}, + dictWord{132, 11, 483}, + dictWord{6, 0, 48}, + dictWord{135, 0, 63}, + dictWord{18, 0, 12}, + dictWord{7, 10, 1862}, + dictWord{12, 10, 491}, + dictWord{12, 10, 520}, + dictWord{13, 10, 383}, + dictWord{142, 10, 244}, + dictWord{135, 11, 1665}, + dictWord{132, 11, 448}, + dictWord{9, 11, 495}, + dictWord{146, 11, 104}, + dictWord{6, 0, 114}, + dictWord{7, 0, 1224}, + dictWord{7, 0, 1556}, + dictWord{136, 0, 3}, + dictWord{ + 4, + 10, + 190, + }, + dictWord{133, 10, 554}, + dictWord{8, 0, 576}, + dictWord{9, 0, 267}, + dictWord{133, 10, 1001}, + dictWord{133, 10, 446}, + dictWord{133, 0, 933}, + dictWord{139, 11, 1009}, + dictWord{8, 11, 653}, + dictWord{13, 11, 93}, + dictWord{147, 11, 14}, + dictWord{6, 0, 692}, + dictWord{6, 0, 821}, + dictWord{134, 0, 1077}, + dictWord{5, 11, 172}, + dictWord{135, 11, 801}, + dictWord{138, 0, 752}, + dictWord{4, 0, 375}, + dictWord{134, 0, 638}, + dictWord{134, 0, 1011}, + dictWord{ + 140, + 11, + 540, + }, + dictWord{9, 0, 96}, + dictWord{133, 11, 260}, + dictWord{139, 11, 587}, + dictWord{135, 10, 1231}, + dictWord{12, 0, 30}, + dictWord{13, 0, 148}, + dictWord{ + 14, + 0, + 87, + }, + dictWord{14, 0, 182}, + dictWord{16, 0, 42}, + dictWord{20, 0, 70}, + dictWord{132, 10, 304}, + dictWord{6, 0, 1398}, + dictWord{7, 0, 56}, + dictWord{7, 0, 1989}, + dictWord{8, 0, 337}, + dictWord{8, 0, 738}, + dictWord{9, 0, 600}, + dictWord{12, 0, 37}, + dictWord{13, 0, 447}, + dictWord{142, 0, 92}, + 
dictWord{138, 0, 666}, + dictWord{ + 5, + 0, + 394, + }, + dictWord{7, 0, 487}, + dictWord{136, 0, 246}, + dictWord{9, 0, 437}, + dictWord{6, 10, 53}, + dictWord{6, 10, 199}, + dictWord{7, 10, 1408}, + dictWord{8, 10, 32}, + dictWord{8, 10, 93}, + dictWord{10, 10, 397}, + dictWord{10, 10, 629}, + dictWord{11, 10, 593}, + dictWord{11, 10, 763}, + dictWord{13, 10, 326}, + dictWord{145, 10, 35}, + dictWord{134, 10, 105}, + dictWord{9, 0, 320}, + dictWord{10, 0, 506}, + dictWord{138, 10, 794}, + dictWord{7, 11, 57}, + dictWord{8, 11, 167}, + dictWord{8, 11, 375}, + dictWord{9, 11, 82}, + dictWord{9, 11, 561}, + dictWord{10, 11, 620}, + dictWord{10, 11, 770}, + dictWord{11, 10, 704}, + dictWord{141, 10, 396}, + dictWord{6, 0, 1003}, + dictWord{5, 10, 114}, + dictWord{5, 10, 255}, + dictWord{141, 10, 285}, + dictWord{7, 0, 866}, + dictWord{135, 0, 1163}, + dictWord{133, 11, 531}, + dictWord{ + 132, + 0, + 328, + }, + dictWord{7, 10, 2035}, + dictWord{8, 10, 19}, + dictWord{9, 10, 89}, + dictWord{138, 10, 831}, + dictWord{8, 11, 194}, + dictWord{136, 11, 756}, + dictWord{ + 136, + 0, + 1000, + }, + dictWord{5, 11, 453}, + dictWord{134, 11, 441}, + dictWord{4, 0, 101}, + dictWord{5, 0, 833}, + dictWord{7, 0, 1171}, + dictWord{136, 0, 744}, + dictWord{ + 133, + 0, + 726, + }, + dictWord{136, 10, 746}, + dictWord{138, 0, 176}, + dictWord{6, 0, 9}, + dictWord{6, 0, 397}, + dictWord{7, 0, 53}, + dictWord{7, 0, 1742}, + dictWord{10, 0, 632}, + dictWord{11, 0, 828}, + dictWord{140, 0, 146}, + dictWord{135, 11, 22}, + dictWord{145, 11, 64}, + dictWord{132, 0, 839}, + dictWord{11, 0, 417}, + dictWord{12, 0, 223}, + dictWord{140, 0, 265}, + dictWord{4, 11, 102}, + dictWord{7, 11, 815}, + dictWord{7, 11, 1699}, + dictWord{139, 11, 964}, + dictWord{5, 10, 955}, + dictWord{ + 136, + 10, + 814, + }, + dictWord{6, 0, 1931}, + dictWord{6, 0, 2007}, + dictWord{18, 0, 246}, + dictWord{146, 0, 247}, + dictWord{8, 0, 198}, + dictWord{11, 0, 29}, + dictWord{140, 0, 534}, + dictWord{135, 0, 
1771}, + dictWord{6, 0, 846}, + dictWord{7, 11, 1010}, + dictWord{11, 11, 733}, + dictWord{11, 11, 759}, + dictWord{12, 11, 563}, + dictWord{ + 13, + 11, + 34, + }, + dictWord{14, 11, 101}, + dictWord{18, 11, 45}, + dictWord{146, 11, 129}, + dictWord{4, 0, 186}, + dictWord{5, 0, 157}, + dictWord{8, 0, 168}, + dictWord{138, 0, 6}, + dictWord{132, 11, 899}, + dictWord{133, 10, 56}, + dictWord{148, 10, 100}, + dictWord{133, 0, 875}, + dictWord{5, 0, 773}, + dictWord{5, 0, 991}, + dictWord{6, 0, 1635}, + dictWord{134, 0, 1788}, + dictWord{6, 0, 1274}, + dictWord{9, 0, 477}, + dictWord{141, 0, 78}, + dictWord{4, 0, 639}, + dictWord{7, 0, 111}, + dictWord{8, 0, 581}, + dictWord{ + 12, + 0, + 177, + }, + dictWord{6, 11, 52}, + dictWord{9, 11, 104}, + dictWord{9, 11, 559}, + dictWord{10, 10, 4}, + dictWord{10, 10, 13}, + dictWord{11, 10, 638}, + dictWord{ + 12, + 11, + 308, + }, + dictWord{19, 11, 87}, + dictWord{148, 10, 57}, + dictWord{132, 11, 604}, + dictWord{4, 11, 301}, + dictWord{133, 10, 738}, + dictWord{133, 10, 758}, + dictWord{134, 0, 1747}, + dictWord{7, 11, 1440}, + dictWord{11, 11, 854}, + dictWord{11, 11, 872}, + dictWord{11, 11, 921}, + dictWord{12, 11, 551}, + dictWord{ + 13, + 11, + 472, + }, + dictWord{142, 11, 367}, + dictWord{7, 0, 1364}, + dictWord{7, 0, 1907}, + dictWord{141, 0, 158}, + dictWord{134, 0, 873}, + dictWord{4, 0, 404}, + dictWord{ + 4, + 0, + 659, + }, + dictWord{7, 0, 552}, + dictWord{135, 0, 675}, + dictWord{135, 10, 1112}, + dictWord{139, 10, 328}, + dictWord{7, 11, 508}, + dictWord{137, 10, 133}, + dictWord{133, 0, 391}, + dictWord{5, 10, 110}, + dictWord{6, 10, 169}, + dictWord{6, 10, 1702}, + dictWord{7, 10, 400}, + dictWord{8, 10, 538}, + dictWord{9, 10, 184}, + dictWord{ + 9, + 10, + 524, + }, + dictWord{140, 10, 218}, + dictWord{6, 11, 310}, + dictWord{7, 11, 1849}, + dictWord{8, 11, 72}, + dictWord{8, 11, 272}, + dictWord{8, 11, 431}, + dictWord{ + 9, + 11, + 12, + }, + dictWord{9, 11, 351}, + dictWord{10, 11, 563}, + 
dictWord{10, 11, 630}, + dictWord{10, 11, 810}, + dictWord{11, 11, 367}, + dictWord{11, 11, 599}, + dictWord{11, 11, 686}, + dictWord{140, 11, 672}, + dictWord{5, 0, 540}, + dictWord{6, 0, 1697}, + dictWord{136, 0, 668}, + dictWord{132, 0, 883}, + dictWord{134, 0, 78}, + dictWord{12, 0, 628}, + dictWord{18, 0, 79}, + dictWord{6, 10, 133}, + dictWord{9, 10, 353}, + dictWord{139, 10, 993}, + dictWord{6, 11, 181}, + dictWord{7, 11, 537}, + dictWord{ + 8, + 11, + 64, + }, + dictWord{9, 11, 127}, + dictWord{10, 11, 496}, + dictWord{12, 11, 510}, + dictWord{141, 11, 384}, + dictWord{6, 10, 93}, + dictWord{7, 10, 1422}, + dictWord{ + 7, + 10, + 1851, + }, + dictWord{8, 10, 673}, + dictWord{9, 10, 529}, + dictWord{140, 10, 43}, + dictWord{137, 10, 371}, + dictWord{134, 0, 1460}, + dictWord{134, 0, 962}, + dictWord{4, 11, 244}, + dictWord{135, 11, 233}, + dictWord{9, 10, 25}, + dictWord{10, 10, 467}, + dictWord{138, 10, 559}, + dictWord{4, 10, 335}, + dictWord{ + 135, + 10, + 942, + }, + dictWord{133, 0, 460}, + dictWord{135, 11, 334}, + dictWord{134, 11, 1650}, + dictWord{4, 0, 199}, + dictWord{139, 0, 34}, + dictWord{5, 10, 601}, + dictWord{ + 8, + 10, + 39, + }, + dictWord{10, 10, 773}, + dictWord{11, 10, 84}, + dictWord{12, 10, 205}, + dictWord{142, 10, 1}, + dictWord{133, 10, 870}, + dictWord{134, 0, 388}, + dictWord{14, 0, 474}, + dictWord{148, 0, 120}, + dictWord{133, 11, 369}, + dictWord{139, 0, 271}, + dictWord{4, 0, 511}, + dictWord{9, 0, 333}, + dictWord{9, 0, 379}, + dictWord{ + 10, + 0, + 602, + }, + dictWord{11, 0, 441}, + dictWord{11, 0, 723}, + dictWord{11, 0, 976}, + dictWord{12, 0, 357}, + dictWord{132, 10, 181}, + dictWord{134, 0, 608}, + dictWord{134, 10, 1652}, + dictWord{22, 0, 49}, + dictWord{137, 11, 338}, + dictWord{140, 0, 988}, + dictWord{134, 0, 617}, + dictWord{5, 0, 938}, + dictWord{136, 0, 707}, + dictWord{132, 10, 97}, + dictWord{5, 10, 147}, + dictWord{6, 10, 286}, + dictWord{7, 10, 1362}, + dictWord{141, 10, 176}, + dictWord{6, 0, 756}, + 
dictWord{ + 134, + 0, + 1149, + }, + dictWord{133, 11, 896}, + dictWord{6, 10, 375}, + dictWord{7, 10, 169}, + dictWord{7, 10, 254}, + dictWord{136, 10, 780}, + dictWord{134, 0, 1583}, + dictWord{135, 10, 1447}, + dictWord{139, 0, 285}, + dictWord{7, 11, 1117}, + dictWord{8, 11, 393}, + dictWord{136, 11, 539}, + dictWord{135, 0, 344}, + dictWord{ + 6, + 0, + 469, + }, + dictWord{7, 0, 1709}, + dictWord{138, 0, 515}, + dictWord{5, 10, 629}, + dictWord{135, 10, 1549}, + dictWord{5, 11, 4}, + dictWord{5, 11, 810}, + dictWord{ + 6, + 11, + 13, + }, + dictWord{6, 11, 538}, + dictWord{6, 11, 1690}, + dictWord{6, 11, 1726}, + dictWord{7, 11, 499}, + dictWord{7, 11, 1819}, + dictWord{8, 11, 148}, + dictWord{ + 8, + 11, + 696, + }, + dictWord{8, 11, 791}, + dictWord{12, 11, 125}, + dictWord{13, 11, 54}, + dictWord{143, 11, 9}, + dictWord{135, 11, 1268}, + dictWord{137, 0, 404}, + dictWord{ + 132, + 0, + 500, + }, + dictWord{5, 0, 68}, + dictWord{134, 0, 383}, + dictWord{11, 0, 216}, + dictWord{139, 0, 340}, + dictWord{4, 11, 925}, + dictWord{5, 11, 803}, + dictWord{ + 8, + 11, + 698, + }, + dictWord{138, 11, 828}, + dictWord{4, 0, 337}, + dictWord{6, 0, 353}, + dictWord{7, 0, 1934}, + dictWord{8, 0, 488}, + dictWord{137, 0, 429}, + dictWord{7, 0, 236}, + dictWord{7, 0, 1795}, + dictWord{8, 0, 259}, + dictWord{9, 0, 135}, + dictWord{9, 0, 177}, + dictWord{9, 0, 860}, + dictWord{10, 0, 825}, + dictWord{11, 0, 115}, + dictWord{ + 11, + 0, + 370, + }, + dictWord{11, 0, 405}, + dictWord{11, 0, 604}, + dictWord{12, 0, 10}, + dictWord{12, 0, 667}, + dictWord{12, 0, 669}, + dictWord{13, 0, 76}, + dictWord{14, 0, 310}, + dictWord{15, 0, 76}, + dictWord{15, 0, 147}, + dictWord{148, 0, 23}, + dictWord{4, 0, 15}, + dictWord{4, 0, 490}, + dictWord{5, 0, 22}, + dictWord{6, 0, 244}, + dictWord{7, 0, 40}, + dictWord{7, 0, 200}, + dictWord{7, 0, 906}, + dictWord{7, 0, 1199}, + dictWord{9, 0, 616}, + dictWord{10, 0, 716}, + dictWord{11, 0, 635}, + dictWord{11, 0, 801}, + dictWord{ + 140, + 
0, + 458, + }, + dictWord{12, 0, 756}, + dictWord{132, 10, 420}, + dictWord{134, 0, 1504}, + dictWord{6, 0, 757}, + dictWord{133, 11, 383}, + dictWord{6, 0, 1266}, + dictWord{ + 135, + 0, + 1735, + }, + dictWord{5, 0, 598}, + dictWord{7, 0, 791}, + dictWord{8, 0, 108}, + dictWord{9, 0, 123}, + dictWord{7, 10, 1570}, + dictWord{140, 10, 542}, + dictWord{ + 142, + 11, + 410, + }, + dictWord{9, 11, 660}, + dictWord{138, 11, 347}, +} diff --git a/vendor/github.com/andybalholm/brotli/symbol_list.go b/vendor/github.com/andybalholm/brotli/symbol_list.go new file mode 100644 index 00000000000..c5cb49e5a9d --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/symbol_list.go @@ -0,0 +1,22 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Utilities for building Huffman decoding tables. */ + +type symbolList struct { + storage []uint16 + offset int +} + +func symbolListGet(sl symbolList, i int) uint16 { + return sl.storage[i+sl.offset] +} + +func symbolListPut(sl symbolList, i int, val uint16) { + sl.storage[i+sl.offset] = val +} diff --git a/vendor/github.com/andybalholm/brotli/transform.go b/vendor/github.com/andybalholm/brotli/transform.go new file mode 100644 index 00000000000..d2c043a6227 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/transform.go @@ -0,0 +1,641 @@ +package brotli + +const ( + transformIdentity = 0 + transformOmitLast1 = 1 + transformOmitLast2 = 2 + transformOmitLast3 = 3 + transformOmitLast4 = 4 + transformOmitLast5 = 5 + transformOmitLast6 = 6 + transformOmitLast7 = 7 + transformOmitLast8 = 8 + transformOmitLast9 = 9 + transformUppercaseFirst = 10 + transformUppercaseAll = 11 + transformOmitFirst1 = 12 + transformOmitFirst2 = 13 + transformOmitFirst3 = 14 + transformOmitFirst4 = 15 + transformOmitFirst5 = 16 + transformOmitFirst6 = 17 + transformOmitFirst7 = 18 + transformOmitFirst8 = 19 + 
transformOmitFirst9 = 20 + transformShiftFirst = 21 + transformShiftAll = 22 + iota - 22 + numTransformTypes +) + +const transformsMaxCutOff = transformOmitLast9 + +type transforms struct { + prefix_suffix_size uint16 + prefix_suffix []byte + prefix_suffix_map []uint16 + num_transforms uint32 + transforms []byte + params []byte + cutOffTransforms [transformsMaxCutOff + 1]int16 +} + +func transformPrefixId(t *transforms, I int) byte { + return t.transforms[(I*3)+0] +} + +func transformType(t *transforms, I int) byte { + return t.transforms[(I*3)+1] +} + +func transformSuffixId(t *transforms, I int) byte { + return t.transforms[(I*3)+2] +} + +func transformPrefix(t *transforms, I int) []byte { + return t.prefix_suffix[t.prefix_suffix_map[transformPrefixId(t, I)]:] +} + +func transformSuffix(t *transforms, I int) []byte { + return t.prefix_suffix[t.prefix_suffix_map[transformSuffixId(t, I)]:] +} + +/* RFC 7932 transforms string data */ +const kPrefixSuffix string = "\001 \002, \010 of the \004 of \002s \001.\005 and \004 " + "in \001\"\004 to \002\">\001\n\002. \001]\005 for \003 a \006 " + "that \001'\006 with \006 from \004 by \001(\006. T" + "he \004 on \004 as \004 is \004ing \002\n\t\001:\003ed " + "\002=\"\004 at \003ly \001,\002='\005.com/\007. 
This \005" + " not \003er \003al \004ful \004ive \005less \004es" + "t \004ize \002\xc2\xa0\004ous \005 the \002e \000" + +var kPrefixSuffixMap = [50]uint16{ + 0x00, + 0x02, + 0x05, + 0x0E, + 0x13, + 0x16, + 0x18, + 0x1E, + 0x23, + 0x25, + 0x2A, + 0x2D, + 0x2F, + 0x32, + 0x34, + 0x3A, + 0x3E, + 0x45, + 0x47, + 0x4E, + 0x55, + 0x5A, + 0x5C, + 0x63, + 0x68, + 0x6D, + 0x72, + 0x77, + 0x7A, + 0x7C, + 0x80, + 0x83, + 0x88, + 0x8C, + 0x8E, + 0x91, + 0x97, + 0x9F, + 0xA5, + 0xA9, + 0xAD, + 0xB2, + 0xB7, + 0xBD, + 0xC2, + 0xC7, + 0xCA, + 0xCF, + 0xD5, + 0xD8, +} + +/* RFC 7932 transforms */ +var kTransformsData = []byte{ + 49, + transformIdentity, + 49, + 49, + transformIdentity, + 0, + 0, + transformIdentity, + 0, + 49, + transformOmitFirst1, + 49, + 49, + transformUppercaseFirst, + 0, + 49, + transformIdentity, + 47, + 0, + transformIdentity, + 49, + 4, + transformIdentity, + 0, + 49, + transformIdentity, + 3, + 49, + transformUppercaseFirst, + 49, + 49, + transformIdentity, + 6, + 49, + transformOmitFirst2, + 49, + 49, + transformOmitLast1, + 49, + 1, + transformIdentity, + 0, + 49, + transformIdentity, + 1, + 0, + transformUppercaseFirst, + 0, + 49, + transformIdentity, + 7, + 49, + transformIdentity, + 9, + 48, + transformIdentity, + 0, + 49, + transformIdentity, + 8, + 49, + transformIdentity, + 5, + 49, + transformIdentity, + 10, + 49, + transformIdentity, + 11, + 49, + transformOmitLast3, + 49, + 49, + transformIdentity, + 13, + 49, + transformIdentity, + 14, + 49, + transformOmitFirst3, + 49, + 49, + transformOmitLast2, + 49, + 49, + transformIdentity, + 15, + 49, + transformIdentity, + 16, + 0, + transformUppercaseFirst, + 49, + 49, + transformIdentity, + 12, + 5, + transformIdentity, + 49, + 0, + transformIdentity, + 1, + 49, + transformOmitFirst4, + 49, + 49, + transformIdentity, + 18, + 49, + transformIdentity, + 17, + 49, + transformIdentity, + 19, + 49, + transformIdentity, + 20, + 49, + transformOmitFirst5, + 49, + 49, + transformOmitFirst6, + 49, + 47, + 
transformIdentity, + 49, + 49, + transformOmitLast4, + 49, + 49, + transformIdentity, + 22, + 49, + transformUppercaseAll, + 49, + 49, + transformIdentity, + 23, + 49, + transformIdentity, + 24, + 49, + transformIdentity, + 25, + 49, + transformOmitLast7, + 49, + 49, + transformOmitLast1, + 26, + 49, + transformIdentity, + 27, + 49, + transformIdentity, + 28, + 0, + transformIdentity, + 12, + 49, + transformIdentity, + 29, + 49, + transformOmitFirst9, + 49, + 49, + transformOmitFirst7, + 49, + 49, + transformOmitLast6, + 49, + 49, + transformIdentity, + 21, + 49, + transformUppercaseFirst, + 1, + 49, + transformOmitLast8, + 49, + 49, + transformIdentity, + 31, + 49, + transformIdentity, + 32, + 47, + transformIdentity, + 3, + 49, + transformOmitLast5, + 49, + 49, + transformOmitLast9, + 49, + 0, + transformUppercaseFirst, + 1, + 49, + transformUppercaseFirst, + 8, + 5, + transformIdentity, + 21, + 49, + transformUppercaseAll, + 0, + 49, + transformUppercaseFirst, + 10, + 49, + transformIdentity, + 30, + 0, + transformIdentity, + 5, + 35, + transformIdentity, + 49, + 47, + transformIdentity, + 2, + 49, + transformUppercaseFirst, + 17, + 49, + transformIdentity, + 36, + 49, + transformIdentity, + 33, + 5, + transformIdentity, + 0, + 49, + transformUppercaseFirst, + 21, + 49, + transformUppercaseFirst, + 5, + 49, + transformIdentity, + 37, + 0, + transformIdentity, + 30, + 49, + transformIdentity, + 38, + 0, + transformUppercaseAll, + 0, + 49, + transformIdentity, + 39, + 0, + transformUppercaseAll, + 49, + 49, + transformIdentity, + 34, + 49, + transformUppercaseAll, + 8, + 49, + transformUppercaseFirst, + 12, + 0, + transformIdentity, + 21, + 49, + transformIdentity, + 40, + 0, + transformUppercaseFirst, + 12, + 49, + transformIdentity, + 41, + 49, + transformIdentity, + 42, + 49, + transformUppercaseAll, + 17, + 49, + transformIdentity, + 43, + 0, + transformUppercaseFirst, + 5, + 49, + transformUppercaseAll, + 10, + 0, + transformIdentity, + 34, + 49, + 
transformUppercaseFirst, + 33, + 49, + transformIdentity, + 44, + 49, + transformUppercaseAll, + 5, + 45, + transformIdentity, + 49, + 0, + transformIdentity, + 33, + 49, + transformUppercaseFirst, + 30, + 49, + transformUppercaseAll, + 30, + 49, + transformIdentity, + 46, + 49, + transformUppercaseAll, + 1, + 49, + transformUppercaseFirst, + 34, + 0, + transformUppercaseFirst, + 33, + 0, + transformUppercaseAll, + 30, + 0, + transformUppercaseAll, + 1, + 49, + transformUppercaseAll, + 33, + 49, + transformUppercaseAll, + 21, + 49, + transformUppercaseAll, + 12, + 0, + transformUppercaseAll, + 5, + 49, + transformUppercaseAll, + 34, + 0, + transformUppercaseAll, + 12, + 0, + transformUppercaseFirst, + 30, + 0, + transformUppercaseAll, + 34, + 0, + transformUppercaseFirst, + 34, +} + +var kBrotliTransforms = transforms{ + 217, + []byte(kPrefixSuffix), + kPrefixSuffixMap[:], + 121, + kTransformsData, + nil, /* no extra parameters */ + [transformsMaxCutOff + 1]int16{0, 12, 27, 23, 42, 63, 56, 48, 59, 64}, +} + +func getTransforms() *transforms { + return &kBrotliTransforms +} + +func toUpperCase(p []byte) int { + if p[0] < 0xC0 { + if p[0] >= 'a' && p[0] <= 'z' { + p[0] ^= 32 + } + + return 1 + } + + /* An overly simplified uppercasing model for UTF-8. */ + if p[0] < 0xE0 { + p[1] ^= 32 + return 2 + } + + /* An arbitrary transform for three byte characters. */ + p[2] ^= 5 + + return 3 +} + +func shiftTransform(word []byte, word_len int, parameter uint16) int { + /* Limited sign extension: scalar < (1 << 24). */ + var scalar uint32 = (uint32(parameter) & 0x7FFF) + (0x1000000 - (uint32(parameter) & 0x8000)) + if word[0] < 0x80 { + /* 1-byte rune / 0sssssss / 7 bit scalar (ASCII). */ + scalar += uint32(word[0]) + + word[0] = byte(scalar & 0x7F) + return 1 + } else if word[0] < 0xC0 { + /* Continuation / 10AAAAAA. */ + return 1 + } else if word[0] < 0xE0 { + /* 2-byte rune / 110sssss AAssssss / 11 bit scalar. 
*/ + if word_len < 2 { + return 1 + } + scalar += uint32(word[1]&0x3F | (word[0]&0x1F)<<6) + word[0] = byte(0xC0 | (scalar>>6)&0x1F) + word[1] = byte(uint32(word[1]&0xC0) | scalar&0x3F) + return 2 + } else if word[0] < 0xF0 { + /* 3-byte rune / 1110ssss AAssssss BBssssss / 16 bit scalar. */ + if word_len < 3 { + return word_len + } + scalar += uint32(word[2])&0x3F | uint32(word[1]&0x3F)<<6 | uint32(word[0]&0x0F)<<12 + word[0] = byte(0xE0 | (scalar>>12)&0x0F) + word[1] = byte(uint32(word[1]&0xC0) | (scalar>>6)&0x3F) + word[2] = byte(uint32(word[2]&0xC0) | scalar&0x3F) + return 3 + } else if word[0] < 0xF8 { + /* 4-byte rune / 11110sss AAssssss BBssssss CCssssss / 21 bit scalar. */ + if word_len < 4 { + return word_len + } + scalar += uint32(word[3])&0x3F | uint32(word[2]&0x3F)<<6 | uint32(word[1]&0x3F)<<12 | uint32(word[0]&0x07)<<18 + word[0] = byte(0xF0 | (scalar>>18)&0x07) + word[1] = byte(uint32(word[1]&0xC0) | (scalar>>12)&0x3F) + word[2] = byte(uint32(word[2]&0xC0) | (scalar>>6)&0x3F) + word[3] = byte(uint32(word[3]&0xC0) | scalar&0x3F) + return 4 + } + + return 1 +} + +func transformDictionaryWord(dst []byte, word []byte, len int, trans *transforms, transform_idx int) int { + var idx int = 0 + var prefix []byte = transformPrefix(trans, transform_idx) + var type_ byte = transformType(trans, transform_idx) + var suffix []byte = transformSuffix(trans, transform_idx) + { + var prefix_len int = int(prefix[0]) + prefix = prefix[1:] + for { + tmp1 := prefix_len + prefix_len-- + if tmp1 == 0 { + break + } + dst[idx] = prefix[0] + idx++ + prefix = prefix[1:] + } + } + { + var t int = int(type_) + var i int = 0 + if t <= transformOmitLast9 { + len -= t + } else if t >= transformOmitFirst1 && t <= transformOmitFirst9 { + var skip int = t - (transformOmitFirst1 - 1) + word = word[skip:] + len -= skip + } + + for i < len { + dst[idx] = word[i] + idx++ + i++ + } + if t == transformUppercaseFirst { + toUpperCase(dst[idx-len:]) + } else if t == transformUppercaseAll { + var 
uppercase []byte = dst + uppercase = uppercase[idx-len:] + for len > 0 { + var step int = toUpperCase(uppercase) + uppercase = uppercase[step:] + len -= step + } + } else if t == transformShiftFirst { + var param uint16 = uint16(trans.params[transform_idx*2]) + uint16(trans.params[transform_idx*2+1])<<8 + shiftTransform(dst[idx-len:], int(len), param) + } else if t == transformShiftAll { + var param uint16 = uint16(trans.params[transform_idx*2]) + uint16(trans.params[transform_idx*2+1])<<8 + var shift []byte = dst + shift = shift[idx-len:] + for len > 0 { + var step int = shiftTransform(shift, int(len), param) + shift = shift[step:] + len -= step + } + } + } + { + var suffix_len int = int(suffix[0]) + suffix = suffix[1:] + for { + tmp2 := suffix_len + suffix_len-- + if tmp2 == 0 { + break + } + dst[idx] = suffix[0] + idx++ + suffix = suffix[1:] + } + return idx + } +} diff --git a/vendor/github.com/andybalholm/brotli/utf8_util.go b/vendor/github.com/andybalholm/brotli/utf8_util.go new file mode 100644 index 00000000000..3244247eecc --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/utf8_util.go @@ -0,0 +1,70 @@ +package brotli + +/* Copyright 2013 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Heuristics for deciding about the UTF8-ness of strings. */ + +const kMinUTF8Ratio float64 = 0.75 + +/* Returns 1 if at least min_fraction of the bytes between pos and + pos + length in the (data, mask) ring-buffer is UTF8-encoded, otherwise + returns 0. 
*/ +func parseAsUTF8(symbol *int, input []byte, size uint) uint { + /* ASCII */ + if input[0]&0x80 == 0 { + *symbol = int(input[0]) + if *symbol > 0 { + return 1 + } + } + + /* 2-byte UTF8 */ + if size > 1 && input[0]&0xE0 == 0xC0 && input[1]&0xC0 == 0x80 { + *symbol = (int(input[0])&0x1F)<<6 | int(input[1])&0x3F + if *symbol > 0x7F { + return 2 + } + } + + /* 3-byte UFT8 */ + if size > 2 && input[0]&0xF0 == 0xE0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 { + *symbol = (int(input[0])&0x0F)<<12 | (int(input[1])&0x3F)<<6 | int(input[2])&0x3F + if *symbol > 0x7FF { + return 3 + } + } + + /* 4-byte UFT8 */ + if size > 3 && input[0]&0xF8 == 0xF0 && input[1]&0xC0 == 0x80 && input[2]&0xC0 == 0x80 && input[3]&0xC0 == 0x80 { + *symbol = (int(input[0])&0x07)<<18 | (int(input[1])&0x3F)<<12 | (int(input[2])&0x3F)<<6 | int(input[3])&0x3F + if *symbol > 0xFFFF && *symbol <= 0x10FFFF { + return 4 + } + } + + /* Not UTF8, emit a special symbol above the UTF8-code space */ + *symbol = 0x110000 | int(input[0]) + + return 1 +} + +/* Returns 1 if at least min_fraction of the data is UTF8-encoded.*/ +func isMostlyUTF8(data []byte, pos uint, mask uint, length uint, min_fraction float64) bool { + var size_utf8 uint = 0 + var i uint = 0 + for i < length { + var symbol int + current_data := data[(pos+i)&mask:] + var bytes_read uint = parseAsUTF8(&symbol, current_data, length-i) + i += bytes_read + if symbol < 0x110000 { + size_utf8 += bytes_read + } + } + + return float64(size_utf8) > min_fraction*float64(length) +} diff --git a/vendor/github.com/andybalholm/brotli/util.go b/vendor/github.com/andybalholm/brotli/util.go new file mode 100644 index 00000000000..a84553a6396 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/util.go @@ -0,0 +1,7 @@ +package brotli + +func assert(cond bool) { + if !cond { + panic("assertion failure") + } +} diff --git a/vendor/github.com/andybalholm/brotli/write_bits.go b/vendor/github.com/andybalholm/brotli/write_bits.go new file mode 100644 index 
00000000000..87299011985 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/write_bits.go @@ -0,0 +1,52 @@ +package brotli + +import "encoding/binary" + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Write bits into a byte array. */ + +/* This function writes bits into bytes in increasing addresses, and within + a byte least-significant-bit first. + + The function can write up to 56 bits in one go with WriteBits + Example: let's assume that 3 bits (Rs below) have been written already: + + BYTE-0 BYTE+1 BYTE+2 + + 0000 0RRR 0000 0000 0000 0000 + + Now, we could write 5 or less bits in MSB by just sifting by 3 + and OR'ing to BYTE-0. + + For n bits, we take the last 5 bits, OR that with high bits in BYTE-0, + and locate the rest in BYTE+1, BYTE+2, etc. */ +func writeBits(n_bits uint, bits uint64, pos *uint, array []byte) { + /* This branch of the code can write up to 56 bits at a time, + 7 bits are lost by being perhaps already in *p and at least + 1 bit is needed to initialize the bit-stream ahead (i.e. if 7 + bits are in *p and we write 57 bits, then the next write will + access a byte that was never initialized). 
*/ + p := array[*pos>>3:] + v := uint64(p[0]) + v |= bits << (*pos & 7) + binary.LittleEndian.PutUint64(p, v) + *pos += n_bits +} + +func writeSingleBit(bit bool, pos *uint, array []byte) { + if bit { + writeBits(1, 1, pos, array) + } else { + writeBits(1, 0, pos, array) + } +} + +func writeBitsPrepareStorage(pos uint, array []byte) { + assert(pos&7 == 0) + array[pos>>3] = 0 +} diff --git a/vendor/github.com/andybalholm/brotli/writer.go b/vendor/github.com/andybalholm/brotli/writer.go new file mode 100644 index 00000000000..8a688117d13 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/writer.go @@ -0,0 +1,162 @@ +package brotli + +import ( + "errors" + "io" + + "github.com/andybalholm/brotli/matchfinder" +) + +const ( + BestSpeed = 0 + BestCompression = 11 + DefaultCompression = 6 +) + +// WriterOptions configures Writer. +type WriterOptions struct { + // Quality controls the compression-speed vs compression-density trade-offs. + // The higher the quality, the slower the compression. Range is 0 to 11. + Quality int + // LGWin is the base 2 logarithm of the sliding window size. + // Range is 10 to 24. 0 indicates automatic configuration based on Quality. + LGWin int +} + +var ( + errEncode = errors.New("brotli: encode error") + errWriterClosed = errors.New("brotli: Writer is closed") +) + +// Writes to the returned writer are compressed and written to dst. +// It is the caller's responsibility to call Close on the Writer when done. +// Writes may be buffered and not flushed until Close. +func NewWriter(dst io.Writer) *Writer { + return NewWriterLevel(dst, DefaultCompression) +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// The compression level can be DefaultCompression or any integer value between +// BestSpeed and BestCompression inclusive. 
+func NewWriterLevel(dst io.Writer, level int) *Writer { + return NewWriterOptions(dst, WriterOptions{ + Quality: level, + }) +} + +// NewWriterOptions is like NewWriter but specifies WriterOptions +func NewWriterOptions(dst io.Writer, options WriterOptions) *Writer { + w := new(Writer) + w.options = options + w.Reset(dst) + return w +} + +// Reset discards the Writer's state and makes it equivalent to the result of +// its original state from NewWriter or NewWriterLevel, but writing to dst +// instead. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(dst io.Writer) { + encoderInitState(w) + w.params.quality = w.options.Quality + if w.options.LGWin > 0 { + w.params.lgwin = uint(w.options.LGWin) + } + w.dst = dst + w.err = nil +} + +func (w *Writer) writeChunk(p []byte, op int) (n int, err error) { + if w.dst == nil { + return 0, errWriterClosed + } + if w.err != nil { + return 0, w.err + } + + for { + availableIn := uint(len(p)) + nextIn := p + success := encoderCompressStream(w, op, &availableIn, &nextIn) + bytesConsumed := len(p) - int(availableIn) + p = p[bytesConsumed:] + n += bytesConsumed + if !success { + return n, errEncode + } + + if len(p) == 0 || w.err != nil { + return n, w.err + } + } +} + +// Flush outputs encoded data for all input provided to Write. The resulting +// output can be decoded to match all input before Flush, but the stream is +// not yet complete until after Close. +// Flush has a negative impact on compression. +func (w *Writer) Flush() error { + _, err := w.writeChunk(nil, operationFlush) + return err +} + +// Close flushes remaining data to the decorated writer. +func (w *Writer) Close() error { + // If stream is already closed, it is reported by `writeChunk`. + _, err := w.writeChunk(nil, operationFinish) + w.dst = nil + return err +} + +// Write implements io.Writer. Flush or Close must be called to ensure that the +// encoded bytes are actually flushed to the underlying Writer. 
+func (w *Writer) Write(p []byte) (n int, err error) { + return w.writeChunk(p, operationProcess) +} + +type nopCloser struct { + io.Writer +} + +func (nopCloser) Close() error { return nil } + +// NewWriterV2 is like NewWriterLevel, but it uses the new implementation +// based on the matchfinder package. It currently supports up to level 7; +// if a higher level is specified, level 7 will be used. +func NewWriterV2(dst io.Writer, level int) *matchfinder.Writer { + var mf matchfinder.MatchFinder + if level < 2 { + mf = matchfinder.M0{Lazy: level == 1} + } else { + hashLen := 6 + if level >= 6 { + hashLen = 5 + } + chainLen := 64 + switch level { + case 2: + chainLen = 0 + case 3: + chainLen = 1 + case 4: + chainLen = 2 + case 5: + chainLen = 4 + case 6: + chainLen = 8 + } + mf = &matchfinder.M4{ + MaxDistance: 1 << 20, + ChainLength: chainLen, + HashLen: hashLen, + DistanceBitCost: 57, + } + } + + return &matchfinder.Writer{ + Dest: dst, + MatchFinder: mf, + Encoder: &Encoder{}, + BlockSize: 1 << 16, + } +} diff --git a/vendor/github.com/parquet-go/bitpack/.gitignore b/vendor/github.com/parquet-go/bitpack/.gitignore new file mode 100644 index 00000000000..b3584c8d4de --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/.gitignore @@ -0,0 +1,21 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.py + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Emacs +*~ +#*# +.# diff --git a/vendor/github.com/parquet-go/bitpack/LICENSE b/vendor/github.com/parquet-go/bitpack/LICENSE new file mode 100644 index 00000000000..c3e15a69d15 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 Achille Roussel, Filip Petkovski + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/parquet-go/bitpack/README.md b/vendor/github.com/parquet-go/bitpack/README.md new file mode 100644 index 00000000000..1f02d8f3bcb --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/README.md @@ -0,0 +1,31 @@ +# bitpack + +[![Go Reference](https://pkg.go.dev/badge/github.com/parquet-go/bitpack.svg)](https://pkg.go.dev/github.com/parquet-go/bitpack) + +A high-performance Go library for bit packing and unpacking integers of various bit widths. Part of +the [parquet-go](https://github.com/parquet-go/parquet-go) ecosystem. + +Includes AMD64 assembly optimizations with pure Go fallback for portability. 
+ +```bash +go get github.com/parquet-go/bitpack +``` + +## Usage + +```go +import "github.com/parquet-go/bitpack" + +// Pack int32 values with 3-bit width +values := []int32{1, 2, 3, 4, 5} +bitWidth := uint(3) +packedSize := bitpack.ByteCount(uint(len(values)) * bitWidth) +dst := make([]byte, packedSize+bitpack.PaddingInt32) +bitpack.PackInt32(dst, values, bitWidth) + +// Unpack int32 values +unpacked := make([]int32, len(values)) +bitpack.UnpackInt32(unpacked, dst, bitWidth) +``` + +For complete working examples, see the [examples](./examples) directory. diff --git a/vendor/github.com/parquet-go/bitpack/bitpack.go b/vendor/github.com/parquet-go/bitpack/bitpack.go new file mode 100644 index 00000000000..4a55472515c --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/bitpack.go @@ -0,0 +1,14 @@ +// Package bitpack implements efficient bit packing and unpacking routines for +// integers of various bit widths. +package bitpack + +// Int is a type constraint representing the integer types that this package +// supports. +type Int interface { + ~int32 | ~uint32 | ~int64 | ~uint64 | ~int | ~uintptr +} + +// ByteCount returns the number of bytes needed to hold the given bit count. +func ByteCount(bitCount uint) int { + return int((bitCount + 7) / 8) +} diff --git a/vendor/github.com/parquet-go/bitpack/masks_int32_amd64.s b/vendor/github.com/parquet-go/bitpack/masks_int32_amd64.s new file mode 100644 index 00000000000..6ffe4e2c535 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/masks_int32_amd64.s @@ -0,0 +1,1288 @@ +//go:build !purego + +#include "textflag.h" + +// ----------------------------------------------------------------------------- +// Shuffle masks used to broadcast bytes of bit-packed valued into vector +// registers at positions where they can then be shifted into the right +// locations. +// ----------------------------------------------------------------------------- + +// Shuffle masks for unpacking values from bit widths 1 to 16. 
+// +// The masks are grouped in 32 bytes chunks containing 2 masks of 16 bytes, with +// the following layout: +// +// - The first mask is used to shuffle values from the 16 bytes of input into +// the lower 16 bytes of output. These values are then shifted RIGHT to be +// aligned on the begining of each 32 bit word. +// +// - The second mask selects values from the 16 bytes of input into the upper +// 16 bytes of output. These values are then shifted RIGHT to be aligned on +// the beginning of each 32 bit word. +// +// The bit width is intended to be used as an index into this array, using this +// formula to convert from the index to a byte offset: +// +// offset = 32 * (bitWidth - 1) +// +GLOBL ·shuffleInt32x1to16bits(SB), RODATA|NOPTR, $512 + +// 1 bit => 32 bits +// ----------------- +// 0: [a,b,c,d,e,f,g,h] +// ... +DATA ·shuffleInt32x1to16bits+0+0(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+0+4(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+0+8(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+0+12(SB)/4, $0x80808000 + +DATA ·shuffleInt32x1to16bits+0+16(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+0+20(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+0+24(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+0+28(SB)/4, $0x80808000 + +// 2 bits => 32 bits +// ----------------- +// 0: [a,a,b,b,c,c,d,d] +// 1: [e,e,f,f,g,g,h,h] +// ... +DATA ·shuffleInt32x1to16bits+32+0(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+32+4(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+32+8(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+32+12(SB)/4, $0x80808000 + +DATA ·shuffleInt32x1to16bits+32+16(SB)/4, $0x80808001 +DATA ·shuffleInt32x1to16bits+32+20(SB)/4, $0x80808001 +DATA ·shuffleInt32x1to16bits+32+24(SB)/4, $0x80808001 +DATA ·shuffleInt32x1to16bits+32+28(SB)/4, $0x80808001 + +// 3 bits => 32 bits +// ----------------- +// 0: [a,a,a,b,b,b,c,c] +// 1: [c,d,d,d,e,e,e,f] +// 2: [f,f,g,g,g,h,h,h] +// ... 
+DATA ·shuffleInt32x1to16bits+64+0(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+64+4(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+64+8(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+64+12(SB)/4, $0x80808001 + +DATA ·shuffleInt32x1to16bits+64+16(SB)/4, $0x80808001 +DATA ·shuffleInt32x1to16bits+64+20(SB)/4, $0x80800201 +DATA ·shuffleInt32x1to16bits+64+24(SB)/4, $0x80808002 +DATA ·shuffleInt32x1to16bits+64+28(SB)/4, $0x80808002 + +// 4 bits => 32 bits +// ----------------- +// 0: [a,a,a,a,b,b,b,b] +// 1: [c,c,c,c,d,d,d,d] +// 2: [e,e,e,e,f,f,f,f] +// 3: [g,g,g,g,h,h,h,h] +// ... +DATA ·shuffleInt32x1to16bits+96+0(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+96+4(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+96+8(SB)/4, $0x80808001 +DATA ·shuffleInt32x1to16bits+96+12(SB)/4, $0x80808001 + +DATA ·shuffleInt32x1to16bits+96+16(SB)/4, $0x80808002 +DATA ·shuffleInt32x1to16bits+96+20(SB)/4, $0x80808002 +DATA ·shuffleInt32x1to16bits+96+24(SB)/4, $0x80808003 +DATA ·shuffleInt32x1to16bits+96+28(SB)/4, $0x80808003 + +// 5 bits => 32 bits +// ----------------- +// 0: [a,a,a,a,a,b,b,b] +// 1: [b,b,c,c,c,c,c,d] +// 2: [d,d,d,d,e,e,e,e] +// 3: [e,f,f,f,f,f,g,g] +// 4: [g,g,g,h,h,h,h,h] +// ... +DATA ·shuffleInt32x1to16bits+128+0(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+128+4(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+128+8(SB)/4, $0x80808001 +DATA ·shuffleInt32x1to16bits+128+12(SB)/4, $0x80800201 + +DATA ·shuffleInt32x1to16bits+128+16(SB)/4, $0x80800302 +DATA ·shuffleInt32x1to16bits+128+20(SB)/4, $0x80808003 +DATA ·shuffleInt32x1to16bits+128+24(SB)/4, $0x80800403 +DATA ·shuffleInt32x1to16bits+128+28(SB)/4, $0x80808004 + +// 6 bits => 32 bits +// ----------------- +// 0: [a,a,a,a,a,a,b,b] +// 1: [b,b,b,b,c,c,c,c] +// 2: [c,c,d,d,d,d,d,d] +// 3: [e,e,e,e,e,e,f,f] +// 4: [f,f,f,f,g,g,g,g] +// 5: [g,g,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x1to16bits+160+0(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+160+4(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+160+8(SB)/4, $0x80800201 +DATA ·shuffleInt32x1to16bits+160+12(SB)/4, $0x80808002 + +DATA ·shuffleInt32x1to16bits+160+16(SB)/4, $0x80808003 +DATA ·shuffleInt32x1to16bits+160+20(SB)/4, $0x80800403 +DATA ·shuffleInt32x1to16bits+160+24(SB)/4, $0x80800504 +DATA ·shuffleInt32x1to16bits+160+28(SB)/4, $0x80808005 + +// 7 bits => 32 bits +// ----------------- +// 0: [a,a,a,a,a,a,a,b] +// 1: [b,b,b,b,b,b,c,c] +// 2: [c,c,c,c,c,d,d,d] +// 3: [d,d,d,d,e,e,e,e] +// 4: [e,e,e,f,f,f,f,f] +// 5: [f,f,g,g,g,g,g,g] +// 6: [g,h,h,h,h,h,h,h] +// ... +DATA ·shuffleInt32x1to16bits+192+0(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+192+4(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+192+8(SB)/4, $0x80800201 +DATA ·shuffleInt32x1to16bits+192+12(SB)/4, $0x80800302 + +DATA ·shuffleInt32x1to16bits+192+16(SB)/4, $0x80800403 +DATA ·shuffleInt32x1to16bits+192+20(SB)/4, $0x80800504 +DATA ·shuffleInt32x1to16bits+192+24(SB)/4, $0x80800605 +DATA ·shuffleInt32x1to16bits+192+28(SB)/4, $0x80808006 + +// 8 bits => 32 bits +// ----------------- +// 0: [a,a,a,a,a,a,a,a] +// 1: [b,b,b,b,b,b,b,b] +// 2: [c,c,c,c,c,c,c,c] +// 3: [d,d,d,d,d,d,d,d] +// 4: [e,e,e,e,e,e,e,e] +// 5: [f,f,f,f,f,f,f,f] +// 6: [g,g,g,g,g,g,g,g] +// 7: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x1to16bits+224+0(SB)/4, $0x80808000 +DATA ·shuffleInt32x1to16bits+224+4(SB)/4, $0x80808001 +DATA ·shuffleInt32x1to16bits+224+8(SB)/4, $0x80808002 +DATA ·shuffleInt32x1to16bits+224+12(SB)/4, $0x80808003 + +DATA ·shuffleInt32x1to16bits+224+16(SB)/4, $0x80808004 +DATA ·shuffleInt32x1to16bits+224+20(SB)/4, $0x80808005 +DATA ·shuffleInt32x1to16bits+224+24(SB)/4, $0x80808006 +DATA ·shuffleInt32x1to16bits+224+28(SB)/4, $0x80808007 + +// 9 bits => 32 bits +// ----------------- +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,b,b,b,b,b,b,b] +// 2: [b,b,c,c,c,c,c,c] +// 3: [c,c,c,d,d,d,d,d] +// 4: [d,d,d,d,e,e,e,e] +// 5: [e,e,e,e,e,f,f,f] +// 6: [f,f,f,f,f,f,g,g] +// 7: [g,g,g,g,g,g,g,h] +// 8: [h,h,h,h,h,h,h,h] +// ... +DATA ·shuffleInt32x1to16bits+256+0(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+256+4(SB)/4, $0x80800201 +DATA ·shuffleInt32x1to16bits+256+8(SB)/4, $0x80800302 +DATA ·shuffleInt32x1to16bits+256+12(SB)/4, $0x80800403 + +DATA ·shuffleInt32x1to16bits+256+16(SB)/4, $0x80800504 +DATA ·shuffleInt32x1to16bits+256+20(SB)/4, $0x80800605 +DATA ·shuffleInt32x1to16bits+256+24(SB)/4, $0x80800706 +DATA ·shuffleInt32x1to16bits+256+28(SB)/4, $0x80800807 + +// 10 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,b,b,b,b,b,b] +// 2: [b,b,b,b,c,c,c,c] +// 3: [c,c,c,c,c,c,d,d] +// 4: [d,d,d,d,d,d,d,d] +// 5: [e,e,e,e,e,e,e,e] +// 6: [e,e,f,f,f,f,f,f] +// 7: [f,f,f,f,g,g,g,g] +// 8: [g,g,g,g,g,g,h,h] +// 9: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x1to16bits+288+0(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+288+4(SB)/4, $0x80800201 +DATA ·shuffleInt32x1to16bits+288+8(SB)/4, $0x80800302 +DATA ·shuffleInt32x1to16bits+288+12(SB)/4, $0x80800403 + +DATA ·shuffleInt32x1to16bits+288+16(SB)/4, $0x80800605 +DATA ·shuffleInt32x1to16bits+288+20(SB)/4, $0x80800706 +DATA ·shuffleInt32x1to16bits+288+24(SB)/4, $0x80800807 +DATA ·shuffleInt32x1to16bits+288+28(SB)/4, $0x80800908 + +// 11 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,b,b,b,b,b] +// 2: [b,b,b,b,b,b,c,c] +// 3: [c,c,c,c,c,c,c,c] +// 4: [c,d,d,d,d,d,d,d] +// 5: [d,d,d,d,e,e,e,e] +// 6: [e,e,e,e,e,e,e,f] +// 7: [f,f,f,f,f,f,f,f] +// 8: [f,f,g,g,g,g,g,g] +// 9: [g,g,g,g,g,h,h,h] +// A: [h,h,h,h,h,h,h,h] +// ... +DATA ·shuffleInt32x1to16bits+320+0(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+320+4(SB)/4, $0x80800201 +DATA ·shuffleInt32x1to16bits+320+8(SB)/4, $0x80040302 +DATA ·shuffleInt32x1to16bits+320+12(SB)/4, $0x80800504 + +DATA ·shuffleInt32x1to16bits+320+16(SB)/4, $0x80800605 +DATA ·shuffleInt32x1to16bits+320+20(SB)/4, $0x80080706 +DATA ·shuffleInt32x1to16bits+320+24(SB)/4, $0x80800908 +DATA ·shuffleInt32x1to16bits+320+28(SB)/4, $0x80800A09 + +// 12 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,b,b,b,b] +// 2: [b,b,b,b,b,b,b,b] +// 3: [c,c,c,c,c,c,c,c] +// 4: [c,c,c,c,d,d,d,d] +// 5: [d,d,d,d,d,d,d,d] +// 6: [e,e,e,e,e,e,e,e] +// 7: [e,e,e,e,f,f,f,f] +// 8: [f,f,f,f,f,f,f,f] +// 9: [g,g,g,g,g,g,g,g] +// A: [g,g,g,g,h,h,h,h] +// B: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x1to16bits+352+0(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+352+4(SB)/4, $0x80800201 +DATA ·shuffleInt32x1to16bits+352+8(SB)/4, $0x80080403 +DATA ·shuffleInt32x1to16bits+352+12(SB)/4, $0x80800504 + +DATA ·shuffleInt32x1to16bits+352+16(SB)/4, $0x80800706 +DATA ·shuffleInt32x1to16bits+352+20(SB)/4, $0x80800807 +DATA ·shuffleInt32x1to16bits+352+24(SB)/4, $0x80800A09 +DATA ·shuffleInt32x1to16bits+352+28(SB)/4, $0x80800B0A + +// 13 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,b,b,b] +// 2: [b,b,b,b,b,b,b,b] +// 3: [b,b,c,c,c,c,c,c] +// 4: [c,c,c,c,c,c,c,d] +// 5: [d,d,d,d,d,d,d,d] +// 6: [d,d,d,d,e,e,e,e] +// 7: [e,e,e,e,e,e,e,e] +// 8: [e,f,f,f,f,f,f,f] +// 9: [f,f,f,f,f,f,g,g] +// A: [g,g,g,g,g,g,g,g] +// B: [g,g,g,h,h,h,h,h] +// C: [h,h,h,h,h,h,h,h] +// ... +DATA ·shuffleInt32x1to16bits+384+0(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+384+4(SB)/4, $0x80030201 +DATA ·shuffleInt32x1to16bits+384+8(SB)/4, $0x80800403 +DATA ·shuffleInt32x1to16bits+384+12(SB)/4, $0x80060504 + +DATA ·shuffleInt32x1to16bits+384+16(SB)/4, $0x80080706 +DATA ·shuffleInt32x1to16bits+384+20(SB)/4, $0x80800908 +DATA ·shuffleInt32x1to16bits+384+24(SB)/4, $0x800B0A09 +DATA ·shuffleInt32x1to16bits+384+28(SB)/4, $0x80800C0B + +// 14 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,b,b] +// 2: [b,b,b,b,b,b,b,b] +// 3: [b,b,b,b,c,c,c,c] +// 4: [c,c,c,c,c,c,c,c] +// 5: [c,c,d,d,d,d,d,d] +// 6: [d,d,d,d,d,d,d,d] +// 7: [e,e,e,e,e,e,e,e] +// 8: [e,e,e,e,e,e,f,f] +// 9: [f,f,f,f,f,f,f,f] +// A: [f,f,f,f,g,g,g,g] +// B: [g,g,g,g,g,g,g,g] +// C: [g,g,h,h,h,h,h,h] +// D: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x1to16bits+416+0(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+416+4(SB)/4, $0x80030201 +DATA ·shuffleInt32x1to16bits+416+8(SB)/4, $0x80050403 +DATA ·shuffleInt32x1to16bits+416+12(SB)/4, $0x80800605 + +DATA ·shuffleInt32x1to16bits+416+16(SB)/4, $0x80080807 +DATA ·shuffleInt32x1to16bits+416+20(SB)/4, $0x800A0908 +DATA ·shuffleInt32x1to16bits+416+24(SB)/4, $0x800C0B0A +DATA ·shuffleInt32x1to16bits+416+28(SB)/4, $0x80800D0C + +// 15 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,b] +// 2: [b,b,b,b,b,b,b,b] +// 3: [b,b,b,b,b,b,c,c] +// 4: [c,c,c,c,c,c,c,c] +// 5: [c,c,c,c,c,d,d,d] +// 6: [d,d,d,d,d,d,d,d] +// 7: [d,d,d,d,e,e,e,e] +// 8: [e,e,e,e,e,e,e,e] +// 9: [e,e,e,f,f,f,f,f] +// A: [f,f,f,f,f,f,f,f] +// B: [f,f,g,g,g,g,g,g] +// C: [g,g,g,g,g,g,g,g] +// D: [g,h,h,h,h,h,h,h] +// E: [h,h,h,h,h,h,h,h] +// ... +DATA ·shuffleInt32x1to16bits+448+0(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+448+4(SB)/4, $0x80030201 +DATA ·shuffleInt32x1to16bits+448+8(SB)/4, $0x80050403 +DATA ·shuffleInt32x1to16bits+448+12(SB)/4, $0x80070605 + +DATA ·shuffleInt32x1to16bits+448+16(SB)/4, $0x80090807 +DATA ·shuffleInt32x1to16bits+448+20(SB)/4, $0x800B0A09 +DATA ·shuffleInt32x1to16bits+448+24(SB)/4, $0x800D0C0B +DATA ·shuffleInt32x1to16bits+448+28(SB)/4, $0x80800E0D + +// 16 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [b,b,b,b,b,b,b,b] +// 3: [b,b,b,b,b,b,c,b] +// 4: [c,c,c,c,c,c,c,c] +// 5: [c,c,c,c,c,c,c,c] +// 6: [d,d,d,d,d,d,d,d] +// 7: [d,d,d,d,d,d,d,d] +// 8: [e,e,e,e,e,e,e,e] +// 9: [e,e,e,e,e,e,e,e] +// A: [f,f,f,f,f,f,f,f] +// B: [f,f,f,f,f,f,f,f] +// C: [g,g,g,g,g,g,g,g] +// D: [g,g,g,g,g,g,g,g] +// E: [h,h,h,h,h,h,h,h] +// F: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x1to16bits+480+0(SB)/4, $0x80800100 +DATA ·shuffleInt32x1to16bits+480+4(SB)/4, $0x80800302 +DATA ·shuffleInt32x1to16bits+480+8(SB)/4, $0x80800504 +DATA ·shuffleInt32x1to16bits+480+12(SB)/4, $0x80800706 + +DATA ·shuffleInt32x1to16bits+480+16(SB)/4, $0x80800908 +DATA ·shuffleInt32x1to16bits+480+20(SB)/4, $0x80800B0A +DATA ·shuffleInt32x1to16bits+480+24(SB)/4, $0x80800D0C +DATA ·shuffleInt32x1to16bits+480+28(SB)/4, $0x80800F0E + +// Shuffle masks for unpacking values from bit widths 17 to 26. +// +// The masks are grouped in 48 bytes chunks containing 3 masks of 16 bytes, with +// the following layout: +// +// - The first mask is used to shuffle values from the first 16 bytes of input +// into the lower 16 bytes of output. These values are then shifted RIGHT to +// be aligned on the begining of each 32 bit word. +// +// - The second mask selects values from the first 16 bytes of input into the +// upper 16 bytes of output. These values are then shifted RIGHT to be aligned +// on the beginning of each 32 bit word. +// +// - The third mask selects values from the second 16 bytes of input into the +// upper 16 bytes of output. These values are then shifted RIGHT to be aligned +// on the beginning of each 32 bit word. +// +// The bit width is intended to be used as an index into this array, using this +// formula to convert from the index to a byte offset: +// +// offset = 48 * (bitWidth - 17) +// +GLOBL ·shuffleInt32x17to26bits(SB), RODATA|NOPTR, $480 + +// 17 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,b,b,b,b,b,b,b] +// 3: [b,b,b,b,b,b,b,b] +// 4: [b,b,c,c,c,c,c,c] +// 5: [c,c,c,c,c,c,c,c] +// 6: [c,c,c,d,d,d,d,d] +// 7: [d,d,d,d,d,d,d,d] +// 8: [d,d,d,d,e,e,e,e] +// 9: [e,e,e,e,e,e,e,e] +// A: [e,e,e,e,e,f,f,f] +// B: [f,f,f,f,f,f,f,f] +// C: [f,f,f,f,f,f,g,g] +// D: [g,g,g,g,g,g,g,g] +// E: [g,g,g,g,g,g,g,h] +// F: [h,h,h,h,h,h,h,h] +// --- +// 0: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+0+0(SB)/4, $0x80020100 +DATA ·shuffleInt32x17to26bits+0+4(SB)/4, $0x80040302 +DATA ·shuffleInt32x17to26bits+0+8(SB)/4, $0x80060504 +DATA ·shuffleInt32x17to26bits+0+12(SB)/4, $0x80080706 + +DATA ·shuffleInt32x17to26bits+0+16(SB)/4, $0x800A0908 +DATA ·shuffleInt32x17to26bits+0+20(SB)/4, $0x800C0B0A +DATA ·shuffleInt32x17to26bits+0+24(SB)/4, $0x800E0D0C +DATA ·shuffleInt32x17to26bits+0+28(SB)/4, $0x80800F0E + +DATA ·shuffleInt32x17to26bits+0+32(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+0+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+0+40(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+0+44(SB)/4, $0x80008080 + +// 18 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,b,b,b,b,b,b] +// 3: [b,b,b,b,b,b,b,b] +// 4: [b,b,b,b,c,c,c,c] +// 5: [c,c,c,c,c,c,c,c] +// 6: [c,c,c,c,c,c,d,d] +// 7: [d,d,d,d,d,d,d,d] +// 8: [d,d,d,d,d,d,d,d] +// 9: [e,e,e,e,e,e,e,e] +// A: [e,e,e,e,e,e,e,e] +// B: [e,e,f,f,f,f,f,f] +// C: [f,f,f,f,f,f,f,f] +// D: [f,f,f,f,g,g,g,g] +// E: [g,g,g,g,g,g,g,g] +// F: [g,g,g,g,g,g,h,h] +// --- +// 0: [h,h,h,h,h,h,h,h] +// 1: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+48+0(SB)/4, $0x80020100 +DATA ·shuffleInt32x17to26bits+48+4(SB)/4, $0x80040302 +DATA ·shuffleInt32x17to26bits+48+8(SB)/4, $0x80060504 +DATA ·shuffleInt32x17to26bits+48+12(SB)/4, $0x80080706 + +DATA ·shuffleInt32x17to26bits+48+16(SB)/4, $0x800B0A09 +DATA ·shuffleInt32x17to26bits+48+20(SB)/4, $0x800D0C0B +DATA ·shuffleInt32x17to26bits+48+24(SB)/4, $0x800F0E0D +DATA ·shuffleInt32x17to26bits+48+28(SB)/4, $0x8080800F + +DATA ·shuffleInt32x17to26bits+48+32(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+48+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+48+40(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+48+44(SB)/4, $0x80010080 + +// 19 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,b,b,b,b,b] +// 3: [b,b,b,b,b,b,b,b] +// 4: [b,b,b,b,b,b,c,c] +// 5: [c,c,c,c,c,c,c,c] +// 6: [c,c,c,c,c,c,c,c] +// 7: [c,d,d,d,d,d,d,d] +// 8: [d,d,d,d,d,d,d,d] +// 9: [d,d,d,d,e,e,e,e] +// A: [e,e,e,e,e,e,e,e] +// B: [e,e,e,e,e,e,e,f] +// C: [f,f,f,f,f,f,f,f] +// D: [f,f,f,f,f,f,f,f] +// E: [f,f,g,g,g,g,g,g] +// F: [g,g,g,g,g,g,g,g] +// --- +// 0: [g,g,g,g,g,h,h,h] +// 1: [h,h,h,h,h,h,h,h] +// 2: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+96+0(SB)/4, $0x80020100 +DATA ·shuffleInt32x17to26bits+96+4(SB)/4, $0x80040302 +DATA ·shuffleInt32x17to26bits+96+8(SB)/4, $0x07060504 +DATA ·shuffleInt32x17to26bits+96+12(SB)/4, $0x80090807 + +DATA ·shuffleInt32x17to26bits+96+16(SB)/4, $0x800B0A09 +DATA ·shuffleInt32x17to26bits+96+20(SB)/4, $0x0E0D0C0B +DATA ·shuffleInt32x17to26bits+96+24(SB)/4, $0x80800F0E +DATA ·shuffleInt32x17to26bits+96+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x17to26bits+96+32(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+96+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+96+40(SB)/4, $0x80008080 +DATA ·shuffleInt32x17to26bits+96+44(SB)/4, $0x80020100 + +// 20 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,b,b,b,b] +// 3: [b,b,b,b,b,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [c,c,c,c,c,c,c,c] +// 6: [c,c,c,c,c,c,c,c] +// 7: [c,c,c,c,d,d,d,d] +// 8: [d,d,d,d,d,d,d,d] +// 9: [d,d,d,d,d,d,d,d] +// A: [e,e,e,e,e,e,e,e] +// B: [e,e,e,e,e,e,e,e] +// C: [e,e,e,e,f,f,f,f] +// D: [f,f,f,f,f,f,f,f] +// E: [f,f,f,f,f,f,f,f] +// F: [g,g,g,g,g,g,g,g] +// --- +// 0: [g,g,g,g,g,g,g,g] +// 1: [g,g,g,g,h,h,h,h] +// 2: [h,h,h,h,h,h,h,h] +// 3: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+144+0(SB)/4, $0x80020100 +DATA ·shuffleInt32x17to26bits+144+4(SB)/4, $0x80040302 +DATA ·shuffleInt32x17to26bits+144+8(SB)/4, $0x80070605 +DATA ·shuffleInt32x17to26bits+144+12(SB)/4, $0x80090807 + +DATA ·shuffleInt32x17to26bits+144+16(SB)/4, $0x800C0B0A +DATA ·shuffleInt32x17to26bits+144+20(SB)/4, $0x800E0D0C +DATA ·shuffleInt32x17to26bits+144+24(SB)/4, $0x8080800F +DATA ·shuffleInt32x17to26bits+144+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x17to26bits+144+32(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+144+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+144+40(SB)/4, $0x80010080 +DATA ·shuffleInt32x17to26bits+144+44(SB)/4, $0x80030201 + +// 21 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,b,b,b] +// 3: [b,b,b,b,b,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,c,c,c,c,c,c] +// 6: [c,c,c,c,c,c,c,c] +// 7: [c,c,c,c,c,c,c,d] +// 8: [d,d,d,d,d,d,d,d] +// 9: [d,d,d,d,d,d,d,d] +// A: [d,d,d,d,e,e,e,e] +// B: [e,e,e,e,e,e,e,e] +// C: [e,e,e,e,e,e,e,e] +// D: [e,f,f,f,f,f,f,f] +// E: [f,f,f,f,f,f,f,f] +// F: [f,f,f,f,f,f,g,g] +// --- +// 0: [g,g,g,g,g,g,g,g] +// 1: [g,g,g,g,g,g,g,g] +// 2: [g,g,g,h,h,h,h,h] +// 3: [h,h,h,h,h,h,h,h] +// 4: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+192+0(SB)/4, $0x80020100 +DATA ·shuffleInt32x17to26bits+192+4(SB)/4, $0x05040302 +DATA ·shuffleInt32x17to26bits+192+8(SB)/4, $0x80070605 +DATA ·shuffleInt32x17to26bits+192+12(SB)/4, $0x0A090807 + +DATA ·shuffleInt32x17to26bits+192+16(SB)/4, $0x0D0C0B0A +DATA ·shuffleInt32x17to26bits+192+20(SB)/4, $0x800F0E0D +DATA ·shuffleInt32x17to26bits+192+24(SB)/4, $0x8080800F +DATA ·shuffleInt32x17to26bits+192+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x17to26bits+192+32(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+192+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+192+40(SB)/4, $0x02010080 +DATA ·shuffleInt32x17to26bits+192+44(SB)/4, $0x80040302 + +// 22 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,b,b] +// 3: [b,b,b,b,b,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,c,c,c,c] +// 6: [c,c,c,c,c,c,c,c] +// 7: [c,c,c,c,c,c,c,c] +// 8: [c,c,d,d,d,d,d,d] +// 9: [d,d,d,d,d,d,d,d] +// A: [d,d,d,d,d,d,d,d] +// B: [e,e,e,e,e,e,e,e] +// C: [e,e,e,e,e,e,e,e] +// D: [e,e,e,e,e,e,f,f] +// E: [f,f,f,f,f,f,f,f] +// F: [f,f,f,f,f,f,f,f] +// --- +// 0: [f,f,f,f,g,g,g,g] +// 1: [g,g,g,g,g,g,g,g] +// 2: [g,g,g,g,g,g,g,g] +// 3: [g,g,h,h,h,h,h,h] +// 4: [h,h,h,h,h,h,h,h] +// 5: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+240+0(SB)/4, $0x80020100 +DATA ·shuffleInt32x17to26bits+240+4(SB)/4, $0x05040302 +DATA ·shuffleInt32x17to26bits+240+8(SB)/4, $0x08070605 +DATA ·shuffleInt32x17to26bits+240+12(SB)/4, $0x800A0908 + +DATA ·shuffleInt32x17to26bits+240+16(SB)/4, $0x800D0C0B +DATA ·shuffleInt32x17to26bits+240+20(SB)/4, $0x800F0E0D +DATA ·shuffleInt32x17to26bits+240+24(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+240+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x17to26bits+240+32(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+240+36(SB)/4, $0x00808080 +DATA ·shuffleInt32x17to26bits+240+40(SB)/4, $0x03020100 +DATA ·shuffleInt32x17to26bits+240+44(SB)/4, $0x80050403 + +// 23 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,a,b] +// 3: [b,b,b,b,b,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,b,b,c,c] +// 6: [c,c,c,c,c,c,c,c] +// 7: [c,c,c,c,c,c,c,c] +// 8: [c,c,c,c,c,d,d,d] +// 9: [d,d,d,d,d,d,d,d] +// A: [d,d,d,d,d,d,d,d] +// B: [d,d,d,d,e,e,e,e] +// C: [e,e,e,e,e,e,e,e] +// D: [e,e,e,e,e,e,e,e] +// E: [e,e,e,f,f,f,f,f] +// F: [f,f,f,f,f,f,f,f] +// --- +// 0: [f,f,f,f,f,f,f,f] +// 1: [f,f,g,g,g,g,g,g] +// 2: [g,g,g,g,g,g,g,g] +// 3: [g,g,g,g,g,g,g,g] +// 4: [g,h,h,h,h,h,h,h] +// 5: [h,h,h,h,h,h,h,h] +// 6: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+288+0(SB)/4, $0x80020100 +DATA ·shuffleInt32x17to26bits+288+4(SB)/4, $0x05040302 +DATA ·shuffleInt32x17to26bits+288+8(SB)/4, $0x08070605 +DATA ·shuffleInt32x17to26bits+288+12(SB)/4, $0x0B0A0908 + +DATA ·shuffleInt32x17to26bits+288+16(SB)/4, $0x0E0D0C0B +DATA ·shuffleInt32x17to26bits+288+20(SB)/4, $0x80800F0E +DATA ·shuffleInt32x17to26bits+288+24(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+288+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x17to26bits+288+32(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+288+36(SB)/4, $0x01008080 +DATA ·shuffleInt32x17to26bits+288+40(SB)/4, $0x04030201 +DATA ·shuffleInt32x17to26bits+288+44(SB)/4, $0x80060504 + +// 24 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,a,a] +// 3: [b,b,b,b,b,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,b,b,b,b] +// 6: [c,c,c,c,c,c,c,c] +// 7: [c,c,c,c,c,c,c,c] +// 8: [c,c,c,c,c,c,c,c] +// 9: [d,d,d,d,d,d,d,d] +// A: [d,d,d,d,d,d,d,d] +// B: [d,d,d,d,d,d,d,d] +// C: [e,e,e,e,e,e,e,e] +// D: [e,e,e,e,e,e,e,e] +// E: [e,e,e,e,e,e,e,e] +// F: [f,f,f,f,f,f,f,f] +// --- +// 0: [f,f,f,f,f,f,f,f] +// 1: [f,f,f,f,f,f,f,f] +// 2: [g,g,g,g,g,g,g,g] +// 3: [g,g,g,g,g,g,g,g] +// 4: [g,g,g,g,g,g,g,g] +// 5: [h,h,h,h,h,h,h,h] +// 6: [h,h,h,h,h,h,h,h] +// 7: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+336+0(SB)/4, $0x80020100 +DATA ·shuffleInt32x17to26bits+336+4(SB)/4, $0x80050403 +DATA ·shuffleInt32x17to26bits+336+8(SB)/4, $0x80080706 +DATA ·shuffleInt32x17to26bits+336+12(SB)/4, $0x800B0A09 + +DATA ·shuffleInt32x17to26bits+336+16(SB)/4, $0x800E0D0C +DATA ·shuffleInt32x17to26bits+336+20(SB)/4, $0x8080800F +DATA ·shuffleInt32x17to26bits+336+24(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+336+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x17to26bits+336+32(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+336+36(SB)/4, $0x80010080 +DATA ·shuffleInt32x17to26bits+336+40(SB)/4, $0x80040302 +DATA ·shuffleInt32x17to26bits+336+44(SB)/4, $0x80070605 + +// 25 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,a,a] +// 3: [a,b,b,b,b,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,b,b,b,b] +// 6: [b,b,c,c,c,c,c,c] +// 7: [c,c,c,c,c,c,c,c] +// 8: [c,c,c,c,c,c,c,c] +// 9: [c,c,c,d,d,d,d,d] +// A: [d,d,d,d,d,d,d,d] +// B: [d,d,d,d,d,d,d,d] +// C: [d,d,d,d,e,e,e,e] +// D: [e,e,e,e,e,e,e,e] +// E: [e,e,e,e,e,e,e,e] +// F: [e,e,e,e,e,f,f,f] +// --- +// 0: [f,f,f,f,f,f,f,f] +// 1: [f,f,f,f,f,f,f,f] +// 2: [f,f,f,f,f,f,g,g] +// 3: [g,g,g,g,g,g,g,g] +// 4: [g,g,g,g,g,g,g,g] +// 5: [g,g,g,g,g,g,g,h] +// 6: [h,h,h,h,h,h,h,h] +// 7: [h,h,h,h,h,h,h,h] +// 8: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+384+0(SB)/4, $0x03020100 +DATA ·shuffleInt32x17to26bits+384+4(SB)/4, $0x06050403 +DATA ·shuffleInt32x17to26bits+384+8(SB)/4, $0x09080706 +DATA ·shuffleInt32x17to26bits+384+12(SB)/4, $0x0C0B0A09 + +DATA ·shuffleInt32x17to26bits+384+16(SB)/4, $0x0F0E0D0C +DATA ·shuffleInt32x17to26bits+384+20(SB)/4, $0x8080800F +DATA ·shuffleInt32x17to26bits+384+24(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+384+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x17to26bits+384+32(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+384+36(SB)/4, $0x02010080 +DATA ·shuffleInt32x17to26bits+384+40(SB)/4, $0x05040302 +DATA ·shuffleInt32x17to26bits+384+44(SB)/4, $0x08070605 + +// 26 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,a,a] +// 3: [a,a,b,b,b,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,b,b,b,b] +// 6: [b,b,b,b,c,c,c,c] +// 7: [c,c,c,c,c,c,c,c] +// 8: [c,c,c,c,c,c,c,c] +// 9: [c,c,c,c,c,c,d,d] +// A: [d,d,d,d,d,d,d,d] +// B: [d,d,d,d,d,d,d,d] +// C: [d,d,d,d,d,d,d,d] +// D: [e,e,e,e,e,e,e,e] +// E: [e,e,e,e,e,e,e,e] +// F: [e,e,e,e,e,e,e,e] +// --- +// 0: [e,e,f,f,f,f,f,f] +// 1: [f,f,f,f,f,f,f,f] +// 2: [f,f,f,f,f,f,f,f] +// 3: [f,f,f,f,g,g,g,g] +// 4: [g,g,g,g,g,g,g,g] +// 5: [g,g,g,g,g,g,g,g] +// 6: [g,g,g,g,g,g,h,h] +// 7: [h,h,h,h,h,h,h,h] +// 8: [h,h,h,h,h,h,h,h] +// 9: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x17to26bits+432+0(SB)/4, $0x03020100 +DATA ·shuffleInt32x17to26bits+432+4(SB)/4, $0x06050403 +DATA ·shuffleInt32x17to26bits+432+8(SB)/4, $0x09080706 +DATA ·shuffleInt32x17to26bits+432+12(SB)/4, $0x0C0B0A09 + +DATA ·shuffleInt32x17to26bits+432+16(SB)/4, $0x800F0E0D +DATA ·shuffleInt32x17to26bits+432+20(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+432+24(SB)/4, $0x80808080 +DATA ·shuffleInt32x17to26bits+432+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x17to26bits+432+32(SB)/4, $0x00808080 +DATA ·shuffleInt32x17to26bits+432+36(SB)/4, $0x03020100 +DATA ·shuffleInt32x17to26bits+432+40(SB)/4, $0x06050403 +DATA ·shuffleInt32x17to26bits+432+44(SB)/4, $0x09080706 + +// Shuffle masks for unpacking values from bit widths 27 to 31. +// +// The masks are grouped in 80 bytes chunks containing 5 masks of 16 bytes, with +// the following layout: +// +// - The first mask is used to shuffle values from the first 16 bytes of input +// into the lower 16 bytes of output. These values are then shifted RIGHT to +// be aligned on the begining of each 32 bit word. +// +// - The second mask is used to shuffle upper bits of bit-packed values of the +// first 16 bytes of input that spanned across 5 bytes. These extra bits cannot +// be selected by the first mask (which can select at most 4 bytes per word). +// The extra bits are then shifted LEFT to be positioned at the end of the +// words, after the bits extracted by the first mask. +// +// - The third mask selects values from the first 16 bytes of input into the +// upper 16 bytes of output. These values are then shifted RIGHT to be aligned +// on the beginning of each 32 bit word. +// +// - The fourth mask selects values from the second 16 bytes of input into the +// upper 16 bytes of output. These values are then shifted RIGHT to be aligned +// on the beginning of each 32 bit word. 
+// +// - The fifth mask is used to shuffle upper bits of bit-packed values values of +// second 16 bytes of input that spanned across 5 bytes. These values are then +// shifted LEFT to be aligned on the beginning of each 32 bit word. +// +// The bit width is intended to be used as an index into this array, using this +// formula to convert from the index to a byte offset: +// +// offset = 80 * (bitWidth - 27) +// +GLOBL ·shuffleInt32x27to31bits(SB), RODATA|NOPTR, $400 + +// 27 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,a,a] +// 3: [a,a,a,b,b,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,b,b,b,b] +// 6: [b,b,b,b,b,b,c,c] +// 7: [c,c,c,c,c,c,c,c] +// 8: [c,c,c,c,c,c,c,c] +// 9: [c,c,c,c,c,c,c,c] +// A: [c,d,d,d,d,d,d,d] +// B: [d,d,d,d,d,d,d,d] +// C: [d,d,d,d,d,d,d,d] +// D: [d,d,d,d,e,e,e,e] +// E: [e,e,e,e,e,e,e,e] +// F: [e,e,e,e,e,e,e,e] +// --- +// 0: [e,e,e,e,e,e,e,f] +// 1: [f,f,f,f,f,f,f,f] +// 2: [f,f,f,f,f,f,f,f] +// 3: [f,f,f,f,f,f,f,f] +// 4: [f,f,g,g,g,g,g,g] +// 5: [g,g,g,g,g,g,g,g] +// 6: [g,g,g,g,g,g,g,g] +// 7: [g,g,g,g,g,h,h,h] +// 8: [h,h,h,h,h,h,h,h] +// 9: [h,h,h,h,h,h,h,h] +// A: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x27to31bits+0+0(SB)/4, $0x03020100 +DATA ·shuffleInt32x27to31bits+0+4(SB)/4, $0x06050403 +DATA ·shuffleInt32x27to31bits+0+8(SB)/4, $0x09080706 +DATA ·shuffleInt32x27to31bits+0+12(SB)/4, $0x0D0C0B0A + +DATA ·shuffleInt32x27to31bits+0+16(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+0+20(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+0+24(SB)/4, $0x0A808080 +DATA ·shuffleInt32x27to31bits+0+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x27to31bits+0+32(SB)/4, $0x800F0E0D +DATA ·shuffleInt32x27to31bits+0+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+0+40(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+0+44(SB)/4, $0x80808080 + +DATA ·shuffleInt32x27to31bits+0+48(SB)/4, $0x00808080 +DATA ·shuffleInt32x27to31bits+0+52(SB)/4, $0x03020100 +DATA ·shuffleInt32x27to31bits+0+56(SB)/4, $0x07060504 +DATA ·shuffleInt32x27to31bits+0+60(SB)/4, $0x0A090807 + +DATA ·shuffleInt32x27to31bits+0+64(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+0+68(SB)/4, $0x04808080 +DATA ·shuffleInt32x27to31bits+0+72(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+0+76(SB)/4, $0x80808080 + +// 28 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,a,a] +// 3: [a,a,a,a,b,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,b,b,b,b] +// 6: [b,b,b,b,b,b,b,b] +// 7: [c,c,c,c,c,c,c,c] +// 8: [c,c,c,c,c,c,c,c] +// 9: [c,c,c,c,c,c,c,c] +// A: [c,c,c,c,d,d,d,d] +// B: [d,d,d,d,d,d,d,d] +// C: [d,d,d,d,d,d,d,d] +// D: [d,d,d,d,d,d,d,d] +// E: [e,e,e,e,e,e,e,e] +// F: [e,e,e,e,e,e,e,e] +// --- +// 0: [e,e,e,e,e,e,e,e] +// 1: [e,e,e,e,f,f,f,f] +// 2: [f,f,f,f,f,f,f,f] +// 3: [f,f,f,f,f,f,f,f] +// 4: [f,f,f,f,f,f,f,f] +// 5: [g,g,g,g,g,g,g,g] +// 6: [g,g,g,g,g,g,g,g] +// 7: [g,g,g,g,g,g,g,g] +// 8: [g,g,g,g,h,h,h,h] +// 9: [h,h,h,h,h,h,h,h] +// A: [h,h,h,h,h,h,h,h] +// B: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x27to31bits+80+0(SB)/4, $0x03020100 +DATA ·shuffleInt32x27to31bits+80+4(SB)/4, $0x06050403 +DATA ·shuffleInt32x27to31bits+80+8(SB)/4, $0x0A090807 +DATA ·shuffleInt32x27to31bits+80+12(SB)/4, $0x0D0C0B0A + +DATA ·shuffleInt32x27to31bits+80+16(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+80+20(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+80+24(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+80+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x27to31bits+80+32(SB)/4, $0x80800F0E +DATA ·shuffleInt32x27to31bits+80+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+80+40(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+80+44(SB)/4, $0x80808080 + +DATA ·shuffleInt32x27to31bits+80+48(SB)/4, $0x01008080 +DATA ·shuffleInt32x27to31bits+80+52(SB)/4, $0x04030201 +DATA ·shuffleInt32x27to31bits+80+56(SB)/4, $0x08070605 +DATA ·shuffleInt32x27to31bits+80+60(SB)/4, $0x0B0A0908 + +DATA ·shuffleInt32x27to31bits+80+64(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+80+68(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+80+72(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+80+76(SB)/4, $0x80808080 + +// 29 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,a,a] +// 3: [a,a,a,a,a,b,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,b,b,b,b] +// 6: [b,b,b,b,b,b,b,b] +// 7: [b,b,c,c,c,c,c,c] +// 8: [c,c,c,c,c,c,c,c] +// 9: [c,c,c,c,c,c,c,c] +// A: [c,c,c,c,c,c,c,d] +// B: [d,d,d,d,d,d,d,d] +// C: [d,d,d,d,d,d,d,d] +// D: [d,d,d,d,d,d,d,d] +// E: [d,d,d,d,e,e,e,e] +// F: [e,e,e,e,e,e,e,e] +// --- +// 0: [e,e,e,e,e,e,e,e] +// 1: [e,e,e,e,e,e,e,e] +// 2: [e,f,f,f,f,f,f,f] +// 3: [f,f,f,f,f,f,f,f] +// 4: [f,f,f,f,f,f,f,f] +// 5: [f,f,f,f,f,f,g,g] +// 6: [g,g,g,g,g,g,g,g] +// 7: [g,g,g,g,g,g,g,g] +// 8: [g,g,g,g,g,g,g,g] +// 9: [g,g,g,h,h,h,h,h] +// A: [h,h,h,h,h,h,h,h] +// B: [h,h,h,h,h,h,h,h] +// C: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x27to31bits+160+0(SB)/4, $0x03020100 +DATA ·shuffleInt32x27to31bits+160+4(SB)/4, $0x06050403 +DATA ·shuffleInt32x27to31bits+160+8(SB)/4, $0x0A090807 +DATA ·shuffleInt32x27to31bits+160+12(SB)/4, $0x0D0C0B0A + +DATA ·shuffleInt32x27to31bits+160+16(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+160+20(SB)/4, $0x07808080 +DATA ·shuffleInt32x27to31bits+160+24(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+160+28(SB)/4, $0x0E808080 + +DATA ·shuffleInt32x27to31bits+160+32(SB)/4, $0x80800F0E +DATA ·shuffleInt32x27to31bits+160+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+160+40(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+160+44(SB)/4, $0x80808080 + +DATA ·shuffleInt32x27to31bits+160+48(SB)/4, $0x01008080 +DATA ·shuffleInt32x27to31bits+160+52(SB)/4, $0x05040302 +DATA ·shuffleInt32x27to31bits+160+56(SB)/4, $0x08070605 +DATA ·shuffleInt32x27to31bits+160+60(SB)/4, $0x0C0B0A09 + +DATA ·shuffleInt32x27to31bits+160+64(SB)/4, $0x02808080 +DATA ·shuffleInt32x27to31bits+160+68(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+160+72(SB)/4, $0x09808080 +DATA ·shuffleInt32x27to31bits+160+76(SB)/4, $0x80808080 + +// 30 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,a,a] +// 3: [a,a,a,a,a,a,b,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,b,b,b,b] +// 6: [b,b,b,b,b,b,b,b] +// 7: [b,b,b,b,c,c,c,c] +// 8: [c,c,c,c,c,c,c,c] +// 9: [c,c,c,c,c,c,c,c] +// A: [c,c,c,c,c,c,c,c] +// B: [c,c,d,d,d,d,d,d] +// C: [d,d,d,d,d,d,d,d] +// D: [d,d,d,d,d,d,d,d] +// E: [d,d,d,d,d,d,d,d] +// F: [e,e,e,e,e,e,e,e] +// --- +// 0: [e,e,e,e,e,e,e,e] +// 1: [e,e,e,e,e,e,e,e] +// 2: [e,e,e,e,e,e,f,f] +// 3: [f,f,f,f,f,f,f,f] +// 4: [f,f,f,f,f,f,f,f] +// 5: [f,f,f,f,f,f,f,f] +// 6: [f,f,f,f,g,g,g,g] +// 7: [g,g,g,g,g,g,g,g] +// 8: [g,g,g,g,g,g,g,g] +// 9: [g,g,g,g,g,g,g,g] +// A: [g,g,h,h,h,h,h,h] +// B: [h,h,h,h,h,h,h,h] +// C: [h,h,h,h,h,h,h,h] +// D: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x27to31bits+240+0(SB)/4, $0x03020100 +DATA ·shuffleInt32x27to31bits+240+4(SB)/4, $0x06050403 +DATA ·shuffleInt32x27to31bits+240+8(SB)/4, $0x0A090807 +DATA ·shuffleInt32x27to31bits+240+12(SB)/4, $0x0E0D0C0B + +DATA ·shuffleInt32x27to31bits+240+16(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+240+20(SB)/4, $0x07808080 +DATA ·shuffleInt32x27to31bits+240+24(SB)/4, $0x0B808080 +DATA ·shuffleInt32x27to31bits+240+28(SB)/4, $0x80808080 + +DATA ·shuffleInt32x27to31bits+240+32(SB)/4, $0x8080800F +DATA ·shuffleInt32x27to31bits+240+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+240+40(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+240+44(SB)/4, $0x80808080 + +DATA ·shuffleInt32x27to31bits+240+48(SB)/4, $0x02010080 +DATA ·shuffleInt32x27to31bits+240+52(SB)/4, $0x05040302 +DATA ·shuffleInt32x27to31bits+240+56(SB)/4, $0x09080706 +DATA ·shuffleInt32x27to31bits+240+60(SB)/4, $0x0D0C0B0A + +DATA ·shuffleInt32x27to31bits+240+64(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+240+68(SB)/4, $0x06808080 +DATA ·shuffleInt32x27to31bits+240+72(SB)/4, $0x0A808080 +DATA ·shuffleInt32x27to31bits+240+76(SB)/4, $0x80808080 + +// 31 bits => 32 bits +// ------------------ +// 0: [a,a,a,a,a,a,a,a] +// 1: [a,a,a,a,a,a,a,a] +// 2: [a,a,a,a,a,a,a,a] +// 3: [a,a,a,a,a,a,a,b] +// 4: [b,b,b,b,b,b,b,b] +// 5: [b,b,b,b,b,b,b,b] +// 6: [b,b,b,b,b,b,b,b] +// 7: [b,b,b,b,b,b,c,c] +// 8: [c,c,c,c,c,c,c,c] +// 9: [c,c,c,c,c,c,c,c] +// A: [c,c,c,c,c,c,c,c] +// B: [c,c,c,c,c,d,d,d] +// C: [d,d,d,d,d,d,d,d] +// D: [d,d,d,d,d,d,d,d] +// E: [d,d,d,d,d,d,d,d] +// F: [d,d,d,d,e,e,e,e] +// --- +// 0: [e,e,e,e,e,e,e,e] +// 1: [e,e,e,e,e,e,e,e] +// 2: [e,e,e,e,e,e,e,e] +// 3: [e,e,e,f,f,f,f,f] +// 4: [f,f,f,f,f,f,f,f] +// 5: [f,f,f,f,f,f,f,f] +// 6: [f,f,f,f,f,f,f,f] +// 7: [f,f,g,g,g,g,g,g] +// 8: [g,g,g,g,g,g,g,g] +// 9: [g,g,g,g,g,g,g,g] +// A: [g,g,g,g,g,g,g,g] +// B: [g,h,h,h,h,h,h,h] +// C: [h,h,h,h,h,h,h,h] +// D: [h,h,h,h,h,h,h,h] +// E: [h,h,h,h,h,h,h,h] +// ... 
+DATA ·shuffleInt32x27to31bits+320+0(SB)/4, $0x03020100 +DATA ·shuffleInt32x27to31bits+320+4(SB)/4, $0x06050403 +DATA ·shuffleInt32x27to31bits+320+8(SB)/4, $0x0A090807 +DATA ·shuffleInt32x27to31bits+320+12(SB)/4, $0x0E0D0C0B + +DATA ·shuffleInt32x27to31bits+320+16(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+320+20(SB)/4, $0x07808080 +DATA ·shuffleInt32x27to31bits+320+24(SB)/4, $0x0B808080 +DATA ·shuffleInt32x27to31bits+320+28(SB)/4, $0x0F808080 + +DATA ·shuffleInt32x27to31bits+320+32(SB)/4, $0x8080800F +DATA ·shuffleInt32x27to31bits+320+36(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+320+40(SB)/4, $0x80808080 +DATA ·shuffleInt32x27to31bits+320+44(SB)/4, $0x80808080 + +DATA ·shuffleInt32x27to31bits+320+48(SB)/4, $0x02010080 +DATA ·shuffleInt32x27to31bits+320+52(SB)/4, $0x06050403 +DATA ·shuffleInt32x27to31bits+320+56(SB)/4, $0x0A090807 +DATA ·shuffleInt32x27to31bits+320+60(SB)/4, $0x0E0D0C0B + +DATA ·shuffleInt32x27to31bits+320+64(SB)/4, $0x03808080 +DATA ·shuffleInt32x27to31bits+320+68(SB)/4, $0x07808080 +DATA ·shuffleInt32x27to31bits+320+72(SB)/4, $0x0B808080 +DATA ·shuffleInt32x27to31bits+320+76(SB)/4, $0x80808080 + +// The RIGHT shifts to unpack 32 bits integers. 
+// +// The following formula was determined empirically as the expression which +// generates shift values: +// +// shift[i] = (i * bitWidth) % 8 +// +GLOBL ·shiftRightInt32(SB), RODATA|NOPTR, $256 + +DATA ·shiftRightInt32+0+0(SB)/4, $0 +DATA ·shiftRightInt32+0+4(SB)/4, $1 +DATA ·shiftRightInt32+0+8(SB)/4, $2 +DATA ·shiftRightInt32+0+12(SB)/4, $3 +DATA ·shiftRightInt32+0+16(SB)/4, $4 +DATA ·shiftRightInt32+0+20(SB)/4, $5 +DATA ·shiftRightInt32+0+24(SB)/4, $6 +DATA ·shiftRightInt32+0+28(SB)/4, $7 + +DATA ·shiftRightInt32+32+0(SB)/4, $0 +DATA ·shiftRightInt32+32+4(SB)/4, $2 +DATA ·shiftRightInt32+32+8(SB)/4, $4 +DATA ·shiftRightInt32+32+12(SB)/4, $6 +DATA ·shiftRightInt32+32+16(SB)/4, $0 +DATA ·shiftRightInt32+32+20(SB)/4, $2 +DATA ·shiftRightInt32+32+24(SB)/4, $4 +DATA ·shiftRightInt32+32+28(SB)/4, $6 + +DATA ·shiftRightInt32+64+0(SB)/4, $0 +DATA ·shiftRightInt32+64+4(SB)/4, $3 +DATA ·shiftRightInt32+64+8(SB)/4, $6 +DATA ·shiftRightInt32+64+12(SB)/4, $1 +DATA ·shiftRightInt32+64+16(SB)/4, $4 +DATA ·shiftRightInt32+64+20(SB)/4, $7 +DATA ·shiftRightInt32+64+24(SB)/4, $2 +DATA ·shiftRightInt32+64+28(SB)/4, $5 + +DATA ·shiftRightInt32+96+0(SB)/4, $0 +DATA ·shiftRightInt32+96+4(SB)/4, $4 +DATA ·shiftRightInt32+96+8(SB)/4, $0 +DATA ·shiftRightInt32+96+12(SB)/4, $4 +DATA ·shiftRightInt32+96+16(SB)/4, $0 +DATA ·shiftRightInt32+96+20(SB)/4, $4 +DATA ·shiftRightInt32+96+24(SB)/4, $0 +DATA ·shiftRightInt32+96+28(SB)/4, $4 + +DATA ·shiftRightInt32+128+0(SB)/4, $0 +DATA ·shiftRightInt32+128+4(SB)/4, $5 +DATA ·shiftRightInt32+128+8(SB)/4, $2 +DATA ·shiftRightInt32+128+12(SB)/4, $7 +DATA ·shiftRightInt32+128+16(SB)/4, $4 +DATA ·shiftRightInt32+128+20(SB)/4, $1 +DATA ·shiftRightInt32+128+24(SB)/4, $6 +DATA ·shiftRightInt32+128+28(SB)/4, $3 + +DATA ·shiftRightInt32+160+0(SB)/4, $0 +DATA ·shiftRightInt32+160+4(SB)/4, $6 +DATA ·shiftRightInt32+160+8(SB)/4, $4 +DATA ·shiftRightInt32+160+12(SB)/4, $2 +DATA ·shiftRightInt32+160+16(SB)/4, $0 +DATA ·shiftRightInt32+160+20(SB)/4, $6 +DATA 
·shiftRightInt32+160+24(SB)/4, $4 +DATA ·shiftRightInt32+160+28(SB)/4, $2 + +DATA ·shiftRightInt32+192+0(SB)/4, $0 +DATA ·shiftRightInt32+192+4(SB)/4, $7 +DATA ·shiftRightInt32+192+8(SB)/4, $6 +DATA ·shiftRightInt32+192+12(SB)/4, $5 +DATA ·shiftRightInt32+192+16(SB)/4, $4 +DATA ·shiftRightInt32+192+20(SB)/4, $3 +DATA ·shiftRightInt32+192+24(SB)/4, $2 +DATA ·shiftRightInt32+192+28(SB)/4, $1 + +DATA ·shiftRightInt32+224+0(SB)/4, $0 +DATA ·shiftRightInt32+224+4(SB)/4, $0 +DATA ·shiftRightInt32+224+8(SB)/4, $0 +DATA ·shiftRightInt32+224+12(SB)/4, $0 +DATA ·shiftRightInt32+224+16(SB)/4, $0 +DATA ·shiftRightInt32+224+20(SB)/4, $0 +DATA ·shiftRightInt32+224+24(SB)/4, $0 +DATA ·shiftRightInt32+224+28(SB)/4, $0 + +// The LEFT shifts to unpack 32 bits integers. +// +// The following formula was determined empirically as the expression which +// generates shift values: +// +// shift[i] = (8 - (i * bitWidth)) % 8 +// +GLOBL ·shiftLeftInt32(SB), RODATA|NOPTR, $256 + +DATA ·shiftLeftInt32+0+0(SB)/4, $0 +DATA ·shiftLeftInt32+0+4(SB)/4, $7 +DATA ·shiftLeftInt32+0+8(SB)/4, $6 +DATA ·shiftLeftInt32+0+12(SB)/4, $5 +DATA ·shiftLeftInt32+0+16(SB)/4, $4 +DATA ·shiftLeftInt32+0+20(SB)/4, $3 +DATA ·shiftLeftInt32+0+24(SB)/4, $2 +DATA ·shiftLeftInt32+0+28(SB)/4, $1 + +DATA ·shiftLeftInt32+32+0(SB)/4, $0 +DATA ·shiftLeftInt32+32+4(SB)/4, $6 +DATA ·shiftLeftInt32+32+8(SB)/4, $4 +DATA ·shiftLeftInt32+32+12(SB)/4, $2 +DATA ·shiftLeftInt32+32+16(SB)/4, $0 +DATA ·shiftLeftInt32+32+20(SB)/4, $6 +DATA ·shiftLeftInt32+32+24(SB)/4, $4 +DATA ·shiftLeftInt32+32+28(SB)/4, $2 + +DATA ·shiftLeftInt32+64+0(SB)/4, $0 +DATA ·shiftLeftInt32+64+4(SB)/4, $5 +DATA ·shiftLeftInt32+64+8(SB)/4, $2 +DATA ·shiftLeftInt32+64+12(SB)/4, $7 +DATA ·shiftLeftInt32+64+16(SB)/4, $4 +DATA ·shiftLeftInt32+64+20(SB)/4, $1 +DATA ·shiftLeftInt32+64+24(SB)/4, $6 +DATA ·shiftLeftInt32+64+28(SB)/4, $3 + +DATA ·shiftLeftInt32+96+0(SB)/4, $0 +DATA ·shiftLeftInt32+96+4(SB)/4, $4 +DATA ·shiftLeftInt32+96+8(SB)/4, $0 +DATA 
·shiftLeftInt32+96+12(SB)/4, $4 +DATA ·shiftLeftInt32+96+16(SB)/4, $0 +DATA ·shiftLeftInt32+96+20(SB)/4, $4 +DATA ·shiftLeftInt32+96+24(SB)/4, $0 +DATA ·shiftLeftInt32+96+28(SB)/4, $4 + +DATA ·shiftLeftInt32+128+0(SB)/4, $0 +DATA ·shiftLeftInt32+128+4(SB)/4, $3 +DATA ·shiftLeftInt32+128+8(SB)/4, $6 +DATA ·shiftLeftInt32+128+12(SB)/4, $1 +DATA ·shiftLeftInt32+128+16(SB)/4, $4 +DATA ·shiftLeftInt32+128+20(SB)/4, $7 +DATA ·shiftLeftInt32+128+24(SB)/4, $2 +DATA ·shiftLeftInt32+128+28(SB)/4, $5 + +DATA ·shiftLeftInt32+160+0(SB)/4, $0 +DATA ·shiftLeftInt32+160+4(SB)/4, $2 +DATA ·shiftLeftInt32+160+8(SB)/4, $4 +DATA ·shiftLeftInt32+160+12(SB)/4, $6 +DATA ·shiftLeftInt32+160+16(SB)/4, $0 +DATA ·shiftLeftInt32+160+20(SB)/4, $2 +DATA ·shiftLeftInt32+160+24(SB)/4, $4 +DATA ·shiftLeftInt32+160+28(SB)/4, $6 + +DATA ·shiftLeftInt32+192+0(SB)/4, $0 +DATA ·shiftLeftInt32+192+4(SB)/4, $1 +DATA ·shiftLeftInt32+192+8(SB)/4, $2 +DATA ·shiftLeftInt32+192+12(SB)/4, $3 +DATA ·shiftLeftInt32+192+16(SB)/4, $4 +DATA ·shiftLeftInt32+192+20(SB)/4, $5 +DATA ·shiftLeftInt32+192+24(SB)/4, $6 +DATA ·shiftLeftInt32+192+28(SB)/4, $7 + +DATA ·shiftLeftInt32+224+0(SB)/4, $0 +DATA ·shiftLeftInt32+224+4(SB)/4, $0 +DATA ·shiftLeftInt32+224+8(SB)/4, $0 +DATA ·shiftLeftInt32+224+12(SB)/4, $0 +DATA ·shiftLeftInt32+224+16(SB)/4, $0 +DATA ·shiftLeftInt32+224+20(SB)/4, $0 +DATA ·shiftLeftInt32+224+24(SB)/4, $0 +DATA ·shiftLeftInt32+224+28(SB)/4, $0 diff --git a/vendor/github.com/parquet-go/bitpack/masks_int64_amd64.s b/vendor/github.com/parquet-go/bitpack/masks_int64_amd64.s new file mode 100644 index 00000000000..39d64714cf4 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/masks_int64_amd64.s @@ -0,0 +1,427 @@ +//go:build !purego + +// Auto-generated by gen_int64_masks.go - DO NOT EDIT + +#include "textflag.h" + +// Pre-computed permutation and shift tables for int64 unpacking +// Each entry is 128 bytes: [permute_0_3][permute_4_7][shifts_0_3][shifts_4_7] +// Format: 4 YMM registers worth of data per 
bitWidth + +GLOBL ·permuteInt64Table(SB), RODATA|NOPTR, $2944 + +// BitWidth 9 (offset 0) +DATA ·permuteInt64Table+0(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+8(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+16(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+24(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+32(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+40(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+48(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+56(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+64(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+72(SB)/8, $0x0000000000000009 +DATA ·permuteInt64Table+80(SB)/8, $0x0000000000000012 +DATA ·permuteInt64Table+88(SB)/8, $0x000000000000001b +DATA ·permuteInt64Table+96(SB)/8, $0x0000000000000004 +DATA ·permuteInt64Table+104(SB)/8, $0x000000000000000d +DATA ·permuteInt64Table+112(SB)/8, $0x0000000000000016 +DATA ·permuteInt64Table+120(SB)/8, $0x000000000000001f + +// BitWidth 10 (offset 128) +DATA ·permuteInt64Table+128(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+136(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+144(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+152(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+160(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+168(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+176(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+184(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+192(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+200(SB)/8, $0x000000000000000a +DATA ·permuteInt64Table+208(SB)/8, $0x0000000000000014 +DATA ·permuteInt64Table+216(SB)/8, $0x000000000000001e +DATA ·permuteInt64Table+224(SB)/8, $0x0000000000000008 +DATA ·permuteInt64Table+232(SB)/8, $0x0000000000000012 +DATA ·permuteInt64Table+240(SB)/8, $0x000000000000001c +DATA ·permuteInt64Table+248(SB)/8, $0x0000000000000006 + +// BitWidth 11 (offset 256) +DATA ·permuteInt64Table+256(SB)/8, $0x0000000100000000 +DATA 
·permuteInt64Table+264(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+272(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+280(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+288(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+296(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+304(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+312(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+320(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+328(SB)/8, $0x000000000000000b +DATA ·permuteInt64Table+336(SB)/8, $0x0000000000000016 +DATA ·permuteInt64Table+344(SB)/8, $0x0000000000000001 +DATA ·permuteInt64Table+352(SB)/8, $0x000000000000000c +DATA ·permuteInt64Table+360(SB)/8, $0x0000000000000017 +DATA ·permuteInt64Table+368(SB)/8, $0x0000000000000002 +DATA ·permuteInt64Table+376(SB)/8, $0x000000000000000d + +// BitWidth 12 (offset 384) +DATA ·permuteInt64Table+384(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+392(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+400(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+408(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+416(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+424(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+432(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+440(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+448(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+456(SB)/8, $0x000000000000000c +DATA ·permuteInt64Table+464(SB)/8, $0x0000000000000018 +DATA ·permuteInt64Table+472(SB)/8, $0x0000000000000004 +DATA ·permuteInt64Table+480(SB)/8, $0x0000000000000010 +DATA ·permuteInt64Table+488(SB)/8, $0x000000000000001c +DATA ·permuteInt64Table+496(SB)/8, $0x0000000000000008 +DATA ·permuteInt64Table+504(SB)/8, $0x0000000000000014 + +// BitWidth 13 (offset 512) +DATA ·permuteInt64Table+512(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+520(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+528(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+536(SB)/8, 
$0x0000000200000001 +DATA ·permuteInt64Table+544(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+552(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+560(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+568(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+576(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+584(SB)/8, $0x000000000000000d +DATA ·permuteInt64Table+592(SB)/8, $0x000000000000001a +DATA ·permuteInt64Table+600(SB)/8, $0x0000000000000007 +DATA ·permuteInt64Table+608(SB)/8, $0x0000000000000014 +DATA ·permuteInt64Table+616(SB)/8, $0x0000000000000001 +DATA ·permuteInt64Table+624(SB)/8, $0x000000000000000e +DATA ·permuteInt64Table+632(SB)/8, $0x000000000000001b + +// BitWidth 14 (offset 640) +DATA ·permuteInt64Table+640(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+648(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+656(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+664(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+672(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+680(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+688(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+696(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+704(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+712(SB)/8, $0x000000000000000e +DATA ·permuteInt64Table+720(SB)/8, $0x000000000000001c +DATA ·permuteInt64Table+728(SB)/8, $0x000000000000000a +DATA ·permuteInt64Table+736(SB)/8, $0x0000000000000018 +DATA ·permuteInt64Table+744(SB)/8, $0x0000000000000006 +DATA ·permuteInt64Table+752(SB)/8, $0x0000000000000014 +DATA ·permuteInt64Table+760(SB)/8, $0x0000000000000002 + +// BitWidth 15 (offset 768) +DATA ·permuteInt64Table+768(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+776(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+784(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+792(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+800(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+808(SB)/8, $0x0000000300000002 +DATA 
·permuteInt64Table+816(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+824(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+832(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+840(SB)/8, $0x000000000000000f +DATA ·permuteInt64Table+848(SB)/8, $0x000000000000001e +DATA ·permuteInt64Table+856(SB)/8, $0x000000000000000d +DATA ·permuteInt64Table+864(SB)/8, $0x000000000000001c +DATA ·permuteInt64Table+872(SB)/8, $0x000000000000000b +DATA ·permuteInt64Table+880(SB)/8, $0x000000000000001a +DATA ·permuteInt64Table+888(SB)/8, $0x0000000000000009 + +// BitWidth 16 (offset 896) +DATA ·permuteInt64Table+896(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+904(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+912(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+920(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+928(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+936(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+944(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+952(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+960(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+968(SB)/8, $0x0000000000000010 +DATA ·permuteInt64Table+976(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+984(SB)/8, $0x0000000000000010 +DATA ·permuteInt64Table+992(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1000(SB)/8, $0x0000000000000010 +DATA ·permuteInt64Table+1008(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1016(SB)/8, $0x0000000000000010 + +// BitWidth 17 (offset 1024) +DATA ·permuteInt64Table+1024(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1032(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1040(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1048(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1056(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1064(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1072(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1080(SB)/8, $0x0000000400000003 +DATA 
·permuteInt64Table+1088(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1096(SB)/8, $0x0000000000000011 +DATA ·permuteInt64Table+1104(SB)/8, $0x0000000000000002 +DATA ·permuteInt64Table+1112(SB)/8, $0x0000000000000013 +DATA ·permuteInt64Table+1120(SB)/8, $0x0000000000000004 +DATA ·permuteInt64Table+1128(SB)/8, $0x0000000000000015 +DATA ·permuteInt64Table+1136(SB)/8, $0x0000000000000006 +DATA ·permuteInt64Table+1144(SB)/8, $0x0000000000000017 + +// BitWidth 18 (offset 1152) +DATA ·permuteInt64Table+1152(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1160(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1168(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1176(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1184(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1192(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1200(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1208(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1216(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1224(SB)/8, $0x0000000000000012 +DATA ·permuteInt64Table+1232(SB)/8, $0x0000000000000004 +DATA ·permuteInt64Table+1240(SB)/8, $0x0000000000000016 +DATA ·permuteInt64Table+1248(SB)/8, $0x0000000000000008 +DATA ·permuteInt64Table+1256(SB)/8, $0x000000000000001a +DATA ·permuteInt64Table+1264(SB)/8, $0x000000000000000c +DATA ·permuteInt64Table+1272(SB)/8, $0x000000000000001e + +// BitWidth 19 (offset 1280) +DATA ·permuteInt64Table+1280(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1288(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1296(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1304(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1312(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1320(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1328(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1336(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+1344(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1352(SB)/8, $0x0000000000000013 
+DATA ·permuteInt64Table+1360(SB)/8, $0x0000000000000006 +DATA ·permuteInt64Table+1368(SB)/8, $0x0000000000000019 +DATA ·permuteInt64Table+1376(SB)/8, $0x000000000000000c +DATA ·permuteInt64Table+1384(SB)/8, $0x000000000000001f +DATA ·permuteInt64Table+1392(SB)/8, $0x0000000000000012 +DATA ·permuteInt64Table+1400(SB)/8, $0x0000000000000005 + +// BitWidth 20 (offset 1408) +DATA ·permuteInt64Table+1408(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1416(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1424(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1432(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1440(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1448(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1456(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1464(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+1472(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1480(SB)/8, $0x0000000000000014 +DATA ·permuteInt64Table+1488(SB)/8, $0x0000000000000008 +DATA ·permuteInt64Table+1496(SB)/8, $0x000000000000001c +DATA ·permuteInt64Table+1504(SB)/8, $0x0000000000000010 +DATA ·permuteInt64Table+1512(SB)/8, $0x0000000000000004 +DATA ·permuteInt64Table+1520(SB)/8, $0x0000000000000018 +DATA ·permuteInt64Table+1528(SB)/8, $0x000000000000000c + +// BitWidth 21 (offset 1536) +DATA ·permuteInt64Table+1536(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1544(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1552(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1560(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1568(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1576(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1584(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1592(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+1600(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1608(SB)/8, $0x0000000000000015 +DATA ·permuteInt64Table+1616(SB)/8, $0x000000000000000a +DATA ·permuteInt64Table+1624(SB)/8, 
$0x000000000000001f +DATA ·permuteInt64Table+1632(SB)/8, $0x0000000000000014 +DATA ·permuteInt64Table+1640(SB)/8, $0x0000000000000009 +DATA ·permuteInt64Table+1648(SB)/8, $0x000000000000001e +DATA ·permuteInt64Table+1656(SB)/8, $0x0000000000000013 + +// BitWidth 22 (offset 1664) +DATA ·permuteInt64Table+1664(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1672(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1680(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1688(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1696(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1704(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1712(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+1720(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+1728(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1736(SB)/8, $0x0000000000000016 +DATA ·permuteInt64Table+1744(SB)/8, $0x000000000000000c +DATA ·permuteInt64Table+1752(SB)/8, $0x0000000000000002 +DATA ·permuteInt64Table+1760(SB)/8, $0x0000000000000018 +DATA ·permuteInt64Table+1768(SB)/8, $0x000000000000000e +DATA ·permuteInt64Table+1776(SB)/8, $0x0000000000000004 +DATA ·permuteInt64Table+1784(SB)/8, $0x000000000000001a + +// BitWidth 23 (offset 1792) +DATA ·permuteInt64Table+1792(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1800(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1808(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1816(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1824(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1832(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1840(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+1848(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+1856(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1864(SB)/8, $0x0000000000000017 +DATA ·permuteInt64Table+1872(SB)/8, $0x000000000000000e +DATA ·permuteInt64Table+1880(SB)/8, $0x0000000000000005 +DATA ·permuteInt64Table+1888(SB)/8, $0x000000000000001c +DATA 
·permuteInt64Table+1896(SB)/8, $0x0000000000000013 +DATA ·permuteInt64Table+1904(SB)/8, $0x000000000000000a +DATA ·permuteInt64Table+1912(SB)/8, $0x0000000000000001 + +// BitWidth 24 (offset 1920) +DATA ·permuteInt64Table+1920(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1928(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+1936(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+1944(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+1952(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1960(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+1968(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+1976(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+1984(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+1992(SB)/8, $0x0000000000000018 +DATA ·permuteInt64Table+2000(SB)/8, $0x0000000000000010 +DATA ·permuteInt64Table+2008(SB)/8, $0x0000000000000008 +DATA ·permuteInt64Table+2016(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+2024(SB)/8, $0x0000000000000018 +DATA ·permuteInt64Table+2032(SB)/8, $0x0000000000000010 +DATA ·permuteInt64Table+2040(SB)/8, $0x0000000000000008 + +// BitWidth 25 (offset 2048) +DATA ·permuteInt64Table+2048(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2056(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2064(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+2072(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+2080(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+2088(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+2096(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+2104(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+2112(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+2120(SB)/8, $0x0000000000000019 +DATA ·permuteInt64Table+2128(SB)/8, $0x0000000000000012 +DATA ·permuteInt64Table+2136(SB)/8, $0x000000000000000b +DATA ·permuteInt64Table+2144(SB)/8, $0x0000000000000004 +DATA ·permuteInt64Table+2152(SB)/8, $0x000000000000001d +DATA ·permuteInt64Table+2160(SB)/8, $0x0000000000000016 
+DATA ·permuteInt64Table+2168(SB)/8, $0x000000000000000f + +// BitWidth 26 (offset 2176) +DATA ·permuteInt64Table+2176(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2184(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2192(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+2200(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+2208(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+2216(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+2224(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+2232(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+2240(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+2248(SB)/8, $0x000000000000001a +DATA ·permuteInt64Table+2256(SB)/8, $0x0000000000000014 +DATA ·permuteInt64Table+2264(SB)/8, $0x000000000000000e +DATA ·permuteInt64Table+2272(SB)/8, $0x0000000000000008 +DATA ·permuteInt64Table+2280(SB)/8, $0x0000000000000002 +DATA ·permuteInt64Table+2288(SB)/8, $0x000000000000001c +DATA ·permuteInt64Table+2296(SB)/8, $0x0000000000000016 + +// BitWidth 27 (offset 2304) +DATA ·permuteInt64Table+2304(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2312(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2320(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+2328(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+2336(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+2344(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+2352(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+2360(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+2368(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+2376(SB)/8, $0x000000000000001b +DATA ·permuteInt64Table+2384(SB)/8, $0x0000000000000016 +DATA ·permuteInt64Table+2392(SB)/8, $0x0000000000000011 +DATA ·permuteInt64Table+2400(SB)/8, $0x000000000000000c +DATA ·permuteInt64Table+2408(SB)/8, $0x0000000000000007 +DATA ·permuteInt64Table+2416(SB)/8, $0x0000000000000002 +DATA ·permuteInt64Table+2424(SB)/8, $0x000000000000001d + +// BitWidth 28 (offset 2432) +DATA 
·permuteInt64Table+2432(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2440(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2448(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+2456(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+2464(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+2472(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+2480(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+2488(SB)/8, $0x0000000700000006 +DATA ·permuteInt64Table+2496(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+2504(SB)/8, $0x000000000000001c +DATA ·permuteInt64Table+2512(SB)/8, $0x0000000000000018 +DATA ·permuteInt64Table+2520(SB)/8, $0x0000000000000014 +DATA ·permuteInt64Table+2528(SB)/8, $0x0000000000000010 +DATA ·permuteInt64Table+2536(SB)/8, $0x000000000000000c +DATA ·permuteInt64Table+2544(SB)/8, $0x0000000000000008 +DATA ·permuteInt64Table+2552(SB)/8, $0x0000000000000004 + +// BitWidth 29 (offset 2560) +DATA ·permuteInt64Table+2560(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2568(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2576(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+2584(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+2592(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+2600(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+2608(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+2616(SB)/8, $0x0000000700000006 +DATA ·permuteInt64Table+2624(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+2632(SB)/8, $0x000000000000001d +DATA ·permuteInt64Table+2640(SB)/8, $0x000000000000001a +DATA ·permuteInt64Table+2648(SB)/8, $0x0000000000000017 +DATA ·permuteInt64Table+2656(SB)/8, $0x0000000000000014 +DATA ·permuteInt64Table+2664(SB)/8, $0x0000000000000011 +DATA ·permuteInt64Table+2672(SB)/8, $0x000000000000000e +DATA ·permuteInt64Table+2680(SB)/8, $0x000000000000000b + +// BitWidth 30 (offset 2688) +DATA ·permuteInt64Table+2688(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2696(SB)/8, $0x0000000100000000 
+DATA ·permuteInt64Table+2704(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+2712(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+2720(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+2728(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+2736(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+2744(SB)/8, $0x0000000700000006 +DATA ·permuteInt64Table+2752(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+2760(SB)/8, $0x000000000000001e +DATA ·permuteInt64Table+2768(SB)/8, $0x000000000000001c +DATA ·permuteInt64Table+2776(SB)/8, $0x000000000000001a +DATA ·permuteInt64Table+2784(SB)/8, $0x0000000000000018 +DATA ·permuteInt64Table+2792(SB)/8, $0x0000000000000016 +DATA ·permuteInt64Table+2800(SB)/8, $0x0000000000000014 +DATA ·permuteInt64Table+2808(SB)/8, $0x0000000000000012 + +// BitWidth 31 (offset 2816) +DATA ·permuteInt64Table+2816(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2824(SB)/8, $0x0000000100000000 +DATA ·permuteInt64Table+2832(SB)/8, $0x0000000200000001 +DATA ·permuteInt64Table+2840(SB)/8, $0x0000000300000002 +DATA ·permuteInt64Table+2848(SB)/8, $0x0000000400000003 +DATA ·permuteInt64Table+2856(SB)/8, $0x0000000500000004 +DATA ·permuteInt64Table+2864(SB)/8, $0x0000000600000005 +DATA ·permuteInt64Table+2872(SB)/8, $0x0000000700000006 +DATA ·permuteInt64Table+2880(SB)/8, $0x0000000000000000 +DATA ·permuteInt64Table+2888(SB)/8, $0x000000000000001f +DATA ·permuteInt64Table+2896(SB)/8, $0x000000000000001e +DATA ·permuteInt64Table+2904(SB)/8, $0x000000000000001d +DATA ·permuteInt64Table+2912(SB)/8, $0x000000000000001c +DATA ·permuteInt64Table+2920(SB)/8, $0x000000000000001b +DATA ·permuteInt64Table+2928(SB)/8, $0x000000000000001a +DATA ·permuteInt64Table+2936(SB)/8, $0x0000000000000019 + +// Total size: 2944 bytes diff --git a/vendor/github.com/parquet-go/bitpack/pack.go b/vendor/github.com/parquet-go/bitpack/pack.go new file mode 100644 index 00000000000..a30bd8d3ea0 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/pack.go @@ 
-0,0 +1,19 @@ +package bitpack + +import ( + "unsafe" + + "github.com/parquet-go/bitpack/unsafecast" +) + +// Pack packs values from src to dst, each value is packed into the given +// bit width regardless of how many bits are needed to represent it. +func Pack[T Int](dst []byte, src []T, bitWidth uint) { + _ = dst[:ByteCount(bitWidth*uint(len(src)))] + switch unsafe.Sizeof(T(0)) { + case 4: + packInt32(dst, unsafecast.Slice[int32](src), bitWidth) + default: + packInt64(dst, unsafecast.Slice[int64](src), bitWidth) + } +} diff --git a/vendor/github.com/parquet-go/bitpack/pack_arm64.go b/vendor/github.com/parquet-go/bitpack/pack_arm64.go new file mode 100644 index 00000000000..202950d5d66 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/pack_arm64.go @@ -0,0 +1,31 @@ +//go:build !purego + +package bitpack + +//go:noescape +func packInt32ARM64(dst []byte, src []int32, bitWidth uint) + +//go:noescape +func packInt32NEON(dst []byte, src []int32, bitWidth uint) + +//go:noescape +func packInt64ARM64(dst []byte, src []int64, bitWidth uint) + +//go:noescape +func packInt64NEON(dst []byte, src []int64, bitWidth uint) + +func packInt32(dst []byte, src []int32, bitWidth uint) { + if bitWidth <= 8 { + packInt32NEON(dst, src, bitWidth) + } else { + packInt32ARM64(dst, src, bitWidth) + } +} + +func packInt64(dst []byte, src []int64, bitWidth uint) { + if bitWidth <= 8 { + packInt64NEON(dst, src, bitWidth) + } else { + packInt64ARM64(dst, src, bitWidth) + } +} diff --git a/vendor/github.com/parquet-go/bitpack/pack_int32_arm64.s b/vendor/github.com/parquet-go/bitpack/pack_int32_arm64.s new file mode 100644 index 00000000000..5c47327cd99 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/pack_int32_arm64.s @@ -0,0 +1,462 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// func packInt32ARM64(dst []byte, src []int32, bitWidth uint) +TEXT ·packInt32ARM64(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD src_base+24(FP), R1 
// R1 = src pointer + MOVD src_len+32(FP), R2 // R2 = src length + MOVD bitWidth+48(FP), R3 // R3 = bitWidth + + // Handle bitWidth == 0 + CBZ R3, done + + // R4 = bitMask = (1 << bitWidth) - 1 + MOVD $1, R4 + LSL R3, R4, R4 + SUB $1, R4, R4 + + // R5 = buffer (64-bit accumulator) + // R6 = bufferedBits + // R7 = byteIndex + // R8 = loop counter (src index) + MOVD $0, R5 + MOVD $0, R6 + MOVD $0, R7 + MOVD $0, R8 + + // Main loop: process each value from src +loop: + CMP R2, R8 + BEQ flush_remaining + + // Load value from src[R8] + LSL $2, R8, R16 // R16 = R8 * 4 + MOVWU (R1)(R16), R9 // R9 = src[R8] + + // Mask the value: R9 = value & bitMask + AND R4, R9, R9 + + // Add to buffer: buffer |= (value << bufferedBits) + LSL R6, R9, R10 // R10 = value << bufferedBits + ORR R10, R5, R5 // buffer |= R10 + + // bufferedBits += bitWidth + ADD R3, R6, R6 + + // Increment source index + ADD $1, R8, R8 + +flush_loop: + // While bufferedBits >= 32, flush 32-bit words + CMP $32, R6 + BLT loop + + // Write 32-bit word to dst[byteIndex] + MOVW R5, (R0)(R7) + + // buffer >>= 32 + LSR $32, R5, R5 + + // bufferedBits -= 32 + SUB $32, R6, R6 + + // byteIndex += 4 + ADD $4, R7, R7 + + B flush_loop + +flush_remaining: + // If no bits remaining, we're done + CBZ R6, done + + // Calculate remaining bytes = (bufferedBits + 7) / 8 + ADD $7, R6, R11 + LSR $3, R11, R11 // R11 = remainingBytes + + MOVD $0, R12 // R12 = i (byte counter) + +flush_byte_loop: + CMP R11, R12 + BEQ done + + // dst[byteIndex] = byte(buffer) + MOVB R5, (R0)(R7) + + // buffer >>= 8 + LSR $8, R5, R5 + + // byteIndex++, i++ + ADD $1, R7, R7 + ADD $1, R12, R12 + + B flush_byte_loop + +done: + RET + +// func packInt32NEON(dst []byte, src []int32, bitWidth uint) +TEXT ·packInt32NEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD src_base+24(FP), R1 // R1 = src pointer + MOVD src_len+32(FP), R2 // R2 = src length + MOVD bitWidth+48(FP), R3 // R3 = bitWidth + + // Handle bitWidth == 0 + CBZ R3, 
neon_done + + // Initialize processed count to 0 + MOVD $0, R5 + + // Check if we have at least 4 values to process with NEON paths + CMP $4, R2 + BLT neon_done // Not enough values, return and let Go wrapper handle it + + // Determine which NEON path to use based on bitWidth + CMP $1, R3 + BEQ neon_1bit + CMP $2, R3 + BEQ neon_2bit + CMP $3, R3 + BEQ neon_3bit + CMP $4, R3 + BEQ neon_4bit + CMP $5, R3 + BEQ neon_5bit + CMP $6, R3 + BEQ neon_6bit + CMP $7, R3 + BEQ neon_7bit + CMP $8, R3 + BEQ neon_8bit + + // For other bit widths, return without processing + // The Go wrapper will call the scalar version + RET + +neon_1bit: + // BitWidth 1: Pack 8 int32 values into 1 byte + MOVD R2, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length + MOVD $0, R5 // R5 = index + CMP $0, R4 + BEQ neon_done + +neon_1bit_loop: + MOVWU (R1), R6 + AND $1, R6, R6 + MOVWU 4(R1), R7 + AND $1, R7, R7 + ORR R7<<1, R6, R6 + MOVWU 8(R1), R7 + AND $1, R7, R7 + ORR R7<<2, R6, R6 + MOVWU 12(R1), R7 + AND $1, R7, R7 + ORR R7<<3, R6, R6 + MOVWU 16(R1), R7 + AND $1, R7, R7 + ORR R7<<4, R6, R6 + MOVWU 20(R1), R7 + AND $1, R7, R7 + ORR R7<<5, R6, R6 + MOVWU 24(R1), R7 + AND $1, R7, R7 + ORR R7<<6, R6, R6 + MOVWU 28(R1), R7 + AND $1, R7, R7 + ORR R7<<7, R6, R6 + MOVB R6, (R0) + ADD $32, R1, R1 + ADD $1, R0, R0 + ADD $8, R5, R5 + CMP R4, R5 + BLT neon_1bit_loop + B neon_done + +neon_2bit: + MOVD R2, R4 + LSR $2, R4, R4 + LSL $2, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_2bit_loop: + MOVWU (R1), R6 + AND $3, R6, R6 + MOVWU 4(R1), R7 + AND $3, R7, R7 + ORR R7<<2, R6, R6 + MOVWU 8(R1), R7 + AND $3, R7, R7 + ORR R7<<4, R6, R6 + MOVWU 12(R1), R7 + AND $3, R7, R7 + ORR R7<<6, R6, R6 + MOVB R6, (R0) + ADD $16, R1, R1 + ADD $1, R0, R0 + ADD $4, R5, R5 + CMP R4, R5 + BLT neon_2bit_loop + B neon_done + +neon_3bit: + MOVD R2, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_3bit_loop: + MOVWU (R1), R6 + AND $7, R6, R6 + MOVWU 4(R1), 
R7 + AND $7, R7, R7 + ORR R7<<3, R6, R6 + MOVWU 8(R1), R7 + AND $7, R7, R7 + ORR R7<<6, R6, R6 + MOVWU 12(R1), R7 + AND $7, R7, R7 + ORR R7<<9, R6, R6 + MOVWU 16(R1), R7 + AND $7, R7, R7 + ORR R7<<12, R6, R6 + MOVWU 20(R1), R7 + AND $7, R7, R7 + ORR R7<<15, R6, R6 + MOVWU 24(R1), R7 + AND $7, R7, R7 + ORR R7<<18, R6, R6 + MOVWU 28(R1), R7 + AND $7, R7, R7 + ORR R7<<21, R6, R6 + MOVB R6, (R0) + LSR $8, R6, R7 + MOVB R7, 1(R0) + LSR $16, R6, R7 + MOVB R7, 2(R0) + ADD $32, R1, R1 + ADD $3, R0, R0 + ADD $8, R5, R5 + CMP R4, R5 + BLT neon_3bit_loop + B neon_done + +neon_4bit: + MOVD R2, R4 + LSR $2, R4, R4 + LSL $2, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_4bit_loop: + MOVWU (R1), R6 + AND $15, R6, R6 + MOVWU 4(R1), R7 + AND $15, R7, R7 + ORR R7<<4, R6, R6 + MOVWU 8(R1), R7 + AND $15, R7, R7 + ORR R7<<8, R6, R6 + MOVWU 12(R1), R7 + AND $15, R7, R7 + ORR R7<<12, R6, R6 + MOVH R6, (R0) + ADD $16, R1, R1 + ADD $2, R0, R0 + ADD $4, R5, R5 + CMP R4, R5 + BLT neon_4bit_loop + B neon_done + +neon_5bit: + MOVD R2, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_5bit_loop: + MOVD $0, R6 + MOVWU (R1), R7 + AND $31, R7, R7 + ORR R7, R6, R6 + MOVWU 4(R1), R7 + AND $31, R7, R7 + ORR R7<<5, R6, R6 + MOVWU 8(R1), R7 + AND $31, R7, R7 + ORR R7<<10, R6, R6 + MOVWU 12(R1), R7 + AND $31, R7, R7 + ORR R7<<15, R6, R6 + MOVWU 16(R1), R7 + AND $31, R7, R7 + ORR R7<<20, R6, R6 + MOVWU 20(R1), R7 + AND $31, R7, R7 + ORR R7<<25, R6, R6 + MOVWU 24(R1), R7 + AND $31, R7, R7 + ORR R7<<30, R6, R6 + MOVWU 28(R1), R7 + AND $31, R7, R7 + ORR R7<<35, R6, R6 + MOVB R6, (R0) + LSR $8, R6, R7 + MOVB R7, 1(R0) + LSR $16, R6, R7 + MOVB R7, 2(R0) + LSR $24, R6, R7 + MOVB R7, 3(R0) + LSR $32, R6, R7 + MOVB R7, 4(R0) + ADD $32, R1, R1 + ADD $5, R0, R0 + ADD $8, R5, R5 + CMP R4, R5 + BLT neon_5bit_loop + B neon_done + +neon_6bit: + MOVD R2, R4 + LSR $2, R4, R4 + LSL $2, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_6bit_loop: + MOVWU (R1), R6 + 
AND $63, R6, R6 + MOVWU 4(R1), R7 + AND $63, R7, R7 + ORR R7<<6, R6, R6 + MOVWU 8(R1), R7 + AND $63, R7, R7 + ORR R7<<12, R6, R6 + MOVWU 12(R1), R7 + AND $63, R7, R7 + ORR R7<<18, R6, R6 + MOVB R6, (R0) + LSR $8, R6, R7 + MOVB R7, 1(R0) + LSR $16, R6, R7 + MOVB R7, 2(R0) + ADD $16, R1, R1 + ADD $3, R0, R0 + ADD $4, R5, R5 + CMP R4, R5 + BLT neon_6bit_loop + B neon_done + +neon_7bit: + MOVD R2, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_7bit_loop: + MOVD $0, R6 + MOVWU (R1), R7 + AND $127, R7, R7 + ORR R7, R6, R6 + MOVWU 4(R1), R7 + AND $127, R7, R7 + ORR R7<<7, R6, R6 + MOVWU 8(R1), R7 + AND $127, R7, R7 + ORR R7<<14, R6, R6 + MOVWU 12(R1), R7 + AND $127, R7, R7 + ORR R7<<21, R6, R6 + MOVWU 16(R1), R7 + AND $127, R7, R7 + ORR R7<<28, R6, R6 + MOVWU 20(R1), R7 + AND $127, R7, R7 + ORR R7<<35, R6, R6 + MOVWU 24(R1), R7 + AND $127, R7, R7 + ORR R7<<42, R6, R6 + MOVWU 28(R1), R7 + AND $127, R7, R7 + ORR R7<<49, R6, R6 + MOVB R6, (R0) + LSR $8, R6, R7 + MOVB R7, 1(R0) + LSR $16, R6, R7 + MOVB R7, 2(R0) + LSR $24, R6, R7 + MOVB R7, 3(R0) + LSR $32, R6, R7 + MOVB R7, 4(R0) + LSR $40, R6, R7 + MOVB R7, 5(R0) + LSR $48, R6, R7 + MOVB R7, 6(R0) + ADD $32, R1, R1 + ADD $7, R0, R0 + ADD $8, R5, R5 + CMP R4, R5 + BLT neon_7bit_loop + B neon_done + +neon_8bit: + MOVD R2, R4 + LSR $2, R4, R4 + LSL $2, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_8bit_loop: + MOVWU (R1), R6 + MOVB R6, (R0) + MOVWU 4(R1), R6 + MOVB R6, 1(R0) + MOVWU 8(R1), R6 + MOVB R6, 2(R0) + MOVWU 12(R1), R6 + MOVB R6, 3(R0) + ADD $16, R1, R1 + ADD $4, R0, R0 + ADD $4, R5, R5 + CMP R4, R5 + BLT neon_8bit_loop + +neon_done: + // After NEON processing, handle any remainder with scalar code + // Check if there are remaining values to process + CMP R2, R5 // R5 = processed count, R2 = total length + BGE neon_ret // If processed >= total, we're done + + // Calculate remainder: adjust src/dst pointers and length + // Advance src pointer by (R5 * 4) bytes + LSL 
$2, R5, R16 + ADD R16, R1, R1 + + // Calculate packed bytes for processed values and advance dst + MUL R3, R5, R16 // R16 = processed * bitWidth (in bits) + LSR $3, R16, R16 // R16 = packed bytes + ADD R16, R0, R0 + + // Update remaining length + SUB R5, R2, R2 + + // Jump to scalar implementation for remainder + B ·packInt32ARM64(SB) + +neon_ret: + RET diff --git a/vendor/github.com/parquet-go/bitpack/pack_int64_arm64.s b/vendor/github.com/parquet-go/bitpack/pack_int64_arm64.s new file mode 100644 index 00000000000..9b4137d110d --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/pack_int64_arm64.s @@ -0,0 +1,514 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// func packInt64ARM64(dst []byte, src []int64, bitWidth uint) +TEXT ·packInt64ARM64(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD src_base+24(FP), R1 // R1 = src pointer + MOVD src_len+32(FP), R2 // R2 = src length + MOVD bitWidth+48(FP), R3 // R3 = bitWidth + + // Handle bitWidth == 0 + CBZ R3, done + + // Special case: bitWidth == 64 (no packing needed) + CMP $64, R3 + BEQ copy_direct + + // R4 = bitMask = (1 << bitWidth) - 1 + MOVD $1, R4 + LSL R3, R4, R4 + SUB $1, R4, R4 + + // R5 = bufferLo (64-bit accumulator) + // R6 = bufferHi (overflow buffer) + // R7 = bufferedBits + // R8 = byteIndex + // R9 = loop counter (src index) + MOVD $0, R5 + MOVD $0, R6 + MOVD $0, R7 + MOVD $0, R8 + MOVD $0, R9 + + // Main loop: process each value from src +loop: + CMP R2, R9 + BEQ flush_remaining + + // Load value from src[R9] + LSL $3, R9, R16 // R16 = R9 * 8 + MOVD (R1)(R16), R10 // R10 = src[R9] + + // Mask the value: R10 = value & bitMask + AND R4, R10, R10 + + // Check if value fits entirely in low buffer + ADD R3, R7, R11 // R11 = bufferedBits + bitWidth + CMP $64, R11 + BGT spans_buffers + + // Value fits in low buffer + LSL R7, R10, R12 // R12 = value << bufferedBits + ORR R12, R5, R5 // bufferLo |= R12 + MOVD R11, R7 // bufferedBits = R11 + B 
increment_index + +spans_buffers: + // Value spans low and high buffers + // bitsInLo = 64 - bufferedBits + MOVD $64, R12 + SUB R7, R12, R12 // R12 = bitsInLo + + // bufferLo |= value << bufferedBits + LSL R7, R10, R13 + ORR R13, R5, R5 + + // bufferHi = value >> bitsInLo + LSR R12, R10, R6 + + // bufferedBits += bitWidth + MOVD R11, R7 + +increment_index: + // Increment source index + ADD $1, R9, R9 + +flush_loop: + // While bufferedBits >= 64, flush 64-bit words + CMP $64, R7 + BLT loop + + // Write 64-bit word to dst[byteIndex] + MOVD R5, (R0)(R8) + + // bufferLo = bufferHi + MOVD R6, R5 + + // bufferHi = 0 + MOVD $0, R6 + + // bufferedBits -= 64 + SUB $64, R7, R7 + + // byteIndex += 8 + ADD $8, R8, R8 + + B flush_loop + +flush_remaining: + // If no bits remaining, we're done + CBZ R7, done + + // Calculate remaining bytes = (bufferedBits + 7) / 8 + ADD $7, R7, R11 + LSR $3, R11, R11 // R11 = remainingBytes + + MOVD $0, R12 // R12 = i (byte counter) + +flush_byte_loop: + CMP R11, R12 + BEQ done + + // dst[byteIndex] = byte(bufferLo) + MOVB R5, (R0)(R8) + + // bufferLo >>= 8 + LSR $8, R5, R5 + + // byteIndex++, i++ + ADD $1, R8, R8 + ADD $1, R12, R12 + + B flush_byte_loop + +copy_direct: + // bitWidth == 64: direct copy + MOVD $0, R9 // R9 = index + MOVD $0, R10 // R10 = byte offset + +copy_loop: + CMP R2, R9 + BEQ done + + // Load src[i] + LSL $3, R9, R16 + MOVD (R1)(R16), R11 + + // Store to dst[i*8] + MOVD R11, (R0)(R10) + + // i++, offset += 8 + ADD $1, R9, R9 + ADD $8, R10, R10 + + B copy_loop + +done: + RET + +// func packInt64NEON(dst []byte, src []int64, bitWidth uint) +TEXT ·packInt64NEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD src_base+24(FP), R1 // R1 = src pointer + MOVD src_len+32(FP), R2 // R2 = src length + MOVD bitWidth+48(FP), R3 // R3 = bitWidth + + // Handle bitWidth == 0 + CBZ R3, neon_done + + // Initialize processed count to 0 + MOVD $0, R5 + + // Check if we have at least 4 values to process with NEON paths 
+ CMP $4, R2 + BLT neon_done // Not enough values, return and let Go wrapper handle it + + // Determine which NEON path to use based on bitWidth + CMP $1, R3 + BEQ neon_1bit + CMP $2, R3 + BEQ neon_2bit + CMP $3, R3 + BEQ neon_3bit + CMP $4, R3 + BEQ neon_4bit + CMP $5, R3 + BEQ neon_5bit + CMP $6, R3 + BEQ neon_6bit + CMP $7, R3 + BEQ neon_7bit + CMP $8, R3 + BEQ neon_8bit + + // For other bit widths, return without processing + // The Go wrapper will call the scalar version + RET + +neon_1bit: + // BitWidth 1: Pack 8 int64 values into 1 byte + MOVD R2, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length + MOVD $0, R5 // R5 = index + CMP $0, R4 + BEQ neon_done + +neon_1bit_loop: + MOVD (R1), R6 + AND $1, R6, R6 + MOVD 8(R1), R7 + AND $1, R7, R7 + ORR R7<<1, R6, R6 + MOVD 16(R1), R7 + AND $1, R7, R7 + ORR R7<<2, R6, R6 + MOVD 24(R1), R7 + AND $1, R7, R7 + ORR R7<<3, R6, R6 + MOVD 32(R1), R7 + AND $1, R7, R7 + ORR R7<<4, R6, R6 + MOVD 40(R1), R7 + AND $1, R7, R7 + ORR R7<<5, R6, R6 + MOVD 48(R1), R7 + AND $1, R7, R7 + ORR R7<<6, R6, R6 + MOVD 56(R1), R7 + AND $1, R7, R7 + ORR R7<<7, R6, R6 + MOVB R6, (R0) + ADD $64, R1, R1 + ADD $1, R0, R0 + ADD $8, R5, R5 + CMP R4, R5 + BLT neon_1bit_loop + B neon_done + +neon_2bit: + MOVD R2, R4 + LSR $2, R4, R4 + LSL $2, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_2bit_loop: + MOVD (R1), R6 + AND $3, R6, R6 + MOVD 8(R1), R7 + AND $3, R7, R7 + ORR R7<<2, R6, R6 + MOVD 16(R1), R7 + AND $3, R7, R7 + ORR R7<<4, R6, R6 + MOVD 24(R1), R7 + AND $3, R7, R7 + ORR R7<<6, R6, R6 + MOVB R6, (R0) + ADD $32, R1, R1 + ADD $1, R0, R0 + ADD $4, R5, R5 + CMP R4, R5 + BLT neon_2bit_loop + B neon_done + +neon_3bit: + MOVD R2, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_3bit_loop: + MOVD (R1), R6 + AND $7, R6, R6 + MOVD 8(R1), R7 + AND $7, R7, R7 + ORR R7<<3, R6, R6 + MOVD 16(R1), R7 + AND $7, R7, R7 + ORR R7<<6, R6, R6 + MOVD 24(R1), R7 + AND $7, R7, R7 + ORR R7<<9, 
R6, R6 + MOVD 32(R1), R7 + AND $7, R7, R7 + ORR R7<<12, R6, R6 + MOVD 40(R1), R7 + AND $7, R7, R7 + ORR R7<<15, R6, R6 + MOVD 48(R1), R7 + AND $7, R7, R7 + ORR R7<<18, R6, R6 + MOVD 56(R1), R7 + AND $7, R7, R7 + ORR R7<<21, R6, R6 + MOVB R6, (R0) + LSR $8, R6, R7 + MOVB R7, 1(R0) + LSR $16, R6, R7 + MOVB R7, 2(R0) + ADD $64, R1, R1 + ADD $3, R0, R0 + ADD $8, R5, R5 + CMP R4, R5 + BLT neon_3bit_loop + B neon_done + +neon_4bit: + MOVD R2, R4 + LSR $2, R4, R4 + LSL $2, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_4bit_loop: + MOVD (R1), R6 + AND $15, R6, R6 + MOVD 8(R1), R7 + AND $15, R7, R7 + ORR R7<<4, R6, R6 + MOVD 16(R1), R7 + AND $15, R7, R7 + ORR R7<<8, R6, R6 + MOVD 24(R1), R7 + AND $15, R7, R7 + ORR R7<<12, R6, R6 + MOVH R6, (R0) + ADD $32, R1, R1 + ADD $2, R0, R0 + ADD $4, R5, R5 + CMP R4, R5 + BLT neon_4bit_loop + B neon_done + +neon_5bit: + MOVD R2, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_5bit_loop: + MOVD $0, R6 + MOVD (R1), R7 + AND $31, R7, R7 + ORR R7, R6, R6 + MOVD 8(R1), R7 + AND $31, R7, R7 + ORR R7<<5, R6, R6 + MOVD 16(R1), R7 + AND $31, R7, R7 + ORR R7<<10, R6, R6 + MOVD 24(R1), R7 + AND $31, R7, R7 + ORR R7<<15, R6, R6 + MOVD 32(R1), R7 + AND $31, R7, R7 + ORR R7<<20, R6, R6 + MOVD 40(R1), R7 + AND $31, R7, R7 + ORR R7<<25, R6, R6 + MOVD 48(R1), R7 + AND $31, R7, R7 + ORR R7<<30, R6, R6 + MOVD 56(R1), R7 + AND $31, R7, R7 + ORR R7<<35, R6, R6 + MOVB R6, (R0) + LSR $8, R6, R7 + MOVB R7, 1(R0) + LSR $16, R6, R7 + MOVB R7, 2(R0) + LSR $24, R6, R7 + MOVB R7, 3(R0) + LSR $32, R6, R7 + MOVB R7, 4(R0) + ADD $64, R1, R1 + ADD $5, R0, R0 + ADD $8, R5, R5 + CMP R4, R5 + BLT neon_5bit_loop + B neon_done + +neon_6bit: + MOVD R2, R4 + LSR $2, R4, R4 + LSL $2, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_6bit_loop: + MOVD (R1), R6 + AND $63, R6, R6 + MOVD 8(R1), R7 + AND $63, R7, R7 + ORR R7<<6, R6, R6 + MOVD 16(R1), R7 + AND $63, R7, R7 + ORR R7<<12, R6, R6 + MOVD 24(R1), R7 + AND $63, 
R7, R7 + ORR R7<<18, R6, R6 + MOVB R6, (R0) + LSR $8, R6, R7 + MOVB R7, 1(R0) + LSR $16, R6, R7 + MOVB R7, 2(R0) + ADD $32, R1, R1 + ADD $3, R0, R0 + ADD $4, R5, R5 + CMP R4, R5 + BLT neon_6bit_loop + B neon_done + +neon_7bit: + MOVD R2, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_7bit_loop: + MOVD $0, R6 + MOVD (R1), R7 + AND $127, R7, R7 + ORR R7, R6, R6 + MOVD 8(R1), R7 + AND $127, R7, R7 + ORR R7<<7, R6, R6 + MOVD 16(R1), R7 + AND $127, R7, R7 + ORR R7<<14, R6, R6 + MOVD 24(R1), R7 + AND $127, R7, R7 + ORR R7<<21, R6, R6 + MOVD 32(R1), R7 + AND $127, R7, R7 + ORR R7<<28, R6, R6 + MOVD 40(R1), R7 + AND $127, R7, R7 + ORR R7<<35, R6, R6 + MOVD 48(R1), R7 + AND $127, R7, R7 + ORR R7<<42, R6, R6 + MOVD 56(R1), R7 + AND $127, R7, R7 + ORR R7<<49, R6, R6 + MOVB R6, (R0) + LSR $8, R6, R7 + MOVB R7, 1(R0) + LSR $16, R6, R7 + MOVB R7, 2(R0) + LSR $24, R6, R7 + MOVB R7, 3(R0) + LSR $32, R6, R7 + MOVB R7, 4(R0) + LSR $40, R6, R7 + MOVB R7, 5(R0) + LSR $48, R6, R7 + MOVB R7, 6(R0) + ADD $64, R1, R1 + ADD $7, R0, R0 + ADD $8, R5, R5 + CMP R4, R5 + BLT neon_7bit_loop + B neon_done + +neon_8bit: + MOVD R2, R4 + LSR $2, R4, R4 + LSL $2, R4, R4 + MOVD $0, R5 + CMP $0, R4 + BEQ neon_done + +neon_8bit_loop: + MOVD (R1), R6 + MOVB R6, (R0) + MOVD 8(R1), R6 + MOVB R6, 1(R0) + MOVD 16(R1), R6 + MOVB R6, 2(R0) + MOVD 24(R1), R6 + MOVB R6, 3(R0) + ADD $32, R1, R1 + ADD $4, R0, R0 + ADD $4, R5, R5 + CMP R4, R5 + BLT neon_8bit_loop + +neon_done: + // After NEON processing, handle any remainder with scalar code + // Check if there are remaining values to process + CMP R2, R5 // R5 = processed count, R2 = total length + BGE neon_ret // If processed >= total, we're done + + // Calculate remainder: adjust src/dst pointers and length + // Advance src pointer by (R5 * 8) bytes + LSL $3, R5, R16 + ADD R16, R1, R1 + + // Calculate packed bytes for processed values and advance dst + MUL R3, R5, R16 // R16 = processed * bitWidth (in bits) + LSR $3, R16, 
R16 // R16 = packed bytes + ADD R16, R0, R0 + + // Update remaining length + SUB R5, R2, R2 + + // Jump to scalar implementation for remainder + B ·packInt64ARM64(SB) + +neon_ret: + RET diff --git a/vendor/github.com/parquet-go/bitpack/pack_purego.go b/vendor/github.com/parquet-go/bitpack/pack_purego.go new file mode 100644 index 00000000000..8d02d75ffbc --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/pack_purego.go @@ -0,0 +1,94 @@ +//go:build purego || !arm64 + +package bitpack + +import "encoding/binary" + +func packInt32(dst []byte, src []int32, bitWidth uint) { + if bitWidth == 0 { + return + } + + bitMask := uint32(1<= 32 { + binary.LittleEndian.PutUint32(dst[byteIndex:], uint32(buffer)) + buffer >>= 32 + bufferedBits -= 32 + byteIndex += 4 + } + } + + // Flush remaining bits + if bufferedBits > 0 { + // Only write the bytes we need + remainingBytes := (bufferedBits + 7) / 8 + for i := uint(0); i < remainingBytes; i++ { + dst[byteIndex] = byte(buffer) + buffer >>= 8 + byteIndex++ + } + } +} + +func packInt64(dst []byte, src []int64, bitWidth uint) { + if bitWidth == 0 { + return + } + if bitWidth == 64 { + // Special case: no packing needed, direct copy + for i, v := range src { + binary.LittleEndian.PutUint64(dst[i*8:], uint64(v)) + } + return + } + + bitMask := uint64(1<> bitsInLo + bufferedBits += bitWidth + } + + // Flush complete 64-bit words + for bufferedBits >= 64 { + binary.LittleEndian.PutUint64(dst[byteIndex:], bufferLo) + bufferLo = bufferHi + bufferHi = 0 + bufferedBits -= 64 + byteIndex += 8 + } + } + + // Flush remaining bits + if bufferedBits > 0 { + remainingBytes := (bufferedBits + 7) / 8 + for i := uint(0); i < remainingBytes; i++ { + dst[byteIndex] = byte(bufferLo) + bufferLo >>= 8 + byteIndex++ + } + } +} diff --git a/vendor/github.com/parquet-go/bitpack/unpack.go b/vendor/github.com/parquet-go/bitpack/unpack.go new file mode 100644 index 00000000000..2396e0dd960 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack.go 
@@ -0,0 +1,29 @@ +package bitpack + +import ( + "unsafe" + + "github.com/parquet-go/bitpack/unsafecast" +) + +// PaddingInt32 is the padding expected to exist after the end of input buffers +// for the UnpackInt32 algorithm to avoid reading beyond the end of the input. +const PaddingInt32 = 16 + +// PaddingInt64 is the padding expected to exist after the end of input buffers +// for the UnpackInt64 algorithm to avoid reading beyond the end of the input. +const PaddingInt64 = 32 + +// Unpack unpacks values from src to dst, each value is unpacked from the given +// bit width regardless of how many bits are needed to represent it. +func Unpack[T Int](dst []T, src []byte, bitWidth uint) { + sizeofT := uint(unsafe.Sizeof(T(0))) + padding := (8 * sizeofT) / 2 // 32 bits => 16, 64 bits => 32 + _ = src[:ByteCount(bitWidth*uint(len(dst))+8*padding)] + switch sizeofT { + case 4: + unpackInt32(unsafecast.Slice[int32](dst), src, bitWidth) + default: + unpackInt64(unsafecast.Slice[int64](dst), src, bitWidth) + } +} diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_1bit_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int32_1bit_arm64.s new file mode 100644 index 00000000000..6f6cb28379c --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_1bit_arm64.s @@ -0,0 +1,184 @@ +//go:build !purego + +#include "textflag.h" +#include "unpack_neon_macros_arm64.h" + +// unpackInt32x1bitNEON implements NEON unpacking for bitWidth=1 using direct bit manipulation +// Each byte contains 8 bits: [bit7][bit6][bit5][bit4][bit3][bit2][bit1][bit0] +// +// func unpackInt32x1bitNEON(dst []int32, src []byte, bitWidth uint) +TEXT ·unpackInt32x1bitNEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth (should be 1) + + MOVD $0, R5 // R5 = index (initialize early for tail path) + + // Check if we have at least 64 values to 
process + CMP $64, R1 + BLT neon1_tail + + // Round down to multiple of 64 for NEON processing + MOVD R1, R4 + LSR $6, R4, R4 // R4 = len / 64 + LSL $6, R4, R4 // R4 = aligned length (multiple of 64) + + // Load mask for 1 bit (0x01010101...) + MOVD $0x0101010101010101, R6 + VMOV R6, V31.D[0] + VMOV R6, V31.D[1] // V31 = mask for single bits + +neon1_loop: + // Load 8 bytes (contains 64 x 1-bit values) + VLD1 (R2), [V0.B8] + + // Extract each bit position (8 separate streams) + VAND V31.B16, V0.B16, V1.B16 // V1 = bit 0 + + VUSHR $1, V0.B16, V2.B16 + VAND V31.B16, V2.B16, V2.B16 // V2 = bit 1 + + VUSHR $2, V0.B16, V3.B16 + VAND V31.B16, V3.B16, V3.B16 // V3 = bit 2 + + VUSHR $3, V0.B16, V4.B16 + VAND V31.B16, V4.B16, V4.B16 // V4 = bit 3 + + VUSHR $4, V0.B16, V5.B16 + VAND V31.B16, V5.B16, V5.B16 // V5 = bit 4 + + VUSHR $5, V0.B16, V6.B16 + VAND V31.B16, V6.B16, V6.B16 // V6 = bit 5 + + VUSHR $6, V0.B16, V7.B16 + VAND V31.B16, V7.B16, V7.B16 // V7 = bit 6 + + VUSHR $7, V0.B16, V8.B16 + VAND V31.B16, V8.B16, V8.B16 // V8 = bit 7 + + // Stage 1: ZIP pairs (8 streams → 4 streams of pairs) + VZIP1 V2.B8, V1.B8, V9.B8 // V9 = [bit0,bit1] interleaved + VZIP1 V4.B8, V3.B8, V10.B8 // V10 = [bit2,bit3] interleaved + VZIP1 V6.B8, V5.B8, V11.B8 // V11 = [bit4,bit5] interleaved + VZIP1 V8.B8, V7.B8, V12.B8 // V12 = [bit6,bit7] interleaved + + VZIP2 V2.B8, V1.B8, V13.B8 // V13 = [bit0,bit1] upper half + VZIP2 V4.B8, V3.B8, V14.B8 // V14 = [bit2,bit3] upper half + VZIP2 V6.B8, V5.B8, V15.B8 // V15 = [bit4,bit5] upper half + VZIP2 V8.B8, V7.B8, V16.B8 // V16 = [bit6,bit7] upper half + + // Stage 2: ZIP quads (4 streams → 2 streams of quads) + VZIP1 V10.H4, V9.H4, V17.H4 // V17 = [0,1,2,3] interleaved + VZIP1 V12.H4, V11.H4, V18.H4 // V18 = [4,5,6,7] interleaved + VZIP2 V10.H4, V9.H4, V19.H4 // V19 = [0,1,2,3] next + VZIP2 V12.H4, V11.H4, V20.H4 // V20 = [4,5,6,7] next + + VZIP1 V14.H4, V13.H4, V21.H4 // V21 = upper [0,1,2,3] + VZIP1 V16.H4, V15.H4, V22.H4 // V22 = upper [4,5,6,7] 
+ VZIP2 V14.H4, V13.H4, V23.H4 // V23 = upper [0,1,2,3] next + VZIP2 V16.H4, V15.H4, V24.H4 // V24 = upper [4,5,6,7] next + + // Stage 3: ZIP octets (2 streams → fully sequential) + VZIP1 V18.S2, V17.S2, V25.S2 // V25 = values 0-7 + VZIP2 V18.S2, V17.S2, V26.S2 // V26 = values 8-15 + VZIP1 V20.S2, V19.S2, V27.S2 // V27 = values 16-23 + VZIP2 V20.S2, V19.S2, V28.S2 // V28 = values 24-31 + VZIP1 V22.S2, V21.S2, V1.S2 // V1 = values 32-39 + VZIP2 V22.S2, V21.S2, V2.S2 // V2 = values 40-47 + VZIP1 V24.S2, V23.S2, V3.S2 // V3 = values 48-55 + VZIP2 V24.S2, V23.S2, V4.S2 // V4 = values 56-63 + + // Widen to int32 and store - Process first 32 values + USHLL_8H_8B(5, 25) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + VST1 [V6.S4, V7.S4], (R0) + ADD $32, R0, R0 + + USHLL_8H_8B(5, 26) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + VST1 [V6.S4, V7.S4], (R0) + ADD $32, R0, R0 + + USHLL_8H_8B(5, 27) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + VST1 [V6.S4, V7.S4], (R0) + ADD $32, R0, R0 + + USHLL_8H_8B(5, 28) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + VST1 [V6.S4, V7.S4], (R0) + ADD $32, R0, R0 + + // Process second 32 values + USHLL_8H_8B(5, 1) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + VST1 [V6.S4, V7.S4], (R0) + ADD $32, R0, R0 + + USHLL_8H_8B(5, 2) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + VST1 [V6.S4, V7.S4], (R0) + ADD $32, R0, R0 + + USHLL_8H_8B(5, 3) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + VST1 [V6.S4, V7.S4], (R0) + ADD $32, R0, R0 + + USHLL_8H_8B(5, 4) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + VST1 [V6.S4, V7.S4], (R0) + ADD $32, R0, R0 + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes + ADD $64, R5, R5 // index += 64 + + CMP R4, R5 + BLT neon1_loop + +neon1_tail: + // Handle remaining elements with scalar fallback + CMP R1, R5 + BEQ neon1_done + + // Compute remaining elements + SUB R5, R1, R1 + + // Fall back to scalar unpack for tail + MOVD $1, R4 // bitMask = 1 + MOVD $0, R6 // bitOffset = 0 + MOVD $0, R7 // index = 0 + B neon1_scalar_test + 
+neon1_scalar_loop: + MOVD R6, R8 + LSR $3, R8, R8 // byte_index = bitOffset / 8 + MOVBU (R2)(R8), R9 // Load byte + + MOVD R6, R10 + AND $7, R10, R10 // bit_offset = bitOffset % 8 + + LSR R10, R9, R9 // Shift right by bit offset + AND $1, R9, R9 // Mask to get bit + MOVW R9, (R0) // Store as int32 + + ADD $4, R0, R0 // dst++ + ADD $1, R6, R6 // bitOffset++ + ADD $1, R7, R7 // index++ + +neon1_scalar_test: + CMP R1, R7 + BLT neon1_scalar_loop + +neon1_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_2bit_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int32_2bit_arm64.s new file mode 100644 index 00000000000..7acea640e75 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_2bit_arm64.s @@ -0,0 +1,136 @@ +//go:build !purego + +#include "textflag.h" +#include "unpack_neon_macros_arm64.h" + +// unpackInt32x2bitNEON implements NEON unpacking for bitWidth=2 using direct bit manipulation +// Each byte contains 4 values of 2 bits each: [bits 6-7][bits 4-5][bits 2-3][bits 0-1] +// +// func unpackInt32x2bitNEON(dst []int32, src []byte, bitWidth uint) +TEXT ·unpackInt32x2bitNEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth (should be 2) + + MOVD $0, R5 // R5 = index (initialize early for tail path) + + // Check if we have at least 32 values to process + CMP $32, R1 + BLT neon2_tail + + // Round down to multiple of 32 for NEON processing + MOVD R1, R4 + LSR $5, R4, R4 // R4 = len / 32 + LSL $5, R4, R4 // R4 = aligned length (multiple of 32) + + // Load mask for 2 bits (0x03030303...) + MOVD $0x0303030303030303, R6 + VMOV R6, V31.D[0] + VMOV R6, V31.D[1] // V31 = mask for 2-bit values + +neon2_loop: + // Load 8 bytes (contains 32 x 2-bit values) + VLD1 (R2), [V0.B8] + + // Extract bits [1:0] from each byte (values at positions 0,4,8,12,...) 
+ VAND V31.B16, V0.B16, V1.B16 + + // Extract bits [3:2] from each byte (values at positions 1,5,9,13,...) + VUSHR $2, V0.B16, V2.B16 + VAND V31.B16, V2.B16, V2.B16 + + // Extract bits [5:4] from each byte (values at positions 2,6,10,14,...) + VUSHR $4, V0.B16, V3.B16 + VAND V31.B16, V3.B16, V3.B16 + + // Extract bits [7:6] from each byte (values at positions 3,7,11,15,...) + VUSHR $6, V0.B16, V4.B16 + VAND V31.B16, V4.B16, V4.B16 + + // Interleave using two stages of ZIP operations + // Stage 1: ZIP pairs at byte level + VZIP1 V2.B8, V1.B8, V5.B8 // V5 = [V1[0],V2[0],V1[1],V2[1],V1[2],V2[2],V1[3],V2[3]] + VZIP1 V4.B8, V3.B8, V6.B8 // V6 = [V3[0],V4[0],V3[1],V4[1],V3[2],V4[2],V3[3],V4[3]] + VZIP2 V2.B8, V1.B8, V7.B8 // V7 = [V1[4],V2[4],V1[5],V2[5],V1[6],V2[6],V1[7],V2[7]] + VZIP2 V4.B8, V3.B8, V8.B8 // V8 = [V3[4],V4[4],V3[5],V4[5],V3[6],V4[6],V3[7],V4[7]] + + // Stage 2: ZIP quads at 16-bit level to get final sequential order + VZIP1 V6.H4, V5.H4, V13.H4 // V13 = [V1[0],V2[0],V3[0],V4[0],V1[1],V2[1],V3[1],V4[1]] = values 0-7 + VZIP2 V6.H4, V5.H4, V14.H4 // V14 = [V1[2],V2[2],V3[2],V4[2],V1[3],V2[3],V3[3],V4[3]] = values 8-15 + VZIP1 V8.H4, V7.H4, V15.H4 // V15 = [V1[4],V2[4],V3[4],V4[4],V1[5],V2[5],V3[5],V4[5]] = values 16-23 + VZIP2 V8.H4, V7.H4, V16.H4 // V16 = [V1[6],V2[6],V3[6],V4[6],V1[7],V2[7],V3[7],V4[7]] = values 24-31 + + // Widen first 8 values (V13) to int32 + USHLL_8H_8B(17, 13) // V17.8H ← V13.8B + USHLL_4S_4H(18, 17) // V18.4S ← V17.4H (values 0-3) + USHLL2_4S_8H(19, 17) // V19.4S ← V17.8H (values 4-7) + + // Widen second 8 values (V14) to int32 + USHLL_8H_8B(20, 14) // V20.8H ← V14.8B + USHLL_4S_4H(21, 20) // V21.4S ← V20.4H (values 8-11) + USHLL2_4S_8H(22, 20) // V22.4S ← V20.8H (values 12-15) + + // Widen third 8 values (V15) to int32 + USHLL_8H_8B(23, 15) // V23.8H ← V15.8B + USHLL_4S_4H(24, 23) // V24.4S ← V23.4H (values 16-19) + USHLL2_4S_8H(25, 23) // V25.4S ← V23.8H (values 20-23) + + // Widen fourth 8 values (V16) to int32 + USHLL_8H_8B(26, 
16) // V26.8H ← V16.8B + USHLL_4S_4H(27, 26) // V27.4S ← V26.4H (values 24-27) + USHLL2_4S_8H(28, 26) // V28.4S ← V26.8H (values 28-31) + + // Store 32 int32 values (128 bytes) + VST1 [V18.S4, V19.S4], (R0) + ADD $32, R0, R0 + VST1 [V21.S4, V22.S4], (R0) + ADD $32, R0, R0 + VST1 [V24.S4, V25.S4], (R0) + ADD $32, R0, R0 + VST1 [V27.S4, V28.S4], (R0) + ADD $32, R0, R0 + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes + ADD $32, R5, R5 // index += 32 + + CMP R4, R5 + BLT neon2_loop + +neon2_tail: + // Handle remaining elements with scalar fallback + CMP R1, R5 + BEQ neon2_done + + // Compute remaining elements + SUB R5, R1, R1 + + // Fall back to scalar unpack for tail + MOVD $3, R4 // bitMask = 3 (0b11 for 2 bits) + MOVD $0, R6 // bitOffset = 0 + MOVD $0, R7 // index = 0 + B neon2_scalar_test + +neon2_scalar_loop: + MOVD R6, R8 + LSR $3, R8, R8 // byte_index = bitOffset / 8 + MOVBU (R2)(R8), R9 // Load byte + + MOVD R6, R10 + AND $7, R10, R10 // bit_offset = bitOffset % 8 + + LSR R10, R9, R9 // Shift right by bit offset + AND $3, R9, R9 // Mask to get 2 bits + MOVW R9, (R0) // Store as int32 + + ADD $4, R0, R0 // dst++ + ADD $2, R6, R6 // bitOffset += 2 + ADD $1, R7, R7 // index++ + +neon2_scalar_test: + CMP R1, R7 + BLT neon2_scalar_loop + +neon2_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_4bit_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int32_4bit_arm64.s new file mode 100644 index 00000000000..05360b4a467 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_4bit_arm64.s @@ -0,0 +1,106 @@ +//go:build !purego + +#include "textflag.h" +#include "unpack_neon_macros_arm64.h" + +// unpackInt32x4bitNEON implements NEON unpacking for bitWidth=4 using direct bit manipulation +// Each byte contains 2 values of 4 bits each +// +// func unpackInt32x4bitNEON(dst []int32, src []byte, bitWidth uint) +TEXT ·unpackInt32x4bitNEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), 
R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth (should be 4) + + MOVD $0, R5 // R5 = index (initialize early for tail path) + + // Check if we have at least 16 values to process + CMP $16, R1 + BLT neon4_tail + + // Round down to multiple of 16 for NEON processing + MOVD R1, R4 + LSR $4, R4, R4 // R4 = len / 16 + LSL $4, R4, R4 // R4 = aligned length (multiple of 16) + + // Load mask for 4 bits (0x0F0F0F0F...) + MOVD $0x0F0F0F0F0F0F0F0F, R6 + VMOV R6, V31.D[0] + VMOV R6, V31.D[1] // V31 = mask for low nibbles + +neon4_loop: + // Load 8 bytes (contains 16 x 4-bit values) + VLD1 (R2), [V0.B8] + + // Extract low nibbles (values at even nibble positions) + VAND V31.B16, V0.B16, V1.B16 // V1 = low nibbles + + // Extract high nibbles (values at odd nibble positions) + VUSHR $4, V0.B16, V2.B16 // V2 = high nibbles (shifted down) + VAND V31.B16, V2.B16, V2.B16 // V2 = high nibbles (masked) + + // Now V1 has values [0,2,4,6,8,10,12,14] and V2 has [1,3,5,7,9,11,13,15] + // We need to interleave them: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] + VZIP1 V2.B8, V1.B8, V3.B8 // V3 = interleaved low half + VZIP2 V2.B8, V1.B8, V4.B8 // V4 = interleaved high half + + // Widen first 8 values (V3) to int32 + USHLL_8H_8B(5, 3) // V5.8H ← V3.8B + USHLL_4S_4H(6, 5) // V6.4S ← V5.4H (values 0-3) + USHLL2_4S_8H(7, 5) // V7.4S ← V5.8H (values 4-7) + + // Widen second 8 values (V4) to int32 + USHLL_8H_8B(8, 4) // V8.8H ← V4.8B + USHLL_4S_4H(9, 8) // V9.4S ← V8.4H (values 8-11) + USHLL2_4S_8H(10, 8) // V10.4S ← V8.8H (values 12-15) + + // Store 16 int32 values (64 bytes) + VST1 [V6.S4, V7.S4], (R0) + ADD $32, R0, R0 + VST1 [V9.S4, V10.S4], (R0) + ADD $32, R0, R0 + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes + ADD $16, R5, R5 // index += 16 + + CMP R4, R5 + BLT neon4_loop + +neon4_tail: + // Handle remaining elements with scalar fallback + CMP R1, R5 + BEQ neon4_done + + // Compute remaining elements + SUB R5, R1, R1 + 
+ // Fall back to scalar unpack for tail + MOVD $0x0F, R4 // bitMask = 0x0F (4 bits) + MOVD $0, R6 // bitOffset = 0 (start from current R2 position) + MOVD $0, R7 // loop counter = 0 + B neon4_scalar_test + +neon4_scalar_loop: + MOVD R6, R8 + LSR $3, R8, R8 // byte_index = bitOffset / 8 + MOVBU (R2)(R8), R9 // Load byte from current position + + MOVD R6, R10 + AND $7, R10, R10 // bit_offset = bitOffset % 8 + + LSR R10, R9, R9 // Shift right by bit offset + AND $0x0F, R9, R9 // Mask to get 4 bits + MOVW R9, (R0) // Store as int32 + + ADD $4, R0, R0 // dst++ + ADD $4, R6, R6 // bitOffset += 4 + ADD $1, R7, R7 // counter++ + +neon4_scalar_test: + CMP R1, R7 + BLT neon4_scalar_loop + +neon4_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_8bit_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int32_8bit_arm64.s new file mode 100644 index 00000000000..40d5bf088a3 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_8bit_arm64.s @@ -0,0 +1,65 @@ +//go:build !purego + +#include "textflag.h" +#include "unpack_neon_macros_arm64.h" + +// unpackInt32x8bitNEON implements NEON unpacking for bitWidth=8 +// Each byte is already a complete value - just widen to int32 +// Processes 8 values at a time using NEON +// +// func unpackInt32x8bitNEON(dst []int32, src []byte, bitWidth uint) +TEXT ·unpackInt32x8bitNEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth (should be 8) + + MOVD $0, R5 // R5 = index + + // Check if we have at least 8 values to process + CMP $8, R1 + BLT tbl8_tail + + // Round down to multiple of 8 for NEON processing + MOVD R1, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length (multiple of 8) + +tbl8_loop: + // Load 8 bytes (8 x 8-bit values) + VLD1 (R2), [V0.B8] + + // Widen to int32: byte → short → int + USHLL_8H_8B(1, 0) // V1.8H ← V0.8B 
(8x8-bit → 8x16-bit) + USHLL_4S_4H(2, 1) // V2.4S ← V1.4H (lower 4x16-bit → 4x32-bit) + USHLL2_4S_8H(3, 1) // V3.4S ← V1.8H (upper 4x16-bit → 4x32-bit) + + // Store 8 int32 values + VST1 [V2.S4, V3.S4], (R0) + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes + ADD $32, R0, R0 // dst += 8 int32 (32 bytes) + ADD $8, R5, R5 // index += 8 + + CMP R4, R5 + BLT tbl8_loop + +tbl8_tail: + // Handle remaining elements (0-7) one by one + CMP R1, R5 + BGE tbl8_done + +tbl8_tail_loop: + MOVBU (R2), R6 // Load byte + MOVW R6, (R0) // Store as int32 (zero-extended) + + ADD $1, R2, R2 // src++ + ADD $4, R0, R0 // dst++ + ADD $1, R5, R5 // index++ + + CMP R1, R5 + BLT tbl8_tail_loop + +tbl8_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_amd64.go b/vendor/github.com/parquet-go/bitpack/unpack_int32_amd64.go new file mode 100644 index 00000000000..8783f3f82d1 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_amd64.go @@ -0,0 +1,36 @@ +//go:build !purego + +package bitpack + +import ( + "github.com/parquet-go/bitpack/unsafecast" + "golang.org/x/sys/cpu" +) + +//go:noescape +func unpackInt32Default(dst []int32, src []byte, bitWidth uint) + +//go:noescape +func unpackInt32x1to16bitsAVX2(dst []int32, src []byte, bitWidth uint) + +//go:noescape +func unpackInt32x17to26bitsAVX2(dst []int32, src []byte, bitWidth uint) + +//go:noescape +func unpackInt32x27to31bitsAVX2(dst []int32, src []byte, bitWidth uint) + +func unpackInt32(dst []int32, src []byte, bitWidth uint) { + hasAVX2 := cpu.X86.HasAVX2 + switch { + case hasAVX2 && bitWidth <= 16: + unpackInt32x1to16bitsAVX2(dst, src, bitWidth) + case hasAVX2 && bitWidth <= 26: + unpackInt32x17to26bitsAVX2(dst, src, bitWidth) + case hasAVX2 && bitWidth <= 31: + unpackInt32x27to31bitsAVX2(dst, src, bitWidth) + case bitWidth == 32: + copy(dst, unsafecast.Slice[int32](src)) + default: + unpackInt32Default(dst, src, bitWidth) + } +} diff --git 
a/vendor/github.com/parquet-go/bitpack/unpack_int32_amd64.s b/vendor/github.com/parquet-go/bitpack/unpack_int32_amd64.s new file mode 100644 index 00000000000..e0f2bf4a4d6 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_amd64.s @@ -0,0 +1,352 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// func unpackInt32Default(dst []int32, src []byte, bitWidth uint) +TEXT ·unpackInt32Default(SB), NOSPLIT, $0-56 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), DX + MOVQ src_base+24(FP), BX + MOVQ bitWidth+48(FP), CX + + MOVQ $1, R8 // bitMask = (1 << bitWidth) - 1 + SHLQ CX, R8 + DECQ R8 + MOVQ CX, R9 // bitWidth + + XORQ DI, DI // bitOffset + XORQ SI, SI // index + JMP test +loop: + MOVQ DI, R10 + MOVQ DI, CX + SHRQ $5, R10 // i = bitOffset / 32 + ANDQ $0b11111, CX // j = bitOffset % 32 + + MOVL (BX)(R10*4), R11 + MOVL R8, R12 // d = bitMask + SHLL CX, R12 // d = d << j + ANDL R12, R11 // d = src[i] & d + SHRL CX, R11 // d = d >> j + + MOVL CX, R13 + ADDL R9, R13 + CMPL R13, $32 + JBE next // j+bitWidth <= 32 ? + + MOVL 4(BX)(R10*4), R14 + MOVL CX, R12 + MOVL $32, CX + SUBL R12, CX // k = 32 - j + MOVL R8, R12 // c = bitMask + SHRL CX, R12 // c = c >> k + ANDL R12, R14 // c = src[i+1] & c + SHLL CX, R14 // c = c << k + ORL R14, R11 // d = d | c +next: + MOVL R11, (AX)(SI*4) // dst[n] = d + ADDQ R9, DI // bitOffset += bitWidth + INCQ SI +test: + CMPQ SI, DX + JNE loop + RET + +// ----------------------------------------------------------------------------- +// The unpack* functions below are adaptations of the algorithms +// described in "Decoding billions of integers per second through vectorization" +// from D. Lemire & L. Boytsov, the following changes were made: +// +// - The paper described two methods for decoding integers called "horizontal" +// and "vertical". 
The "horizontal" version is the one that applies the best
+// to the bit packing done in the Parquet delta encoding; however, it also
+// differs in some ways, many compression techniques discussed in the paper
+// are not implemented in the Parquet format.
+//
+// - The paper focuses on implementations based on SSE instructions, which
+// describes how to use PMULLD to emulate the lack of variable bit shift
+// for packed integers. Our version of the bit unpacking algorithms here
+// uses AVX2 and can perform variable bit shifts using VPSRLVD, which yields
+// better throughput since the instruction latency is a single CPU cycle,
+// vs 10 for VPMULLD.
+//
+// - The reference implementation at https://github.com/lemire/FastPFor/ uses
+// specializations for each bit size, resulting in 32 unique functions.
+// Our version here is more generic: we provide 3 variations of the
+// algorithm for bit widths 1 to 16, 17 to 26, and 27 to 31 (unpacking 32
+// bit values is a simple copy). In that regard, our implementation is
+// somewhat an improvement over the reference, since it uses less code and
+// less memory to hold the shuffle masks and shift tables.
+//
+// Technically, each specialization of our functions could be expressed by the
+// algorithm used for unpacking values of 27 to 31 bits. However, multiple steps
+// of the main loop can be removed for lower bit widths, providing up to ~35%
+// better throughput for smaller sizes. Since we expect delta encoding to often
+// result in bit packing values to smaller bit widths, the specializations are
+// worth the extra complexity.
+//
+// For more details, see: https://arxiv.org/pdf/1209.2137v5.pdf
+// -----------------------------------------------------------------------------
+
+// unpackInt32x1to16bitsAVX2 is the implementation of the bit unpacking
+// algorithm for inputs of bit width 1 to 16. 
+
+// In this version of the algorithm, we can perform a single memory load in each
+// loop iteration since we know that 8 values will fit in a single XMM register.
+//
+// func unpackInt32x1to16bitsAVX2(dst []int32, src []byte, bitWidth uint)
+TEXT ·unpackInt32x1to16bitsAVX2(SB), NOSPLIT, $56-56
+ NO_LOCAL_POINTERS
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), DX
+ MOVQ src_base+24(FP), BX
+ MOVQ bitWidth+48(FP), CX
+
+ CMPQ DX, $8
+ JB tail
+
+ MOVQ DX, DI
+ SHRQ $3, DI
+ SHLQ $3, DI
+ XORQ SI, SI
+
+ MOVQ $1, R8
+ SHLQ CX, R8
+ DECQ R8
+ MOVQ R8, X0
+ VPBROADCASTD X0, X0 // bitMask = (1 << bitWidth) - 1
+
+ MOVQ CX, R9
+ DECQ R9
+ SHLQ $5, R9 // 32 * (bitWidth - 1)
+
+ MOVQ CX, R10
+ DECQ R10
+ SHLQ $5, R10
+ ANDQ $0xFF, R10 // (32 * (bitWidth - 1)) % 256
+
+ LEAQ ·shuffleInt32x1to16bits(SB), R11
+ VMOVDQA (R11)(R9*1), X1
+ VMOVDQA 16(R11)(R9*1), X2
+
+ LEAQ ·shiftRightInt32(SB), R12
+ VMOVDQA (R12)(R10*1), X3
+ VMOVDQA 16(R12)(R10*1), X4
+loop:
+ VMOVDQU (BX), X7
+
+ VPSHUFB X1, X7, X5
+ VPSHUFB X2, X7, X6
+
+ VPSRLVD X3, X5, X5
+ VPSRLVD X4, X6, X6
+
+ VPAND X0, X5, X5
+ VPAND X0, X6, X6
+
+ VMOVDQU X5, (AX)(SI*4)
+ VMOVDQU X6, 16(AX)(SI*4)
+
+ ADDQ CX, BX
+ ADDQ $8, SI
+ CMPQ SI, DI
+ JNE loop
+ VZEROUPPER
+
+ CMPQ SI, DX
+ JE done
+ LEAQ (AX)(SI*4), AX
+ SUBQ SI, DX
+tail:
+ MOVQ AX, dst_base-56(SP)
+ MOVQ DX, dst_len-48(SP)
+ MOVQ BX, src_base-32(SP)
+ MOVQ CX, bitWidth-8(SP)
+ CALL ·unpackInt32Default(SB)
+done:
+ RET
+
+// unpackInt32x17to26bitsAVX2 is the implementation of the bit unpacking
+// algorithm for inputs of bit width 17 to 26.
+//
+// In this version of the algorithm, we need to read 32 bytes at each loop iteration
+// because 8 bit-packed values will span across two XMM registers. 
+// +// func unpackInt32x17to26bitsAVX2(dst []int32, src []byte, bitWidth uint) +TEXT ·unpackInt32x17to26bitsAVX2(SB), NOSPLIT, $56-56 + NO_LOCAL_POINTERS + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), DX + MOVQ src_base+24(FP), BX + MOVQ bitWidth+48(FP), CX + + CMPQ DX, $8 + JB tail + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + XORQ SI, SI + + MOVQ $1, R8 + SHLQ CX, R8 + DECQ R8 + MOVQ R8, X0 + VPBROADCASTD X0, X0 + + MOVQ CX, R9 + SUBQ $17, R9 + IMULQ $48, R9 // 48 * (bitWidth - 17) + + MOVQ CX, R10 + DECQ R10 + SHLQ $5, R10 + ANDQ $0xFF, R10 // (32 * (bitWidth - 1)) % 256 + + LEAQ ·shuffleInt32x17to26bits(SB), R11 + VMOVDQA (R11)(R9*1), X1 + VMOVDQA 16(R11)(R9*1), X2 + VMOVDQA 32(R11)(R9*1), X3 + + LEAQ ·shiftRightInt32(SB), R12 + VMOVDQA (R12)(R10*1), X4 + VMOVDQA 16(R12)(R10*1), X5 +loop: + VMOVDQU (BX), X6 + VMOVDQU 16(BX), X7 + + VPSHUFB X1, X6, X8 + VPSHUFB X2, X6, X9 + VPSHUFB X3, X7, X10 + VPOR X10, X9, X9 + + VPSRLVD X4, X8, X8 + VPSRLVD X5, X9, X9 + + VPAND X0, X8, X8 + VPAND X0, X9, X9 + + VMOVDQU X8, (AX)(SI*4) + VMOVDQU X9, 16(AX)(SI*4) + + ADDQ CX, BX + ADDQ $8, SI + CMPQ SI, DI + JNE loop + VZEROUPPER + + CMPQ SI, DX + JE done + LEAQ (AX)(SI*4), AX + SUBQ SI, DX +tail: + MOVQ AX, dst_base-56(SP) + MOVQ DX, dst_len-48(SP) + MOVQ BX, src_base-32(SP) + MOVQ CX, bitWidth-8(SP) + CALL ·unpackInt32Default(SB) +done: + RET + +// unpackInt32x27to31bitsAVX2 is the implementation of the bit unpacking +// algorithm for inputs of bit width 27 to 31. +// +// In this version of the algorithm the bit-packed values may span across up to +// 5 bytes. The simpler approach for smaller bit widths where we could perform a +// single shuffle + shift to unpack the values do not work anymore. +// +// Values are unpacked in two steps: the first one extracts lower bits which are +// shifted RIGHT to align on the beginning of 32 bit words, the second extracts +// upper bits which are shifted LEFT to be moved to the end of the 32 bit words. 
+// +// The amount of LEFT shifts is always "8 minus the amount of RIGHT shift". +// +// func unpackInt32x27to31bitsAVX2(dst []int32, src []byte, bitWidth uint) +TEXT ·unpackInt32x27to31bitsAVX2(SB), NOSPLIT, $56-56 + NO_LOCAL_POINTERS + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), DX + MOVQ src_base+24(FP), BX + MOVQ bitWidth+48(FP), CX + + CMPQ DX, $8 + JB tail + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + XORQ SI, SI + + MOVQ $1, R8 + SHLQ CX, R8 + DECQ R8 + MOVQ R8, X0 + VPBROADCASTD X0, X0 + + MOVQ CX, R9 + SUBQ $27, R9 + IMULQ $80, R9 // (80 * (bitWidth - 27)) + + MOVQ CX, R10 + DECQ R10 + SHLQ $5, R10 + ANDQ $0xFF, R10 // (32 * (bitWidth - 1)) % 256 + + LEAQ ·shuffleInt32x27to31bits(SB), R11 + VMOVDQA (R11)(R9*1), X1 + VMOVDQA 16(R11)(R9*1), X2 + VMOVDQA 32(R11)(R9*1), X3 + VMOVDQA 48(R11)(R9*1), X4 + VMOVDQA 64(R11)(R9*1), X5 + + LEAQ ·shiftRightInt32(SB), R12 + LEAQ ·shiftLeftInt32(SB), R13 + VMOVDQA (R12)(R10*1), X6 + VMOVDQA (R13)(R10*1), X7 + VMOVDQA 16(R12)(R10*1), X8 + VMOVDQA 16(R13)(R10*1), X9 +loop: + VMOVDQU (BX), X10 + VMOVDQU 16(BX), X11 + + VPSHUFB X1, X10, X12 + VPSHUFB X2, X10, X13 + VPSHUFB X3, X10, X14 + VPSHUFB X4, X11, X15 + VPSHUFB X5, X11, X11 + + VPSRLVD X6, X12, X12 + VPSLLVD X7, X13, X13 + VPSRLVD X8, X14, X14 + VPSRLVD X8, X15, X15 + VPSLLVD X9, X11, X11 + + VPOR X13, X12, X12 + VPOR X15, X14, X14 + VPOR X11, X14, X14 + + VPAND X0, X12, X12 + VPAND X0, X14, X14 + + VMOVDQU X12, (AX)(SI*4) + VMOVDQU X14, 16(AX)(SI*4) + + ADDQ CX, BX + ADDQ $8, SI + CMPQ SI, DI + JNE loop + VZEROUPPER + + CMPQ SI, DX + JE done + LEAQ (AX)(SI*4), AX + SUBQ SI, DX +tail: + MOVQ AX, dst_base-56(SP) + MOVQ DX, dst_len-48(SP) + MOVQ BX, src_base-32(SP) + MOVQ CX, bitWidth-8(SP) + CALL ·unpackInt32Default(SB) +done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_arm64.go b/vendor/github.com/parquet-go/bitpack/unpack_int32_arm64.go new file mode 100644 index 00000000000..e73d1fdf71e --- /dev/null +++ 
b/vendor/github.com/parquet-go/bitpack/unpack_int32_arm64.go @@ -0,0 +1,48 @@ +//go:build !purego + +package bitpack + +import ( + "github.com/parquet-go/bitpack/unsafecast" +) + +//go:noescape +func unpackInt32Default(dst []int32, src []byte, bitWidth uint) + +//go:noescape +func unpackInt32x1to16bitsARM64(dst []int32, src []byte, bitWidth uint) + +//go:noescape +func unpackInt32x1bitNEON(dst []int32, src []byte, bitWidth uint) + +//go:noescape +func unpackInt32x2bitNEON(dst []int32, src []byte, bitWidth uint) + +//go:noescape +func unpackInt32x3bitNEON(dst []int32, src []byte, bitWidth uint) + +//go:noescape +func unpackInt32x4bitNEON(dst []int32, src []byte, bitWidth uint) + +//go:noescape +func unpackInt32x8bitNEON(dst []int32, src []byte, bitWidth uint) + +func unpackInt32(dst []int32, src []byte, bitWidth uint) { + switch { + case bitWidth == 1: + unpackInt32x1bitNEON(dst, src, bitWidth) + case bitWidth == 2: + unpackInt32x2bitNEON(dst, src, bitWidth) + case bitWidth == 4: + unpackInt32x4bitNEON(dst, src, bitWidth) + case bitWidth == 8: + unpackInt32x8bitNEON(dst, src, bitWidth) + // bitWidth == 3,5,6,7: Skip NEON table (don't divide evenly into 8) + case bitWidth <= 16: + unpackInt32x1to16bitsARM64(dst, src, bitWidth) + case bitWidth == 32: + copy(dst, unsafecast.Slice[int32](src)) + default: + unpackInt32Default(dst, src, bitWidth) + } +} diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int32_arm64.s new file mode 100644 index 00000000000..d3c0bda5486 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_arm64.s @@ -0,0 +1,732 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// func unpackInt32Default(dst []int32, src []byte, bitWidth uint) +TEXT ·unpackInt32Default(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // 
R3 = bitWidth + + MOVD $1, R4 // R4 = bitMask = (1 << bitWidth) - 1 + LSL R3, R4, R4 + SUB $1, R4, R4 + + MOVD $0, R5 // R5 = bitOffset + MOVD $0, R6 // R6 = index + B test + +loop: + MOVD R5, R7 // R7 = i = bitOffset / 32 + LSR $5, R7, R7 + + MOVD R5, R8 // R8 = j = bitOffset % 32 + AND $31, R8, R8 + + LSL $2, R7, R16 // R16 = i * 4 + MOVWU (R2)(R16), R9 // R9 = src[i] + MOVW R4, R10 // R10 = bitMask + LSL R8, R10, R10 // R10 = bitMask << j + AND R10, R9, R9 // R9 = src[i] & (bitMask << j) + LSR R8, R9, R9 // R9 = d = (src[i] & (bitMask << j)) >> j + + ADD R3, R8, R11 // R11 = j + bitWidth + CMP $32, R11 + BLE next // if j+bitWidth <= 32, skip to next + + ADD $1, R7, R12 // R12 = i + 1 + LSL $2, R12, R16 // R16 = (i + 1) * 4 + MOVWU (R2)(R16), R13 // R13 = src[i+1] + + MOVD $32, R14 // R14 = k = 32 - j + SUB R8, R14, R14 + + MOVW R4, R15 // R15 = bitMask + LSR R14, R15, R15 // R15 = bitMask >> k + AND R15, R13, R13 // R13 = src[i+1] & (bitMask >> k) + LSL R14, R13, R13 // R13 = (src[i+1] & (bitMask >> k)) << k + ORR R13, R9, R9 // R9 = d | c + +next: + LSL $2, R6, R16 // R16 = index * 4 + MOVW R9, (R0)(R16) // dst[index] = d + ADD R3, R5, R5 // bitOffset += bitWidth + ADD $1, R6, R6 // index++ + +test: + CMP R1, R6 + BNE loop + RET + +// unpackInt32x1to16bitsARM64 implements optimized unpacking for bit widths 1-16 +// Uses optimized scalar ARM64 operations with batched processing +// +// func unpackInt32x1to16bitsARM64(dst []int32, src []byte, bitWidth uint) +TEXT ·unpackInt32x1to16bitsARM64(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth + + // Check if we have at least 4 values to process + CMP $4, R1 + BLT scalar_fallback + + // Determine which NEON path to use based on bitWidth + CMP $1, R3 + BEQ neon_1bit + CMP $2, R3 + BEQ neon_2bit + CMP $3, R3 + BEQ neon_3bit + CMP $4, R3 + BEQ neon_4bit + CMP $5, R3 + BEQ 
neon_5bit + CMP $6, R3 + BEQ neon_6bit + CMP $7, R3 + BEQ neon_7bit + CMP $8, R3 + BEQ neon_8bit + CMP $16, R3 + BEQ neon_16bit + + // For other bit widths, fall back to scalar + B scalar_fallback + +neon_1bit: + // BitWidth 1: 8 int32 values packed in 1 byte + // Process 8 values at a time using scalar operations + + // Round down to multiple of 8 for processing + MOVD R1, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length (multiple of 8) + + MOVD $0, R5 // R5 = index + CMP $0, R4 + BEQ scalar_fallback + +neon_1bit_loop: + // Load 1 byte (contains 8 values, 1 bit each) + MOVBU (R2), R6 + + // Extract 8 values manually (bits 0-7) + // Value 0: bit 0 + AND $1, R6, R7 + MOVW R7, (R0) + + // Value 1: bit 1 + LSR $1, R6, R7 + AND $1, R7, R7 + MOVW R7, 4(R0) + + // Value 2: bit 2 + LSR $2, R6, R7 + AND $1, R7, R7 + MOVW R7, 8(R0) + + // Value 3: bit 3 + LSR $3, R6, R7 + AND $1, R7, R7 + MOVW R7, 12(R0) + + // Value 4: bit 4 + LSR $4, R6, R7 + AND $1, R7, R7 + MOVW R7, 16(R0) + + // Value 5: bit 5 + LSR $5, R6, R7 + AND $1, R7, R7 + MOVW R7, 20(R0) + + // Value 6: bit 6 + LSR $6, R6, R7 + AND $1, R7, R7 + MOVW R7, 24(R0) + + // Value 7: bit 7 + LSR $7, R6, R7 + AND $1, R7, R7 + MOVW R7, 28(R0) + + // Advance pointers + ADD $1, R2, R2 // src += 1 byte (8 values) + ADD $32, R0, R0 // dst += 8 int32 (32 bytes) + ADD $8, R5, R5 // index += 8 + + CMP R4, R5 + BLT neon_1bit_loop + + CMP R1, R5 + BEQ neon_done + SUB R5, R1, R1 + B scalar_fallback_entry + +neon_2bit: + // BitWidth 2: 4 int32 values packed in 1 byte + // Process 4 values at a time using scalar operations + + MOVD R1, R4 + LSR $2, R4, R4 // R4 = len / 4 + LSL $2, R4, R4 // R4 = aligned length (multiple of 4) + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback + +neon_2bit_loop: + // Load 1 byte (contains 4 values, 2 bits each) + MOVBU (R2), R6 + + // Extract 4 values manually (bits 0-1, 2-3, 4-5, 6-7) + // Value 0: bits 0-1 + AND $3, R6, R7 + MOVW R7, (R0) + + // Value 1: bits 2-3 + LSR $2, 
R6, R7 + AND $3, R7, R7 + MOVW R7, 4(R0) + + // Value 2: bits 4-5 + LSR $4, R6, R7 + AND $3, R7, R7 + MOVW R7, 8(R0) + + // Value 3: bits 6-7 + LSR $6, R6, R7 + AND $3, R7, R7 + MOVW R7, 12(R0) + + // Advance pointers + ADD $1, R2, R2 // src += 1 byte (4 values) + ADD $16, R0, R0 // dst += 4 int32 (16 bytes) + ADD $4, R5, R5 // index += 4 + + CMP R4, R5 + BLT neon_2bit_loop + + CMP R1, R5 + BEQ neon_done + SUB R5, R1, R1 + B scalar_fallback_entry + +neon_3bit: + // BitWidth 3: 8 int32 values packed in 3 bytes + // Process 8 values at a time using scalar operations + + // Round down to multiple of 8 for processing + MOVD R1, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length (multiple of 8) + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback + +neon_3bit_loop: + // Load 3 bytes as 32-bit value (4th byte will be ignored) + // Bytes 0-2 contain: [val7:val6:val5:val4:val3:val2:val1:val0] + // Bits layout: [23:21][20:18][17:15][14:12][11:9][8:6][5:3][2:0] + MOVWU (R2), R6 + + // Value 0: bits 0-2 + AND $7, R6, R7 + MOVW R7, (R0) + + // Value 1: bits 3-5 + LSR $3, R6, R7 + AND $7, R7, R7 + MOVW R7, 4(R0) + + // Value 2: bits 6-8 + LSR $6, R6, R7 + AND $7, R7, R7 + MOVW R7, 8(R0) + + // Value 3: bits 9-11 + LSR $9, R6, R7 + AND $7, R7, R7 + MOVW R7, 12(R0) + + // Value 4: bits 12-14 + LSR $12, R6, R7 + AND $7, R7, R7 + MOVW R7, 16(R0) + + // Value 5: bits 15-17 + LSR $15, R6, R7 + AND $7, R7, R7 + MOVW R7, 20(R0) + + // Value 6: bits 18-20 + LSR $18, R6, R7 + AND $7, R7, R7 + MOVW R7, 24(R0) + + // Value 7: bits 21-23 + LSR $21, R6, R7 + AND $7, R7, R7 + MOVW R7, 28(R0) + + // Advance pointers + ADD $3, R2, R2 // src += 3 bytes (8 values) + ADD $32, R0, R0 // dst += 8 int32 (32 bytes) + ADD $8, R5, R5 // index += 8 + + CMP R4, R5 + BLT neon_3bit_loop + + CMP R1, R5 + BEQ neon_done + SUB R5, R1, R1 + B scalar_fallback_entry + +neon_4bit: + // BitWidth 4: 4 int32 values packed in 2 bytes + // Process 4 values at a time using scalar operations + + 
MOVD R1, R4 + LSR $2, R4, R4 // R4 = len / 4 + LSL $2, R4, R4 // R4 = aligned length (multiple of 4) + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback + +neon_4bit_loop: + // Load 2 bytes (contains 4 values, 4 bits each) + MOVHU (R2), R6 + + // Extract 4 values manually (nibbles) + // Value 0: bits 0-3 + AND $15, R6, R7 + MOVW R7, (R0) + + // Value 1: bits 4-7 + LSR $4, R6, R7 + AND $15, R7, R7 + MOVW R7, 4(R0) + + // Value 2: bits 8-11 + LSR $8, R6, R7 + AND $15, R7, R7 + MOVW R7, 8(R0) + + // Value 3: bits 12-15 + LSR $12, R6, R7 + AND $15, R7, R7 + MOVW R7, 12(R0) + + // Advance pointers + ADD $2, R2, R2 // src += 2 bytes (4 values) + ADD $16, R0, R0 // dst += 4 int32 (16 bytes) + ADD $4, R5, R5 // index += 4 + + CMP R4, R5 + BLT neon_4bit_loop + + CMP R1, R5 + BEQ neon_done + SUB R5, R1, R1 + B scalar_fallback_entry + +neon_5bit: + // BitWidth 5: 8 int32 values packed in 5 bytes + // Process 8 values at a time using scalar operations + + // Round down to multiple of 8 for processing + MOVD R1, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length (multiple of 8) + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback + +neon_5bit_loop: + // Load 5 bytes as 64-bit value (upper bytes will be ignored) + // 8 values × 5 bits = 40 bits = 5 bytes + // Bits layout: [39:35][34:30][29:25][24:20][19:15][14:10][9:5][4:0] + MOVD (R2), R6 + + // Value 0: bits 0-4 + AND $31, R6, R7 + MOVW R7, (R0) + + // Value 1: bits 5-9 + LSR $5, R6, R7 + AND $31, R7, R7 + MOVW R7, 4(R0) + + // Value 2: bits 10-14 + LSR $10, R6, R7 + AND $31, R7, R7 + MOVW R7, 8(R0) + + // Value 3: bits 15-19 + LSR $15, R6, R7 + AND $31, R7, R7 + MOVW R7, 12(R0) + + // Value 4: bits 20-24 + LSR $20, R6, R7 + AND $31, R7, R7 + MOVW R7, 16(R0) + + // Value 5: bits 25-29 + LSR $25, R6, R7 + AND $31, R7, R7 + MOVW R7, 20(R0) + + // Value 6: bits 30-34 + LSR $30, R6, R7 + AND $31, R7, R7 + MOVW R7, 24(R0) + + // Value 7: bits 35-39 + LSR $35, R6, R7 + AND $31, R7, R7 + MOVW R7, 28(R0) + + // 
Advance pointers + ADD $5, R2, R2 // src += 5 bytes (8 values) + ADD $32, R0, R0 // dst += 8 int32 (32 bytes) + ADD $8, R5, R5 // index += 8 + + CMP R4, R5 + BLT neon_5bit_loop + + CMP R1, R5 + BEQ neon_done + SUB R5, R1, R1 + B scalar_fallback_entry + +neon_6bit: + // BitWidth 6: 4 int32 values packed in 3 bytes + // Process 4 values at a time using scalar operations + + MOVD R1, R4 + LSR $2, R4, R4 // R4 = len / 4 + LSL $2, R4, R4 // R4 = aligned length (multiple of 4) + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback + +neon_6bit_loop: + // Load 3 bytes as 32-bit value (4th byte will be ignored) + // 4 values × 6 bits = 24 bits = 3 bytes + // Bits layout: [23:18][17:12][11:6][5:0] + MOVWU (R2), R6 + + // Value 0: bits 0-5 + AND $63, R6, R7 + MOVW R7, (R0) + + // Value 1: bits 6-11 + LSR $6, R6, R7 + AND $63, R7, R7 + MOVW R7, 4(R0) + + // Value 2: bits 12-17 + LSR $12, R6, R7 + AND $63, R7, R7 + MOVW R7, 8(R0) + + // Value 3: bits 18-23 + LSR $18, R6, R7 + AND $63, R7, R7 + MOVW R7, 12(R0) + + // Advance pointers + ADD $3, R2, R2 // src += 3 bytes (4 values) + ADD $16, R0, R0 // dst += 4 int32 (16 bytes) + ADD $4, R5, R5 // index += 4 + + CMP R4, R5 + BLT neon_6bit_loop + + CMP R1, R5 + BEQ neon_done + SUB R5, R1, R1 + B scalar_fallback_entry + +neon_7bit: + // BitWidth 7: 8 int32 values packed in 7 bytes + // Process 8 values at a time using scalar operations + + // Round down to multiple of 8 for processing + MOVD R1, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length (multiple of 8) + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback + +neon_7bit_loop: + // Load 7 bytes as 64-bit value (8th byte will be ignored) + // 8 values × 7 bits = 56 bits = 7 bytes + // Bits layout: [55:49][48:42][41:35][34:28][27:21][20:14][13:7][6:0] + MOVD (R2), R6 + + // Value 0: bits 0-6 + AND $127, R6, R7 + MOVW R7, (R0) + + // Value 1: bits 7-13 + LSR $7, R6, R7 + AND $127, R7, R7 + MOVW R7, 4(R0) + + // Value 2: bits 14-20 + LSR $14, R6, R7 + AND $127, 
R7, R7 + MOVW R7, 8(R0) + + // Value 3: bits 21-27 + LSR $21, R6, R7 + AND $127, R7, R7 + MOVW R7, 12(R0) + + // Value 4: bits 28-34 + LSR $28, R6, R7 + AND $127, R7, R7 + MOVW R7, 16(R0) + + // Value 5: bits 35-41 + LSR $35, R6, R7 + AND $127, R7, R7 + MOVW R7, 20(R0) + + // Value 6: bits 42-48 + LSR $42, R6, R7 + AND $127, R7, R7 + MOVW R7, 24(R0) + + // Value 7: bits 49-55 + LSR $49, R6, R7 + AND $127, R7, R7 + MOVW R7, 28(R0) + + // Advance pointers + ADD $7, R2, R2 // src += 7 bytes (8 values) + ADD $32, R0, R0 // dst += 8 int32 (32 bytes) + ADD $8, R5, R5 // index += 8 + + CMP R4, R5 + BLT neon_7bit_loop + + CMP R1, R5 + BEQ neon_done + SUB R5, R1, R1 + B scalar_fallback_entry + +neon_8bit: + // BitWidth 8: 4 int32 values packed in 4 bytes + // Process 4 values at a time using NEON + + // Calculate how many full groups of 4 we can process + MOVD R1, R4 + LSR $2, R4, R4 // R4 = len / 4 + LSL $2, R4, R4 // R4 = (len / 4) * 4 = aligned length + + MOVD $0, R5 // R5 = index + CMP $0, R4 + BEQ scalar_fallback + +neon_8bit_loop: + // Load 4 bytes as 4 uint8 values into lower part of V0 + // We need to load bytes and zero-extend to 32-bit + + // Load 4 bytes to W6 + MOVWU (R2), R6 + + // Extract bytes and write as int32 + // Byte 0 + AND $0xFF, R6, R7 + MOVW R7, (R0) + + // Byte 1 + LSR $8, R6, R7 + AND $0xFF, R7, R7 + MOVW R7, 4(R0) + + // Byte 2 + LSR $16, R6, R7 + AND $0xFF, R7, R7 + MOVW R7, 8(R0) + + // Byte 3 + LSR $24, R6, R7 + MOVW R7, 12(R0) + + // Advance pointers + ADD $4, R2, R2 // src += 4 bytes + ADD $16, R0, R0 // dst += 4 int32 (16 bytes) + ADD $4, R5, R5 // index += 4 + + CMP R4, R5 + BLT neon_8bit_loop + + // Handle tail with scalar + CMP R1, R5 + BEQ neon_done + + // Calculate remaining elements + SUB R5, R1, R1 // R1 = remaining elements + B scalar_fallback_entry + +neon_16bit: + // BitWidth 16: 4 int32 values packed in 8 bytes + // Process 4 values at a time + + MOVD R1, R4 + LSR $2, R4, R4 // R4 = len / 4 + LSL $2, R4, R4 // R4 = (len / 4) * 4 + 
+ MOVD $0, R5 // R5 = index + CMP $0, R4 + BEQ scalar_fallback + +neon_16bit_loop: + // Load 8 bytes as 4 uint16 values + MOVD (R2), R6 // Load 8 bytes into R6 + + // Extract 16-bit values and write as int32 + // Value 0 (bits 0-15) + AND $0xFFFF, R6, R7 + MOVW R7, (R0) + + // Value 1 (bits 16-31) + LSR $16, R6, R7 + AND $0xFFFF, R7, R7 + MOVW R7, 4(R0) + + // Value 2 (bits 32-47) + LSR $32, R6, R7 + AND $0xFFFF, R7, R7 + MOVW R7, 8(R0) + + // Value 3 (bits 48-63) + LSR $48, R6, R7 + MOVW R7, 12(R0) + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes + ADD $16, R0, R0 // dst += 4 int32 (16 bytes) + ADD $4, R5, R5 // index += 4 + + CMP R4, R5 + BLT neon_16bit_loop + + // Handle tail with scalar + CMP R1, R5 + BEQ neon_done + + SUB R5, R1, R1 + B scalar_fallback_entry + +neon_done: + RET + +scalar_fallback: + MOVD $0, R5 // Start from beginning + // R0, R1, R2, R3 already set from function args + +scalar_fallback_entry: + // R0 = current dst position (already advanced) + // R1 = remaining elements + // R2 = current src position (already advanced) + // R3 = bitWidth + // R5 = elements already processed + + // Fall back to scalar implementation for remaining elements + CMP $0, R1 + BEQ scalar_done // No remaining elements + + MOVD $1, R4 // R4 = bitMask = (1 << bitWidth) - 1 + LSL R3, R4, R4 + SUB $1, R4, R4 + + // bitOffset starts from 0 relative to current R2 position + // (not total offset, since R2 is already advanced) + MOVD $0, R6 // R6 = bitOffset (relative to current R2) + MOVD $0, R7 // R7 = index (within remaining elements) + B scalar_test + +scalar_loop: + MOVD R6, R8 // R8 = i = bitOffset / 32 + LSR $5, R8, R8 + + MOVD R6, R9 // R9 = j = bitOffset % 32 + AND $31, R9, R9 + + LSL $2, R8, R10 // R10 = i * 4 + MOVWU (R2)(R10), R11 // R11 = src[i] (relative to current R2) + MOVW R4, R12 // R12 = bitMask + LSL R9, R12, R12 // R12 = bitMask << j + AND R12, R11, R11 // R11 = src[i] & (bitMask << j) + LSR R9, R11, R11 // R11 = d = (src[i] & (bitMask << j)) >> 
j + + ADD R3, R9, R12 // R12 = j + bitWidth + CMP $32, R12 + BLE scalar_next // if j+bitWidth <= 32, skip to next + + ADD $1, R8, R13 // R13 = i + 1 + LSL $2, R13, R10 // R10 = (i + 1) * 4 + MOVWU (R2)(R10), R14 // R14 = src[i+1] + + MOVD $32, R15 // R15 = k = 32 - j + SUB R9, R15, R15 + + MOVW R4, R16 // R16 = bitMask + LSR R15, R16, R16 // R16 = bitMask >> k + AND R16, R14, R14 // R14 = src[i+1] & (bitMask >> k) + LSL R15, R14, R14 // R14 = (src[i+1] & (bitMask >> k)) << k + ORR R14, R11, R11 // R11 = d | c + +scalar_next: + LSL $2, R7, R10 // R10 = index * 4 + MOVW R11, (R0)(R10) // dst[index] = d (relative to current R0) + ADD R3, R6, R6 // bitOffset += bitWidth + ADD $1, R7, R7 // index++ + +scalar_test: + CMP R1, R7 + BLT scalar_loop + +scalar_done: + RET + +// Macro definitions for unsupported NEON instructions using WORD encodings +// USHLL Vd.8H, Vn.8B, #0 - widen 8x8-bit to 8x16-bit +#define USHLL_8H_8B(vd, vn) WORD $(0x2f08a400 | (vd) | ((vn)<<5)) + +// USHLL2 Vd.8H, Vn.16B, #0 - widen upper 8x8-bit to 8x16-bit +#define USHLL2_8H_16B(vd, vn) WORD $(0x6f08a400 | (vd) | ((vn)<<5)) + +// USHLL Vd.4S, Vn.4H, #0 - widen 4x16-bit to 4x32-bit +#define USHLL_4S_4H(vd, vn) WORD $(0x2f10a400 | (vd) | ((vn)<<5)) + +// USHLL2 Vd.4S, Vn.8H, #0 - widen upper 4x16-bit to 4x32-bit +#define USHLL2_4S_8H(vd, vn) WORD $(0x6f10a400 | (vd) | ((vn)<<5)) + +// USHLL Vd.2D, Vn.2S, #0 - widen 2x32-bit to 2x64-bit +#define USHLL_2D_2S(vd, vn) WORD $(0x2f20a400 | (vd) | ((vn)<<5)) + +// USHLL2 Vd.2D, Vn.4S, #0 - widen upper 2x32-bit to 2x64-bit +#define USHLL2_2D_4S(vd, vn) WORD $(0x6f20a400 | (vd) | ((vn)<<5)) + +// Bit expansion lookup table defined in bitexpand_table_arm64.s + +// unpackInt32x1bitNEON implements table-based NEON unpacking for bitWidth=1 +// Uses lookup tables for parallel bit expansion +// +// func unpackInt32x1bitNEON(dst []int32, src []byte, bitWidth uint) diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_be.go 
b/vendor/github.com/parquet-go/bitpack/unpack_int32_be.go new file mode 100644 index 00000000000..0f4ba054c42 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_be.go @@ -0,0 +1,15 @@
+//go:build s390x
+
+package bitpack
+
+import "encoding/binary"
+
+func unsafecastBytesToUint32(src []byte) []uint32 {
+ out := make([]uint32, len(src)/4)
+ idx := 0
+ for k := range out {
+ out[k] = binary.LittleEndian.Uint32((src)[idx:(4 + idx)])
+ idx += 4
+ }
+ return out
+} diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_le.go b/vendor/github.com/parquet-go/bitpack/unpack_int32_le.go new file mode 100644 index 00000000000..035f6341ea3 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_le.go @@ -0,0 +1,9 @@
+//go:build !s390x
+
+package bitpack
+
+import "github.com/parquet-go/bitpack/unsafecast"
+
+func unsafecastBytesToUint32(src []byte) []uint32 {
+ return unsafecast.Slice[uint32](src)
+} diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int32_purego.go b/vendor/github.com/parquet-go/bitpack/unpack_int32_purego.go new file mode 100644 index 00000000000..71477f6e725 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int32_purego.go @@ -0,0 +1,21 @@
+//go:build purego || (!amd64 && !arm64)
+
+package bitpack
+
+func unpackInt32(dst []int32, src []byte, bitWidth uint) {
+ bits := unsafecastBytesToUint32(src)
+ bitMask := uint32(1<<bitWidth) - 1
+ bitOffset := uint(0)
+
+ for n := range dst {
+ i := bitOffset / 32
+ j := bitOffset % 32
+ d := (bits[i] & (bitMask << j)) >> j
+ if j+bitWidth > 32 {
+ k := 32 - j
+ d |= (bits[i+1] & (bitMask >> k)) << k
+ }
+ dst[n] = int32(d)
+ bitOffset += bitWidth
+ }
+} diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_1bit_amd64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_1bit_amd64.s new file mode 100644 index 00000000000..dae464e27c5 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_1bit_amd64.s @@ -0,0 +1,123 @@
+//go:build !purego
+
+#include "funcdata.h"
+#include "textflag.h"
+
+// unpackInt64x1bitAVX2 implements optimized unpacking for bitWidth=1 using AVX2
+// 
Each byte contains 8 bits, processes 8 values at a time +// +// func unpackInt64x1bitAVX2(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x1bitAVX2(SB), NOSPLIT, $0-56 + MOVQ dst_base+0(FP), AX // AX = dst pointer + MOVQ dst_len+8(FP), DX // DX = dst length + MOVQ src_base+24(FP), BX // BX = src pointer + MOVQ bitWidth+48(FP), CX // CX = bitWidth (should be 1) + + // Check if we have at least 8 values to process + CMPQ DX, $8 + JB avx2_1bit_tail + + // Round down to multiple of 8 for AVX2 processing + MOVQ DX, DI + SHRQ $3, DI // DI = len / 8 + SHLQ $3, DI // DI = aligned length (multiple of 8) + XORQ SI, SI // SI = index + +avx2_1bit_loop: + // Load 1 byte (contains 8 x 1-bit values) + MOVBQZX (BX), R8 + + // Extract each bit and store as int64 + // Bit 0 + MOVQ R8, R9 + ANDQ $1, R9 + MOVQ R9, (AX) + + // Bit 1 + MOVQ R8, R9 + SHRQ $1, R9 + ANDQ $1, R9 + MOVQ R9, 8(AX) + + // Bit 2 + MOVQ R8, R9 + SHRQ $2, R9 + ANDQ $1, R9 + MOVQ R9, 16(AX) + + // Bit 3 + MOVQ R8, R9 + SHRQ $3, R9 + ANDQ $1, R9 + MOVQ R9, 24(AX) + + // Bit 4 + MOVQ R8, R9 + SHRQ $4, R9 + ANDQ $1, R9 + MOVQ R9, 32(AX) + + // Bit 5 + MOVQ R8, R9 + SHRQ $5, R9 + ANDQ $1, R9 + MOVQ R9, 40(AX) + + // Bit 6 + MOVQ R8, R9 + SHRQ $6, R9 + ANDQ $1, R9 + MOVQ R9, 48(AX) + + // Bit 7 + MOVQ R8, R9 + SHRQ $7, R9 + ANDQ $1, R9 + MOVQ R9, 56(AX) + + // Advance pointers + ADDQ $1, BX // src += 1 byte + ADDQ $64, AX // dst += 8 int64 (64 bytes) + ADDQ $8, SI // index += 8 + + CMPQ SI, DI + JNE avx2_1bit_loop + +avx2_1bit_tail: + // Handle remaining elements with scalar fallback + CMPQ SI, DX + JE avx2_1bit_done + + // Compute remaining elements + SUBQ SI, DX + + // Calculate bit offset for remaining elements + // Each processed element consumes 1 bit, so bitOffset = SI * 1 + MOVQ SI, R9 // bitOffset = SI (number of bits already processed) + XORQ R10, R10 // index = 0 (within remaining elements) + JMP avx2_1bit_scalar_test + +avx2_1bit_scalar_loop: + MOVQ R9, R11 + SHRQ $3, R11 // byte_index = bitOffset 
/ 8 + MOVQ src_base+24(FP), R14 // Get original src pointer + MOVBQZX (R14)(R11*1), R12 // Load byte from original src + + MOVQ R9, R13 + ANDQ $7, R13 // bit_offset = bitOffset % 8 + + MOVQ R13, CX // Move bit offset to CX for shift + SHRQ CL, R12 // Shift right by bit offset + ANDQ $1, R12 // Mask to get bit + MOVQ R12, (AX) // Store as int64 + + ADDQ $8, AX // dst++ + ADDQ $1, R9 // bitOffset++ + ADDQ $1, R10 // index++ + +avx2_1bit_scalar_test: + CMPQ R10, DX + JNE avx2_1bit_scalar_loop + +avx2_1bit_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_1bit_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_1bit_arm64.s new file mode 100644 index 00000000000..605d6bd3e8d --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_1bit_arm64.s @@ -0,0 +1,239 @@ +//go:build !purego + +#include "textflag.h" +#include "unpack_neon_macros_arm64.h" + +// unpackInt64x1bitNEON implements NEON unpacking for bitWidth=1 using direct bit manipulation +// Each byte contains 8 bits: [bit7][bit6][bit5][bit4][bit3][bit2][bit1][bit0] +// +// func unpackInt64x1bitNEON(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x1bitNEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth (should be 1) + + MOVD $0, R5 // R5 = index (initialize early for tail path) + + // Check if we have at least 64 values to process + CMP $64, R1 + BLT neon1_tail_int64 + + // Round down to multiple of 64 for NEON processing + MOVD R1, R4 + LSR $6, R4, R4 // R4 = len / 64 + LSL $6, R4, R4 // R4 = aligned length (multiple of 64) + + // Load mask for 1 bit (0x01010101...) 
+ MOVD $0x0101010101010101, R6 + VMOV R6, V31.D[0] + VMOV R6, V31.D[1] // V31 = mask for single bits + +neon1_loop_int64: + // Load 8 bytes (contains 64 x 1-bit values) + VLD1 (R2), [V0.B8] + + // Extract each bit position (8 separate streams) + VAND V31.B16, V0.B16, V1.B16 // V1 = bit 0 + + VUSHR $1, V0.B16, V2.B16 + VAND V31.B16, V2.B16, V2.B16 // V2 = bit 1 + + VUSHR $2, V0.B16, V3.B16 + VAND V31.B16, V3.B16, V3.B16 // V3 = bit 2 + + VUSHR $3, V0.B16, V4.B16 + VAND V31.B16, V4.B16, V4.B16 // V4 = bit 3 + + VUSHR $4, V0.B16, V5.B16 + VAND V31.B16, V5.B16, V5.B16 // V5 = bit 4 + + VUSHR $5, V0.B16, V6.B16 + VAND V31.B16, V6.B16, V6.B16 // V6 = bit 5 + + VUSHR $6, V0.B16, V7.B16 + VAND V31.B16, V7.B16, V7.B16 // V7 = bit 6 + + VUSHR $7, V0.B16, V8.B16 + VAND V31.B16, V8.B16, V8.B16 // V8 = bit 7 + + // Stage 1: ZIP pairs (8 streams → 4 streams of pairs) + VZIP1 V2.B8, V1.B8, V9.B8 // V9 = [bit0,bit1] interleaved + VZIP1 V4.B8, V3.B8, V10.B8 // V10 = [bit2,bit3] interleaved + VZIP1 V6.B8, V5.B8, V11.B8 // V11 = [bit4,bit5] interleaved + VZIP1 V8.B8, V7.B8, V12.B8 // V12 = [bit6,bit7] interleaved + + VZIP2 V2.B8, V1.B8, V13.B8 // V13 = [bit0,bit1] upper half + VZIP2 V4.B8, V3.B8, V14.B8 // V14 = [bit2,bit3] upper half + VZIP2 V6.B8, V5.B8, V15.B8 // V15 = [bit4,bit5] upper half + VZIP2 V8.B8, V7.B8, V16.B8 // V16 = [bit6,bit7] upper half + + // Stage 2: ZIP quads (4 streams → 2 streams of quads) + VZIP1 V10.H4, V9.H4, V17.H4 // V17 = [0,1,2,3] interleaved + VZIP1 V12.H4, V11.H4, V18.H4 // V18 = [4,5,6,7] interleaved + VZIP2 V10.H4, V9.H4, V19.H4 // V19 = [0,1,2,3] next + VZIP2 V12.H4, V11.H4, V20.H4 // V20 = [4,5,6,7] next + + VZIP1 V14.H4, V13.H4, V21.H4 // V21 = upper [0,1,2,3] + VZIP1 V16.H4, V15.H4, V22.H4 // V22 = upper [4,5,6,7] + VZIP2 V14.H4, V13.H4, V23.H4 // V23 = upper [0,1,2,3] next + VZIP2 V16.H4, V15.H4, V24.H4 // V24 = upper [4,5,6,7] next + + // Stage 3: ZIP octets (2 streams → fully sequential) + VZIP1 V18.S2, V17.S2, V25.S2 // V25 = values 0-7 + 
VZIP2 V18.S2, V17.S2, V26.S2 // V26 = values 8-15 + VZIP1 V20.S2, V19.S2, V27.S2 // V27 = values 16-23 + VZIP2 V20.S2, V19.S2, V28.S2 // V28 = values 24-31 + VZIP1 V22.S2, V21.S2, V1.S2 // V1 = values 32-39 + VZIP2 V22.S2, V21.S2, V2.S2 // V2 = values 40-47 + VZIP1 V24.S2, V23.S2, V3.S2 // V3 = values 48-55 + VZIP2 V24.S2, V23.S2, V4.S2 // V4 = values 56-63 + + // Widen to int64 and store - each group of 8 values + // Values 0-7 + USHLL_8H_8B(5, 25) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + USHLL_2D_2S(8, 6) + USHLL2_2D_4S(9, 6) + USHLL_2D_2S(10, 7) + USHLL2_2D_4S(11, 7) + VST1 [V8.D2, V9.D2], (R0) + ADD $32, R0, R0 + VST1 [V10.D2, V11.D2], (R0) + ADD $32, R0, R0 + + // Values 8-15 + USHLL_8H_8B(5, 26) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + USHLL_2D_2S(8, 6) + USHLL2_2D_4S(9, 6) + USHLL_2D_2S(10, 7) + USHLL2_2D_4S(11, 7) + VST1 [V8.D2, V9.D2], (R0) + ADD $32, R0, R0 + VST1 [V10.D2, V11.D2], (R0) + ADD $32, R0, R0 + + // Values 16-23 + USHLL_8H_8B(5, 27) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + USHLL_2D_2S(8, 6) + USHLL2_2D_4S(9, 6) + USHLL_2D_2S(10, 7) + USHLL2_2D_4S(11, 7) + VST1 [V8.D2, V9.D2], (R0) + ADD $32, R0, R0 + VST1 [V10.D2, V11.D2], (R0) + ADD $32, R0, R0 + + // Values 24-31 + USHLL_8H_8B(5, 28) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + USHLL_2D_2S(8, 6) + USHLL2_2D_4S(9, 6) + USHLL_2D_2S(10, 7) + USHLL2_2D_4S(11, 7) + VST1 [V8.D2, V9.D2], (R0) + ADD $32, R0, R0 + VST1 [V10.D2, V11.D2], (R0) + ADD $32, R0, R0 + + // Values 32-39 + USHLL_8H_8B(5, 1) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + USHLL_2D_2S(8, 6) + USHLL2_2D_4S(9, 6) + USHLL_2D_2S(10, 7) + USHLL2_2D_4S(11, 7) + VST1 [V8.D2, V9.D2], (R0) + ADD $32, R0, R0 + VST1 [V10.D2, V11.D2], (R0) + ADD $32, R0, R0 + + // Values 40-47 + USHLL_8H_8B(5, 2) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + USHLL_2D_2S(8, 6) + USHLL2_2D_4S(9, 6) + USHLL_2D_2S(10, 7) + USHLL2_2D_4S(11, 7) + VST1 [V8.D2, V9.D2], (R0) + ADD $32, R0, R0 + VST1 [V10.D2, V11.D2], (R0) + ADD $32, R0, R0 + + // Values 48-55 + 
USHLL_8H_8B(5, 3) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + USHLL_2D_2S(8, 6) + USHLL2_2D_4S(9, 6) + USHLL_2D_2S(10, 7) + USHLL2_2D_4S(11, 7) + VST1 [V8.D2, V9.D2], (R0) + ADD $32, R0, R0 + VST1 [V10.D2, V11.D2], (R0) + ADD $32, R0, R0 + + // Values 56-63 + USHLL_8H_8B(5, 4) + USHLL_4S_4H(6, 5) + USHLL2_4S_8H(7, 5) + USHLL_2D_2S(8, 6) + USHLL2_2D_4S(9, 6) + USHLL_2D_2S(10, 7) + USHLL2_2D_4S(11, 7) + VST1 [V8.D2, V9.D2], (R0) + ADD $32, R0, R0 + VST1 [V10.D2, V11.D2], (R0) + ADD $32, R0, R0 + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes + ADD $64, R5, R5 // index += 64 + + CMP R4, R5 + BLT neon1_loop_int64 + +neon1_tail_int64: + // Handle remaining elements with scalar fallback + CMP R1, R5 + BEQ neon1_done_int64 + + // Compute remaining elements + SUB R5, R1, R1 + + // Fall back to scalar unpack for tail + MOVD $1, R4 // bitMask = 1 + MOVD $0, R6 // bitOffset = 0 + MOVD $0, R7 // index = 0 + B neon1_scalar_test_int64 + +neon1_scalar_loop_int64: + MOVD R6, R8 + LSR $3, R8, R8 // byte_index = bitOffset / 8 + MOVBU (R2)(R8), R9 // Load byte + + MOVD R6, R10 + AND $7, R10, R10 // bit_offset = bitOffset % 8 + + LSR R10, R9, R9 // Shift right by bit offset + AND $1, R9, R9 // Mask to get bit + MOVD R9, (R0) // Store as int64 + + ADD $8, R0, R0 // dst++ + ADD $1, R6, R6 // bitOffset++ + ADD $1, R7, R7 // index++ + +neon1_scalar_test_int64: + CMP R1, R7 + BLT neon1_scalar_loop_int64 + +neon1_done_int64: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_2bit_amd64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_2bit_amd64.s new file mode 100644 index 00000000000..b0204e0e2e8 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_2bit_amd64.s @@ -0,0 +1,124 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// unpackInt64x2bitAVX2 implements optimized unpacking for bitWidth=2 using AVX2 +// Each byte contains 4 x 2-bit values, processes 8 values at a time +// +// func unpackInt64x2bitAVX2(dst 
[]int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x2bitAVX2(SB), NOSPLIT, $0-56 + MOVQ dst_base+0(FP), AX // AX = dst pointer + MOVQ dst_len+8(FP), DX // DX = dst length + MOVQ src_base+24(FP), BX // BX = src pointer + MOVQ bitWidth+48(FP), CX // CX = bitWidth (should be 2) + + // Check if we have at least 8 values to process + CMPQ DX, $8 + JB avx2_2bit_tail + + // Round down to multiple of 8 for AVX2 processing + MOVQ DX, DI + SHRQ $3, DI // DI = len / 8 + SHLQ $3, DI // DI = aligned length (multiple of 8) + XORQ SI, SI // SI = index + +avx2_2bit_loop: + // Load 2 bytes (contains 8 x 2-bit values) + MOVWQZX (BX), R8 + + // Extract each 2-bit value and store as int64 + // Value 0 (bits 0-1) + MOVQ R8, R9 + ANDQ $3, R9 + MOVQ R9, (AX) + + // Value 1 (bits 2-3) + MOVQ R8, R9 + SHRQ $2, R9 + ANDQ $3, R9 + MOVQ R9, 8(AX) + + // Value 2 (bits 4-5) + MOVQ R8, R9 + SHRQ $4, R9 + ANDQ $3, R9 + MOVQ R9, 16(AX) + + // Value 3 (bits 6-7) + MOVQ R8, R9 + SHRQ $6, R9 + ANDQ $3, R9 + MOVQ R9, 24(AX) + + // Value 4 (bits 8-9) + MOVQ R8, R9 + SHRQ $8, R9 + ANDQ $3, R9 + MOVQ R9, 32(AX) + + // Value 5 (bits 10-11) + MOVQ R8, R9 + SHRQ $10, R9 + ANDQ $3, R9 + MOVQ R9, 40(AX) + + // Value 6 (bits 12-13) + MOVQ R8, R9 + SHRQ $12, R9 + ANDQ $3, R9 + MOVQ R9, 48(AX) + + // Value 7 (bits 14-15) + MOVQ R8, R9 + SHRQ $14, R9 + ANDQ $3, R9 + MOVQ R9, 56(AX) + + // Advance pointers + ADDQ $2, BX // src += 2 bytes + ADDQ $64, AX // dst += 8 int64 (64 bytes) + ADDQ $8, SI // index += 8 + + CMPQ SI, DI + JNE avx2_2bit_loop + +avx2_2bit_tail: + // Handle remaining elements with scalar fallback + CMPQ SI, DX + JE avx2_2bit_done + + // Compute remaining elements + SUBQ SI, DX + + // Calculate bit offset for remaining elements + // Each processed element consumes 2 bits, so bitOffset = SI * 2 + MOVQ SI, R9 + SHLQ $1, R9 // bitOffset = SI * 2 + XORQ R10, R10 // index = 0 (within remaining elements) + JMP avx2_2bit_scalar_test + +avx2_2bit_scalar_loop: + MOVQ R9, R11 + SHRQ $3, R11 // byte_index 
= bitOffset / 8 + MOVQ src_base+24(FP), R14 // Get original src pointer + MOVBQZX (R14)(R11*1), R12 // Load byte from original src + + MOVQ R9, R13 + ANDQ $7, R13 // bit_offset = bitOffset % 8 + + MOVQ R13, CX // Move bit offset to CX for shift + SHRQ CL, R12 // Shift right by bit offset + ANDQ $3, R12 // Mask to get 2 bits + MOVQ R12, (AX) // Store as int64 + + ADDQ $8, AX // dst++ + ADDQ $2, R9 // bitOffset += 2 + ADDQ $1, R10 // index++ + +avx2_2bit_scalar_test: + CMPQ R10, DX + JNE avx2_2bit_scalar_loop + +avx2_2bit_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_2bit_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_2bit_arm64.s new file mode 100644 index 00000000000..3b7ac771354 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_2bit_arm64.s @@ -0,0 +1,161 @@ +//go:build !purego + +#include "textflag.h" +#include "unpack_neon_macros_arm64.h" + +// unpackInt64x2bitNEON implements NEON unpacking for bitWidth=2 using direct bit manipulation +// Each byte contains 4 values of 2 bits each: [bits 6-7][bits 4-5][bits 2-3][bits 0-1] +// +// func unpackInt64x2bitNEON(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x2bitNEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth (should be 2) + + MOVD $0, R5 // R5 = index (initialize early for tail path) + + // Check if we have at least 32 values to process + CMP $32, R1 + BLT neon2_tail_int64 + + // Round down to multiple of 32 for NEON processing + MOVD R1, R4 + LSR $5, R4, R4 // R4 = len / 32 + LSL $5, R4, R4 // R4 = aligned length (multiple of 32) + + // Load mask for 2 bits (0x03030303...) 
+ MOVD $0x0303030303030303, R6 + VMOV R6, V31.D[0] + VMOV R6, V31.D[1] // V31 = mask for 2-bit values + +neon2_loop_int64: + // Load 8 bytes (contains 32 x 2-bit values) + VLD1 (R2), [V0.B8] + + // Extract bits [1:0] from each byte (values at positions 0,4,8,12,...) + VAND V31.B16, V0.B16, V1.B16 + + // Extract bits [3:2] from each byte (values at positions 1,5,9,13,...) + VUSHR $2, V0.B16, V2.B16 + VAND V31.B16, V2.B16, V2.B16 + + // Extract bits [5:4] from each byte (values at positions 2,6,10,14,...) + VUSHR $4, V0.B16, V3.B16 + VAND V31.B16, V3.B16, V3.B16 + + // Extract bits [7:6] from each byte (values at positions 3,7,11,15,...) + VUSHR $6, V0.B16, V4.B16 + VAND V31.B16, V4.B16, V4.B16 + + // Use multiple ZIP stages to interleave + VZIP1 V2.B8, V1.B8, V5.B8 // V5 = [V1[0],V2[0],V1[1],V2[1],V1[2],V2[2],V1[3],V2[3]] + VZIP1 V4.B8, V3.B8, V6.B8 // V6 = [V3[0],V4[0],V3[1],V4[1],V3[2],V4[2],V3[3],V4[3]] + VZIP2 V2.B8, V1.B8, V7.B8 // V7 = [V1[4],V2[4],V1[5],V2[5],V1[6],V2[6],V1[7],V2[7]] + VZIP2 V4.B8, V3.B8, V8.B8 // V8 = [V3[4],V4[4],V3[5],V4[5],V3[6],V4[6],V3[7],V4[7]] + + // Now ZIP the pairs + VZIP1 V6.H4, V5.H4, V13.H4 // V13 = [V1[0],V2[0],V3[0],V4[0],V1[1],V2[1],V3[1],V4[1]] + VZIP2 V6.H4, V5.H4, V14.H4 // V14 = [V1[2],V2[2],V3[2],V4[2],V1[3],V2[3],V3[3],V4[3]] + VZIP1 V8.H4, V7.H4, V15.H4 // V15 = [V1[4],V2[4],V3[4],V4[4],V1[5],V2[5],V3[5],V4[5]] + VZIP2 V8.H4, V7.H4, V16.H4 // V16 = [V1[6],V2[6],V3[6],V4[6],V1[7],V2[7],V3[7],V4[7]] + + // Widen first 8 values (V13) to int64 + USHLL_8H_8B(17, 13) // V17.8H ← V13.8B + USHLL_4S_4H(18, 17) // V18.4S ← V17.4H + USHLL2_4S_8H(19, 17) // V19.4S ← V17.8H + USHLL_2D_2S(20, 18) // V20.2D ← V18.2S (values 0-1) + USHLL2_2D_4S(21, 18) // V21.2D ← V18.4S (values 2-3) + USHLL_2D_2S(22, 19) // V22.2D ← V19.2S (values 4-5) + USHLL2_2D_4S(23, 19) // V23.2D ← V19.4S (values 6-7) + + // Widen second 8 values (V14) to int64 + USHLL_8H_8B(24, 14) // V24.8H ← V14.8B + USHLL_4S_4H(25, 24) // V25.4S ← V24.4H + USHLL2_4S_8H(26, 
24) // V26.4S ← V24.8H + USHLL_2D_2S(27, 25) // V27.2D ← V25.2S (values 8-9) + USHLL2_2D_4S(28, 25) // V28.2D ← V25.4S (values 10-11) + USHLL_2D_2S(29, 26) // V29.2D ← V26.2S (values 12-13) + USHLL2_2D_4S(30, 26) // V30.2D ← V26.4S (values 14-15) + + // Store first 16 int64 values (128 bytes) + VST1 [V20.D2, V21.D2], (R0) + ADD $32, R0, R0 + VST1 [V22.D2, V23.D2], (R0) + ADD $32, R0, R0 + VST1 [V27.D2, V28.D2], (R0) + ADD $32, R0, R0 + VST1 [V29.D2, V30.D2], (R0) + ADD $32, R0, R0 + + // Widen third 8 values (V15) to int64 + USHLL_8H_8B(17, 15) // V17.8H ← V15.8B (reuse V17) + USHLL_4S_4H(18, 17) // V18.4S ← V17.4H + USHLL2_4S_8H(19, 17) // V19.4S ← V17.8H + USHLL_2D_2S(20, 18) // V20.2D ← V18.2S (values 16-17) + USHLL2_2D_4S(21, 18) // V21.2D ← V18.4S (values 18-19) + USHLL_2D_2S(22, 19) // V22.2D ← V19.2S (values 20-21) + USHLL2_2D_4S(23, 19) // V23.2D ← V19.4S (values 22-23) + + // Widen fourth 8 values (V16) to int64 + USHLL_8H_8B(24, 16) // V24.8H ← V16.8B (reuse V24) + USHLL_4S_4H(25, 24) // V25.4S ← V24.4H + USHLL2_4S_8H(26, 24) // V26.4S ← V24.8H + USHLL_2D_2S(27, 25) // V27.2D ← V25.2S (values 24-25) + USHLL2_2D_4S(28, 25) // V28.2D ← V25.4S (values 26-27) + USHLL_2D_2S(29, 26) // V29.2D ← V26.2S (values 28-29) + USHLL2_2D_4S(30, 26) // V30.2D ← V26.4S (values 30-31) + + // Store second 16 int64 values (128 bytes) + VST1 [V20.D2, V21.D2], (R0) + ADD $32, R0, R0 + VST1 [V22.D2, V23.D2], (R0) + ADD $32, R0, R0 + VST1 [V27.D2, V28.D2], (R0) + ADD $32, R0, R0 + VST1 [V29.D2, V30.D2], (R0) + ADD $32, R0, R0 + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes + ADD $32, R5, R5 // index += 32 + + CMP R4, R5 + BLT neon2_loop_int64 + +neon2_tail_int64: + // Handle remaining elements with scalar fallback + CMP R1, R5 + BEQ neon2_done_int64 + + // Compute remaining elements + SUB R5, R1, R1 + + // Fall back to scalar unpack for tail + MOVD $3, R4 // bitMask = 3 (0b11 for 2 bits) + MOVD $0, R6 // bitOffset = 0 + MOVD $0, R7 // index = 0 + B 
neon2_scalar_test_int64 + +neon2_scalar_loop_int64: + MOVD R6, R8 + LSR $3, R8, R8 // byte_index = bitOffset / 8 + MOVBU (R2)(R8), R9 // Load byte + + MOVD R6, R10 + AND $7, R10, R10 // bit_offset = bitOffset % 8 + + LSR R10, R9, R9 // Shift right by bit offset + AND $3, R9, R9 // Mask to get 2 bits + MOVD R9, (R0) // Store as int64 + + ADD $8, R0, R0 // dst++ + ADD $2, R6, R6 // bitOffset += 2 + ADD $1, R7, R7 // index++ + +neon2_scalar_test_int64: + CMP R1, R7 + BLT neon2_scalar_loop_int64 + +neon2_done_int64: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_4bit_amd64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_4bit_amd64.s new file mode 100644 index 00000000000..ab6c81cd18a --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_4bit_amd64.s @@ -0,0 +1,124 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// unpackInt64x4bitAVX2 implements optimized unpacking for bitWidth=4 using AVX2 +// Each byte contains 2 x 4-bit values, processes 8 values at a time +// +// func unpackInt64x4bitAVX2(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x4bitAVX2(SB), NOSPLIT, $0-56 + MOVQ dst_base+0(FP), AX // AX = dst pointer + MOVQ dst_len+8(FP), DX // DX = dst length + MOVQ src_base+24(FP), BX // BX = src pointer + MOVQ bitWidth+48(FP), CX // CX = bitWidth (should be 4) + + // Check if we have at least 8 values to process + CMPQ DX, $8 + JB avx2_4bit_tail + + // Round down to multiple of 8 for AVX2 processing + MOVQ DX, DI + SHRQ $3, DI // DI = len / 8 + SHLQ $3, DI // DI = aligned length (multiple of 8) + XORQ SI, SI // SI = index + +avx2_4bit_loop: + // Load 4 bytes (contains 8 x 4-bit values) + MOVLQZX (BX), R8 + + // Extract each 4-bit value and store as int64 + // Value 0 (bits 0-3) + MOVQ R8, R9 + ANDQ $0xF, R9 + MOVQ R9, (AX) + + // Value 1 (bits 4-7) + MOVQ R8, R9 + SHRQ $4, R9 + ANDQ $0xF, R9 + MOVQ R9, 8(AX) + + // Value 2 (bits 8-11) + MOVQ R8, R9 + SHRQ $8, R9 + ANDQ $0xF, R9 + MOVQ R9, 
16(AX) + + // Value 3 (bits 12-15) + MOVQ R8, R9 + SHRQ $12, R9 + ANDQ $0xF, R9 + MOVQ R9, 24(AX) + + // Value 4 (bits 16-19) + MOVQ R8, R9 + SHRQ $16, R9 + ANDQ $0xF, R9 + MOVQ R9, 32(AX) + + // Value 5 (bits 20-23) + MOVQ R8, R9 + SHRQ $20, R9 + ANDQ $0xF, R9 + MOVQ R9, 40(AX) + + // Value 6 (bits 24-27) + MOVQ R8, R9 + SHRQ $24, R9 + ANDQ $0xF, R9 + MOVQ R9, 48(AX) + + // Value 7 (bits 28-31) + MOVQ R8, R9 + SHRQ $28, R9 + ANDQ $0xF, R9 + MOVQ R9, 56(AX) + + // Advance pointers + ADDQ $4, BX // src += 4 bytes + ADDQ $64, AX // dst += 8 int64 (64 bytes) + ADDQ $8, SI // index += 8 + + CMPQ SI, DI + JNE avx2_4bit_loop + +avx2_4bit_tail: + // Handle remaining elements with scalar fallback + CMPQ SI, DX + JE avx2_4bit_done + + // Compute remaining elements + SUBQ SI, DX + + // Calculate bit offset for remaining elements + // Each processed element consumes 4 bits, so bitOffset = SI * 4 + MOVQ SI, R9 + SHLQ $2, R9 // bitOffset = SI * 4 + XORQ R10, R10 // index = 0 (within remaining elements) + JMP avx2_4bit_scalar_test + +avx2_4bit_scalar_loop: + MOVQ R9, R11 + SHRQ $3, R11 // byte_index = bitOffset / 8 + MOVQ src_base+24(FP), R14 // Get original src pointer + MOVBQZX (R14)(R11*1), R12 // Load byte from original src + + MOVQ R9, R13 + ANDQ $7, R13 // bit_offset = bitOffset % 8 + + MOVQ R13, CX // Move bit offset to CX for shift + SHRQ CL, R12 // Shift right by bit offset + ANDQ $0xF, R12 // Mask to get 4 bits + MOVQ R12, (AX) // Store as int64 + + ADDQ $8, AX // dst++ + ADDQ $4, R9 // bitOffset += 4 + ADDQ $1, R10 // index++ + +avx2_4bit_scalar_test: + CMPQ R10, DX + JNE avx2_4bit_scalar_loop + +avx2_4bit_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_4bit_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_4bit_arm64.s new file mode 100644 index 00000000000..671b6ea0bea --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_4bit_arm64.s @@ -0,0 +1,118 @@ +//go:build !purego + +#include "textflag.h" +#include 
"unpack_neon_macros_arm64.h" + +// unpackInt64x4bitNEON implements NEON unpacking for bitWidth=4 using direct bit manipulation +// Each byte contains 2 values of 4 bits each +// +// func unpackInt64x4bitNEON(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x4bitNEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth (should be 4) + + MOVD $0, R5 // R5 = index (initialize early for tail path) + + // Check if we have at least 16 values to process + CMP $16, R1 + BLT neon4_tail + + // Round down to multiple of 16 for NEON processing + MOVD R1, R4 + LSR $4, R4, R4 // R4 = len / 16 + LSL $4, R4, R4 // R4 = aligned length (multiple of 16) + + // Load mask for 4 bits (0x0F0F0F0F...) + MOVD $0x0F0F0F0F0F0F0F0F, R6 + VMOV R6, V31.D[0] + VMOV R6, V31.D[1] // V31 = mask for low nibbles + +neon4_loop: + // Load 8 bytes (contains 16 x 4-bit values) + VLD1 (R2), [V0.B8] + + // Extract low nibbles (values at even nibble positions) + VAND V31.B16, V0.B16, V1.B16 // V1 = low nibbles + + // Extract high nibbles (values at odd nibble positions) + VUSHR $4, V0.B16, V2.B16 // V2 = high nibbles (shifted down) + VAND V31.B16, V2.B16, V2.B16 // V2 = high nibbles (masked) + + // Now V1 has values [0,2,4,6,8,10,12,14] and V2 has [1,3,5,7,9,11,13,15] + // We need to interleave them: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] + VZIP1 V2.B8, V1.B8, V3.B8 // V3 = interleaved low half (values 0-7) + VZIP2 V2.B8, V1.B8, V4.B8 // V4 = interleaved high half (values 8-15) + + // Widen first 8 values (V3) to int64 + USHLL_8H_8B(5, 3) // V5.8H ← V3.8B + USHLL_4S_4H(6, 5) // V6.4S ← V5.4H + USHLL2_4S_8H(7, 5) // V7.4S ← V5.8H + USHLL_2D_2S(8, 6) // V8.2D ← V6.2S (values 0-1) + USHLL2_2D_4S(9, 6) // V9.2D ← V6.4S (values 2-3) + USHLL_2D_2S(10, 7) // V10.2D ← V7.2S (values 4-5) + USHLL2_2D_4S(11, 7) // V11.2D ← V7.4S (values 6-7) + + // Widen second 8 
values (V4) to int64 + USHLL_8H_8B(12, 4) // V12.8H ← V4.8B + USHLL_4S_4H(13, 12) // V13.4S ← V12.4H + USHLL2_4S_8H(14, 12) // V14.4S ← V12.8H + USHLL_2D_2S(15, 13) // V15.2D ← V13.2S (values 8-9) + USHLL2_2D_4S(16, 13) // V16.2D ← V13.4S (values 10-11) + USHLL_2D_2S(17, 14) // V17.2D ← V14.2S (values 12-13) + USHLL2_2D_4S(18, 14) // V18.2D ← V14.4S (values 14-15) + + // Store 16 int64 values (128 bytes) + VST1 [V8.D2, V9.D2], (R0) + ADD $32, R0, R0 + VST1 [V10.D2, V11.D2], (R0) + ADD $32, R0, R0 + VST1 [V15.D2, V16.D2], (R0) + ADD $32, R0, R0 + VST1 [V17.D2, V18.D2], (R0) + ADD $32, R0, R0 + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes + ADD $16, R5, R5 // index += 16 + + CMP R4, R5 + BLT neon4_loop + +neon4_tail: + // Handle remaining elements with scalar fallback + CMP R1, R5 + BEQ neon4_done + + // Compute remaining elements + SUB R5, R1, R1 + + // Fall back to scalar unpack for tail + MOVD $0x0F, R4 // bitMask = 0x0F (4 bits) + MOVD $0, R6 // bitOffset = 0 (start from current R2 position) + MOVD $0, R7 // loop counter = 0 + B neon4_scalar_test + +neon4_scalar_loop: + MOVD R6, R8 + LSR $3, R8, R8 // byte_index = bitOffset / 8 + MOVBU (R2)(R8), R9 // Load byte from current position + + MOVD R6, R10 + AND $7, R10, R10 // bit_offset = bitOffset % 8 + + LSR R10, R9, R9 // Shift right by bit offset + AND $0x0F, R9, R9 // Mask to get 4 bits + MOVD R9, (R0) // Store as int64 + + ADD $8, R0, R0 // dst++ + ADD $4, R6, R6 // bitOffset += 4 + ADD $1, R7, R7 // counter++ + +neon4_scalar_test: + CMP R1, R7 + BLT neon4_scalar_loop + +neon4_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_8bit_amd64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_8bit_amd64.s new file mode 100644 index 00000000000..090447f6f8a --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_8bit_amd64.s @@ -0,0 +1,105 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// unpackInt64x8bitAVX2 implements optimized unpacking 
for bitWidth=8 using AVX2 +// Each byte is already a complete value, processes 8 values at a time +// +// func unpackInt64x8bitAVX2(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x8bitAVX2(SB), NOSPLIT, $0-56 + MOVQ dst_base+0(FP), AX // AX = dst pointer + MOVQ dst_len+8(FP), DX // DX = dst length + MOVQ src_base+24(FP), BX // BX = src pointer + MOVQ bitWidth+48(FP), CX // CX = bitWidth (should be 8) + + // Check if we have at least 8 values to process + CMPQ DX, $8 + JB avx2_8bit_tail + + // Round down to multiple of 8 for AVX2 processing + MOVQ DX, DI + SHRQ $3, DI // DI = len / 8 + SHLQ $3, DI // DI = aligned length (multiple of 8) + XORQ SI, SI // SI = index + +avx2_8bit_loop: + // Load 8 bytes (8 x 8-bit values) + MOVQ (BX), R8 + + // Extract each byte and store as int64 + // Value 0 (byte 0) + MOVQ R8, R9 + ANDQ $0xFF, R9 + MOVQ R9, (AX) + + // Value 1 (byte 1) + MOVQ R8, R9 + SHRQ $8, R9 + ANDQ $0xFF, R9 + MOVQ R9, 8(AX) + + // Value 2 (byte 2) + MOVQ R8, R9 + SHRQ $16, R9 + ANDQ $0xFF, R9 + MOVQ R9, 16(AX) + + // Value 3 (byte 3) + MOVQ R8, R9 + SHRQ $24, R9 + ANDQ $0xFF, R9 + MOVQ R9, 24(AX) + + // Value 4 (byte 4) + MOVQ R8, R9 + SHRQ $32, R9 + ANDQ $0xFF, R9 + MOVQ R9, 32(AX) + + // Value 5 (byte 5) + MOVQ R8, R9 + SHRQ $40, R9 + ANDQ $0xFF, R9 + MOVQ R9, 40(AX) + + // Value 6 (byte 6) + MOVQ R8, R9 + SHRQ $48, R9 + ANDQ $0xFF, R9 + MOVQ R9, 48(AX) + + // Value 7 (byte 7) + MOVQ R8, R9 + SHRQ $56, R9 + MOVQ R9, 56(AX) + + // Advance pointers + ADDQ $8, BX // src += 8 bytes + ADDQ $64, AX // dst += 8 int64 (64 bytes) + ADDQ $8, SI // index += 8 + + CMPQ SI, DI + JNE avx2_8bit_loop + +avx2_8bit_tail: + // Handle remaining elements with scalar fallback + CMPQ SI, DX + JE avx2_8bit_done + + // Compute remaining elements + SUBQ SI, DX + +avx2_8bit_tail_loop: + MOVBQZX (BX), R8 // Load byte + MOVQ R8, (AX) // Store as int64 (zero-extended) + + ADDQ $1, BX // src++ + ADDQ $8, AX // dst++ + DECQ DX // remaining-- + + CMPQ DX, $0 + JNE 
avx2_8bit_tail_loop + +avx2_8bit_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_8bit_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_8bit_arm64.s new file mode 100644 index 00000000000..2e8a4e479ff --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_8bit_arm64.s @@ -0,0 +1,71 @@ +//go:build !purego + +#include "textflag.h" +#include "unpack_neon_macros_arm64.h" + +// unpackInt64x8bitNEON implements NEON unpacking for bitWidth=8 +// Each byte is already a complete value - just widen to int64 +// Processes 8 values at a time using NEON +// +// func unpackInt64x8bitNEON(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x8bitNEON(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth (should be 8) + + MOVD $0, R5 // R5 = index + + // Check if we have at least 8 values to process + CMP $8, R1 + BLT tbl8_tail + + // Round down to multiple of 8 for NEON processing + MOVD R1, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length (multiple of 8) + +tbl8_loop: + // Load 8 bytes (8 x 8-bit values) + VLD1 (R2), [V0.B8] + + // Widen to int64: byte → short → int → long + USHLL_8H_8B(1, 0) // V1.8H ← V0.8B (8x8-bit → 8x16-bit) + USHLL_4S_4H(2, 1) // V2.4S ← V1.4H (lower 4x16-bit → 4x32-bit) + USHLL2_4S_8H(3, 1) // V3.4S ← V1.8H (upper 4x16-bit → 4x32-bit) + USHLL_2D_2S(4, 2) // V4.2D ← V2.2S (lower 2x32-bit → 2x64-bit) + USHLL2_2D_4S(5, 2) // V5.2D ← V2.4S (upper 2x32-bit → 2x64-bit) + USHLL_2D_2S(6, 3) // V6.2D ← V3.2S (lower 2x32-bit → 2x64-bit) + USHLL2_2D_4S(7, 3) // V7.2D ← V3.4S (upper 2x32-bit → 2x64-bit) + + // Store 8 int64 values (64 bytes) + VST1 [V4.D2, V5.D2], (R0) + ADD $32, R0, R11 // Temporary pointer for second store + VST1 [V6.D2, V7.D2], (R11) + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes + ADD $64, R0, R0 // dst += 8 
int64 (64 bytes) + ADD $8, R5, R5 // index += 8 + + CMP R4, R5 + BLT tbl8_loop + +tbl8_tail: + // Handle remaining elements (0-7) one by one + CMP R1, R5 + BGE tbl8_done + +tbl8_tail_loop: + MOVBU (R2), R6 // Load byte + MOVD R6, (R0) // Store as int64 (zero-extended) + + ADD $1, R2, R2 // src++ + ADD $8, R0, R0 // dst++ + ADD $1, R5, R5 // index++ + + CMP R1, R5 + BLT tbl8_tail_loop + +tbl8_done: + RET diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_amd64.go b/vendor/github.com/parquet-go/bitpack/unpack_int64_amd64.go new file mode 100644 index 00000000000..d244f2b4204 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_amd64.go @@ -0,0 +1,38 @@ +//go:build !purego + +package bitpack + +import ( + "github.com/parquet-go/bitpack/unsafecast" + "golang.org/x/sys/cpu" +) + +//go:noescape +func unpackInt64Default(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x1to32bitsAVX2(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x1bitAVX2(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x2bitAVX2(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x4bitAVX2(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x8bitAVX2(dst []int64, src []byte, bitWidth uint) + +func unpackInt64(dst []int64, src []byte, bitWidth uint) { + hasAVX2 := cpu.X86.HasAVX2 + switch { + case hasAVX2 && bitWidth <= 32: + unpackInt64x1to32bitsAVX2(dst, src, bitWidth) + case bitWidth == 64: + copy(dst, unsafecast.Slice[int64](src)) + default: + unpackInt64Default(dst, src, bitWidth) + } +} diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_amd64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_amd64.s new file mode 100644 index 00000000000..6c07ca5c9a4 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_amd64.s @@ -0,0 +1,824 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// func 
unpackInt64Default(dst []int64, src []uint32, bitWidth uint) +TEXT ·unpackInt64Default(SB), NOSPLIT, $0-56 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), DX + MOVQ src_base+24(FP), BX + MOVQ bitWidth+48(FP), CX + + // Initialize + XORQ DI, DI // bitOffset + XORQ SI, SI // index + + // Check if length >= 4 for unrolled loop + CMPQ DX, $4 + JB scalar_loop_start + + // Calculate bitMask = (1 << bitWidth) - 1 + MOVQ $1, R8 + SHLQ CX, R8 + DECQ R8 + + // Calculate unrolled iterations: (length / 4) * 4 + MOVQ DX, R15 + SHRQ $2, R15 // R15 = length / 4 + JZ scalar_loop_start + SHLQ $2, R15 // R15 = (length / 4) * 4 + +unrolled_loop: + // Process 4 elements with 64-bit loads + + // === Element 0 === + MOVQ DI, R10 + SHRQ $6, R10 // i = bitOffset / 64 + MOVQ DI, R9 + ANDQ $63, R9 // j = bitOffset % 64 + MOVQ (BX)(R10*8), R11 // load 64-bit word + MOVQ R8, R12 + MOVQ R9, CX + SHLQ CL, R12 + ANDQ R12, R11 + SHRQ CL, R11 + + // Check if spans to next word + MOVQ R9, R13 + ADDQ bitWidth+48(FP), R13 + CMPQ R13, $64 + JBE store0 + MOVQ $64, CX + SUBQ R9, CX + MOVQ 8(BX)(R10*8), R14 + MOVQ R8, R12 + SHRQ CL, R12 + ANDQ R12, R14 + SHLQ CL, R14 + ORQ R14, R11 + +store0: + ADDQ bitWidth+48(FP), DI + MOVQ R11, (AX)(SI*8) + INCQ SI + + // === Element 1 === + MOVQ DI, R10 + SHRQ $6, R10 + MOVQ DI, R9 + ANDQ $63, R9 + MOVQ (BX)(R10*8), R11 + MOVQ R8, R12 + MOVQ R9, CX + SHLQ CL, R12 + ANDQ R12, R11 + SHRQ CL, R11 + + MOVQ R9, R13 + ADDQ bitWidth+48(FP), R13 + CMPQ R13, $64 + JBE store1 + MOVQ $64, CX + SUBQ R9, CX + MOVQ 8(BX)(R10*8), R14 + MOVQ R8, R12 + SHRQ CL, R12 + ANDQ R12, R14 + SHLQ CL, R14 + ORQ R14, R11 + +store1: + ADDQ bitWidth+48(FP), DI + MOVQ R11, (AX)(SI*8) + INCQ SI + + // === Element 2 === + MOVQ DI, R10 + SHRQ $6, R10 + MOVQ DI, R9 + ANDQ $63, R9 + MOVQ (BX)(R10*8), R11 + MOVQ R8, R12 + MOVQ R9, CX + SHLQ CL, R12 + ANDQ R12, R11 + SHRQ CL, R11 + + MOVQ R9, R13 + ADDQ bitWidth+48(FP), R13 + CMPQ R13, $64 + JBE store2 + MOVQ $64, CX + SUBQ R9, CX + MOVQ 8(BX)(R10*8), 
R14 + MOVQ R8, R12 + SHRQ CL, R12 + ANDQ R12, R14 + SHLQ CL, R14 + ORQ R14, R11 + +store2: + ADDQ bitWidth+48(FP), DI + MOVQ R11, (AX)(SI*8) + INCQ SI + + // === Element 3 === + MOVQ DI, R10 + SHRQ $6, R10 + MOVQ DI, R9 + ANDQ $63, R9 + MOVQ (BX)(R10*8), R11 + MOVQ R8, R12 + MOVQ R9, CX + SHLQ CL, R12 + ANDQ R12, R11 + SHRQ CL, R11 + + MOVQ R9, R13 + ADDQ bitWidth+48(FP), R13 + CMPQ R13, $64 + JBE store3 + MOVQ $64, CX + SUBQ R9, CX + MOVQ 8(BX)(R10*8), R14 + MOVQ R8, R12 + SHRQ CL, R12 + ANDQ R12, R14 + SHLQ CL, R14 + ORQ R14, R11 + +store3: + ADDQ bitWidth+48(FP), DI + MOVQ R11, (AX)(SI*8) + INCQ SI + + CMPQ SI, R15 + JB unrolled_loop + + // Check if done + CMPQ SI, DX + JE done + +scalar_loop_start: + // Fallback scalar loop for remaining elements + // Check if there are any elements to process + CMPQ SI, DX + JE done + + MOVQ bitWidth+48(FP), CX + MOVQ $1, R8 + SHLQ CX, R8 + DECQ R8 + +scalar_loop: + // i = bitOffset / 64 + MOVQ DI, R10 + SHRQ $6, R10 + + // j = bitOffset % 64 + MOVQ DI, R9 + ANDQ $63, R9 + + // Load 64-bit word and extract + MOVQ (BX)(R10*8), R11 + MOVQ R8, R12 + MOVQ R9, CX + SHLQ CL, R12 + ANDQ R12, R11 + SHRQ CL, R11 + + // Check for span + MOVQ R9, R13 + ADDQ bitWidth+48(FP), R13 + CMPQ R13, $64 + JBE scalar_next + + MOVQ $64, CX + SUBQ R9, CX + MOVQ 8(BX)(R10*8), R14 + MOVQ R8, R12 + SHRQ CL, R12 + ANDQ R12, R14 + SHLQ CL, R14 + ORQ R14, R11 + +scalar_next: + MOVQ R11, (AX)(SI*8) + ADDQ bitWidth+48(FP), DI + INCQ SI + CMPQ SI, DX + JNE scalar_loop + JMP done + +zero_fill: + // Fill output with zeros for bitWidth==0 + XORQ SI, SI // Initialize index + XORQ R8, R8 // Zero value +zero_loop: + CMPQ SI, DX + JE done + MOVQ R8, (AX)(SI*8) + INCQ SI + JMP zero_loop + +done: + RET + +// This bit unpacking function was inspired from the 32 bit version, but +// adapted to account for the fact that eight 64 bit values span across +// two YMM registers, and across lanes of YMM registers. 
+// +// Because of the two lanes of YMM registers, we cannot use the VPSHUFB +// instruction to dispatch bytes of the input to the registers. Instead we use +// the VPERMD instruction, which has higher latency but supports dispatching +// bytes across register lanes. Measurable throughput gains remain despite the +// algorithm running on a few more CPU cycles per loop. +// +// The initialization phase of this algorithm generates masks for +// permutations and shifts used to decode the bit-packed values. +// +// The permutation masks are written to Y7 and Y8, and contain the results +// of this formula: +// +// temp[i] = (bitWidth * i) / 32 +// mask[i] = temp[i] | ((temp[i] + 1) << 32) +// +// Since VPERMQ only supports reading the permutation combination from an +// immediate value, we use VPERMD and generate permutation for pairs of two +// consecutive 32 bit words, which is why we have the upper part of each 64 +// bit word set with (x+1)<<32. +// +// The masks for right shifts are written to Y5 and Y6, and computed with +// this formula: +// +// shift[i] = (bitWidth * i) - (32 * ((bitWidth * i) / 32)) +// +// The amount to shift by is the number of values previously unpacked, offseted +// by the byte count of 32 bit words that we read from first bits from. +// +// Technically the masks could be precomputed and declared in global tables; +// however, declaring masks for all bit width is tedious and makes code +// maintenance more costly for no measurable benefits on production workloads. 
+// +// unpackInt64x1to32bitsAVX2 implements optimized unpacking for bit widths 1-32 +// Uses specialized kernels for common bit widths with batched processing +// +// func unpackInt64x1to32bitsAVX2(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x1to32bitsAVX2(SB), NOSPLIT, $56-56 + NO_LOCAL_POINTERS + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), DX + MOVQ src_base+24(FP), BX + MOVQ bitWidth+48(FP), CX + + // Check if we have enough values for specialized kernels + // 1-bit needs 8, 2-bit needs 8, 4-bit needs 8, 8-bit needs 8 + // 16-bit needs 4, 32-bit needs 2 + CMPQ CX, $1 + JE check_1bit + CMPQ CX, $2 + JE check_2bit + CMPQ CX, $3 + JE check_3bit + CMPQ CX, $4 + JE check_4bit + CMPQ CX, $5 + JE check_5bit + CMPQ CX, $6 + JE check_6bit + CMPQ CX, $7 + JE check_7bit + CMPQ CX, $8 + JE check_8bit + CMPQ CX, $16 + JE check_16bit + CMPQ CX, $32 + JE check_32bit + + // For other bit widths, check if we have at least 8 values + CMPQ DX, $8 + JB tail + JMP generic_avx2 + +check_1bit: + CMPQ DX, $8 + JB tail + JMP int64_1bit + +check_2bit: + CMPQ DX, $8 + JB tail + JMP int64_2bit + +check_3bit: + CMPQ DX, $8 + JB tail + JMP int64_3bit + +check_4bit: + CMPQ DX, $8 + JB tail + JMP int64_4bit + +check_5bit: + CMPQ DX, $8 + JB tail + JMP int64_5bit + +check_6bit: + CMPQ DX, $8 + JB tail + JMP int64_6bit + +check_7bit: + CMPQ DX, $8 + JB tail + JMP int64_7bit + +check_8bit: + CMPQ DX, $8 + JB tail + JMP int64_8bit + +check_16bit: + CMPQ DX, $4 + JB tail + JMP int64_16bit + +check_32bit: + CMPQ DX, $2 + JB tail + JMP int64_32bit + + +int64_1bit: + // Call specialized 1-bit kernel + MOVQ AX, dst_base-56(SP) + MOVQ DX, dst_len-48(SP) + MOVQ BX, src_base-32(SP) + MOVQ CX, bitWidth-8(SP) + CALL ·unpackInt64x1bitAVX2(SB) + RET + +int64_2bit: + // Call specialized 2-bit kernel + MOVQ AX, dst_base-56(SP) + MOVQ DX, dst_len-48(SP) + MOVQ BX, src_base-32(SP) + MOVQ CX, bitWidth-8(SP) + CALL ·unpackInt64x2bitAVX2(SB) + RET + +int64_3bit: + // BitWidth 3: 8 int64 values 
packed in 3 bytes + CMPQ DX, $8 + JB tail + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + XORQ SI, SI + +int64_3bit_loop: + MOVLQZX (BX), R8 + + MOVQ R8, R9 + ANDQ $7, R9 + MOVQ R9, (AX) + + MOVQ R8, R9 + SHRQ $3, R9 + ANDQ $7, R9 + MOVQ R9, 8(AX) + + MOVQ R8, R9 + SHRQ $6, R9 + ANDQ $7, R9 + MOVQ R9, 16(AX) + + MOVQ R8, R9 + SHRQ $9, R9 + ANDQ $7, R9 + MOVQ R9, 24(AX) + + MOVQ R8, R9 + SHRQ $12, R9 + ANDQ $7, R9 + MOVQ R9, 32(AX) + + MOVQ R8, R9 + SHRQ $15, R9 + ANDQ $7, R9 + MOVQ R9, 40(AX) + + MOVQ R8, R9 + SHRQ $18, R9 + ANDQ $7, R9 + MOVQ R9, 48(AX) + + MOVQ R8, R9 + SHRQ $21, R9 + ANDQ $7, R9 + MOVQ R9, 56(AX) + + ADDQ $3, BX + ADDQ $64, AX + ADDQ $8, SI + + CMPQ SI, DI + JNE int64_3bit_loop + + CMPQ SI, DX + JE done + SUBQ SI, DX + JMP tail + +int64_4bit: + // Call specialized 4-bit kernel + MOVQ AX, dst_base-56(SP) + MOVQ DX, dst_len-48(SP) + MOVQ BX, src_base-32(SP) + MOVQ CX, bitWidth-8(SP) + CALL ·unpackInt64x4bitAVX2(SB) + RET + +int64_5bit: + // BitWidth 5: 8 int64 values packed in 5 bytes + CMPQ DX, $8 + JB tail + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + XORQ SI, SI + +int64_5bit_loop: + MOVQ (BX), R8 + + MOVQ R8, R9 + ANDQ $31, R9 + MOVQ R9, (AX) + + MOVQ R8, R9 + SHRQ $5, R9 + ANDQ $31, R9 + MOVQ R9, 8(AX) + + MOVQ R8, R9 + SHRQ $10, R9 + ANDQ $31, R9 + MOVQ R9, 16(AX) + + MOVQ R8, R9 + SHRQ $15, R9 + ANDQ $31, R9 + MOVQ R9, 24(AX) + + MOVQ R8, R9 + SHRQ $20, R9 + ANDQ $31, R9 + MOVQ R9, 32(AX) + + MOVQ R8, R9 + SHRQ $25, R9 + ANDQ $31, R9 + MOVQ R9, 40(AX) + + MOVQ R8, R9 + SHRQ $30, R9 + ANDQ $31, R9 + MOVQ R9, 48(AX) + + MOVQ R8, R9 + SHRQ $35, R9 + ANDQ $31, R9 + MOVQ R9, 56(AX) + + ADDQ $5, BX + ADDQ $64, AX + ADDQ $8, SI + + CMPQ SI, DI + JNE int64_5bit_loop + + CMPQ SI, DX + JE done + SUBQ SI, DX + JMP tail + +int64_6bit: + // BitWidth 6: 8 int64 values packed in 6 bytes + CMPQ DX, $8 + JB tail + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + XORQ SI, SI + +int64_6bit_loop: + MOVQ (BX), R8 + + MOVQ R8, R9 + ANDQ $63, R9 + MOVQ R9, (AX) + + MOVQ 
R8, R9 + SHRQ $6, R9 + ANDQ $63, R9 + MOVQ R9, 8(AX) + + MOVQ R8, R9 + SHRQ $12, R9 + ANDQ $63, R9 + MOVQ R9, 16(AX) + + MOVQ R8, R9 + SHRQ $18, R9 + ANDQ $63, R9 + MOVQ R9, 24(AX) + + MOVQ R8, R9 + SHRQ $24, R9 + ANDQ $63, R9 + MOVQ R9, 32(AX) + + MOVQ R8, R9 + SHRQ $30, R9 + ANDQ $63, R9 + MOVQ R9, 40(AX) + + MOVQ R8, R9 + SHRQ $36, R9 + ANDQ $63, R9 + MOVQ R9, 48(AX) + + MOVQ R8, R9 + SHRQ $42, R9 + ANDQ $63, R9 + MOVQ R9, 56(AX) + + ADDQ $6, BX + ADDQ $64, AX + ADDQ $8, SI + + CMPQ SI, DI + JNE int64_6bit_loop + + CMPQ SI, DX + JE done + SUBQ SI, DX + JMP tail + +int64_7bit: + // BitWidth 7: 8 int64 values packed in 7 bytes + CMPQ DX, $8 + JB tail + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + XORQ SI, SI + +int64_7bit_loop: + MOVQ (BX), R8 + + MOVQ R8, R9 + ANDQ $127, R9 + MOVQ R9, (AX) + + MOVQ R8, R9 + SHRQ $7, R9 + ANDQ $127, R9 + MOVQ R9, 8(AX) + + MOVQ R8, R9 + SHRQ $14, R9 + ANDQ $127, R9 + MOVQ R9, 16(AX) + + MOVQ R8, R9 + SHRQ $21, R9 + ANDQ $127, R9 + MOVQ R9, 24(AX) + + MOVQ R8, R9 + SHRQ $28, R9 + ANDQ $127, R9 + MOVQ R9, 32(AX) + + MOVQ R8, R9 + SHRQ $35, R9 + ANDQ $127, R9 + MOVQ R9, 40(AX) + + MOVQ R8, R9 + SHRQ $42, R9 + ANDQ $127, R9 + MOVQ R9, 48(AX) + + MOVQ R8, R9 + SHRQ $49, R9 + ANDQ $127, R9 + MOVQ R9, 56(AX) + + ADDQ $7, BX + ADDQ $64, AX + ADDQ $8, SI + + CMPQ SI, DI + JNE int64_7bit_loop + + CMPQ SI, DX + JE done + SUBQ SI, DX + JMP tail + +int64_8bit: + // Call specialized 8-bit kernel + MOVQ AX, dst_base-56(SP) + MOVQ DX, dst_len-48(SP) + MOVQ BX, src_base-32(SP) + MOVQ CX, bitWidth-8(SP) + CALL ·unpackInt64x8bitAVX2(SB) + RET + +int64_16bit: + // BitWidth 16: 4 int64 values packed in 8 bytes + // Process 4 values at a time + + MOVQ DX, DI + SHRQ $2, DI // DI = len / 4 + SHLQ $2, DI // DI = aligned length (multiple of 4) + XORQ SI, SI // SI = index + + CMPQ DI, $0 + JE tail + +int64_16bit_loop: + // Load 8 bytes as 4 uint16 values + MOVQ (BX), R8 + + // Extract 16-bit values and write as int64 + // Value 0 (bits 0-15) + MOVQ R8, R9 + 
ANDQ $0xFFFF, R9 + MOVQ R9, (AX) + + // Value 1 (bits 16-31) + MOVQ R8, R9 + SHRQ $16, R9 + ANDQ $0xFFFF, R9 + MOVQ R9, 8(AX) + + // Value 2 (bits 32-47) + MOVQ R8, R9 + SHRQ $32, R9 + ANDQ $0xFFFF, R9 + MOVQ R9, 16(AX) + + // Value 3 (bits 48-63) + MOVQ R8, R9 + SHRQ $48, R9 + MOVQ R9, 24(AX) + + // Advance pointers + ADDQ $8, BX // src += 8 bytes (4 values) + ADDQ $32, AX // dst += 4 int64 (32 bytes) + ADDQ $4, SI // index += 4 + + CMPQ SI, DI + JNE int64_16bit_loop + + // Handle tail with scalar + CMPQ SI, DX + JE done + + LEAQ (AX), AX // AX already points to correct position + SUBQ SI, DX + JMP tail + +int64_32bit: + // BitWidth 32: Each value is exactly 4 bytes + // Process values one at a time for simplicity + + MOVQ DX, DI // DI = total length + XORQ SI, SI // SI = index + + CMPQ DI, $0 + JE done + +int64_32bit_loop: + // Load 4 bytes as one uint32 value + MOVLQZX (BX), R8 // Load 32-bit value, zero-extend to 64-bit + MOVQ R8, (AX) // Store as int64 + + // Advance pointers + ADDQ $4, BX // src += 4 bytes (1 value) + ADDQ $8, AX // dst += 1 int64 (8 bytes) + ADDQ $1, SI // index += 1 + + CMPQ SI, DI + JNE int64_32bit_loop + + // All values processed + JMP done + +generic_avx2: + // Optimized AVX2 with reduced setup overhead + CMPQ DX, $8 + JB tail + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + XORQ SI, SI + + // Compute bitMask + MOVQ $1, R8 + SHLQ CX, R8 + DECQ R8 + MOVQ R8, X0 + VPBROADCASTQ X0, Y0 + + // Use pre-computed table for bitWidths 9-31 + CMPQ CX, $9 + JB compute_masks + CMPQ CX, $31 + JA compute_masks + + // Calculate table offset: (bitWidth - 9) * 128 + MOVQ CX, R9 + SUBQ $9, R9 + SHLQ $7, R9 // multiply by 128 + + // Load pre-computed permutation and shift masks from table + LEAQ ·permuteInt64Table(SB), R10 + VMOVDQU (R10)(R9*1), Y7 + VMOVDQU 32(R10)(R9*1), Y8 + VMOVDQU 64(R10)(R9*1), Y5 + VMOVDQU 96(R10)(R9*1), Y6 + JMP generic_loop + +compute_masks: + // Fallback: compute masks dynamically (original code) + VPCMPEQQ Y1, Y1, Y1 + VPSRLQ $63, 
Y1, Y1 + MOVQ CX, X2 + VPBROADCASTQ X2, Y2 + VMOVDQU range0n7<>+0(SB), Y3 + VMOVDQU range0n7<>+32(SB), Y4 + VPMULLD Y2, Y3, Y5 + VPMULLD Y2, Y4, Y6 + VPSRLQ $5, Y5, Y7 + VPSRLQ $5, Y6, Y8 + VPSLLQ $5, Y7, Y9 + VPSLLQ $5, Y8, Y10 + VPADDQ Y1, Y7, Y11 + VPADDQ Y1, Y8, Y12 + VPSLLQ $32, Y11, Y11 + VPSLLQ $32, Y12, Y12 + VPOR Y11, Y7, Y7 + VPOR Y12, Y8, Y8 + VPSUBQ Y9, Y5, Y5 + VPSUBQ Y10, Y6, Y6 + +generic_loop: + VMOVDQU (BX), Y1 + VPERMD Y1, Y7, Y2 + VPERMD Y1, Y8, Y3 + VPSRLVQ Y5, Y2, Y2 + VPSRLVQ Y6, Y3, Y3 + VPAND Y0, Y2, Y2 + VPAND Y0, Y3, Y3 + VMOVDQU Y2, (AX)(SI*8) + VMOVDQU Y3, 32(AX)(SI*8) + ADDQ CX, BX + ADDQ $8, SI + CMPQ SI, DI + JNE generic_loop + VZEROUPPER + + CMPQ SI, DX + JE done + LEAQ (AX)(SI*8), AX + SUBQ SI, DX +tail: + MOVQ AX, dst_base-56(SP) + MOVQ DX, dst_len-48(SP) + MOVQ BX, src_base-32(SP) + MOVQ CX, bitWidth-8(SP) + CALL ·unpackInt64Default(SB) +done: + RET + +GLOBL range0n7<>(SB), RODATA|NOPTR, $64 +DATA range0n7<>+0(SB)/8, $0 +DATA range0n7<>+8(SB)/8, $1 +DATA range0n7<>+16(SB)/8, $2 +DATA range0n7<>+24(SB)/8, $3 +DATA range0n7<>+32(SB)/8, $4 +DATA range0n7<>+40(SB)/8, $5 +DATA range0n7<>+48(SB)/8, $6 +DATA range0n7<>+56(SB)/8, $7 diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_arm64.go b/vendor/github.com/parquet-go/bitpack/unpack_int64_arm64.go new file mode 100644 index 00000000000..02e6afc9943 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_arm64.go @@ -0,0 +1,50 @@ +//go:build !purego + +package bitpack + +import ( + "github.com/parquet-go/bitpack/unsafecast" +) + +//go:noescape +func unpackInt64Default(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x1to32bitsARM64(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x1bitNEON(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x2bitNEON(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x3bitNEON(dst []int64, src []byte, bitWidth uint) + +//go:noescape 
+func unpackInt64x4bitNEON(dst []int64, src []byte, bitWidth uint) + +//go:noescape +func unpackInt64x8bitNEON(dst []int64, src []byte, bitWidth uint) + +func unpackInt64(dst []int64, src []byte, bitWidth uint) { + // For ARM64, NEON (Advanced SIMD) is always available + // Use table-based NEON operations for small bit widths + switch { + case bitWidth == 1: + unpackInt64x1bitNEON(dst, src, bitWidth) + case bitWidth == 2: + unpackInt64x2bitNEON(dst, src, bitWidth) + case bitWidth == 4: + unpackInt64x4bitNEON(dst, src, bitWidth) + case bitWidth == 8: + unpackInt64x8bitNEON(dst, src, bitWidth) + // bitWidth == 3,5,6,7: Skip NEON table (don't divide evenly into 8) + case bitWidth <= 32: + unpackInt64x1to32bitsARM64(dst, src, bitWidth) + case bitWidth == 64: + copy(dst, unsafecast.Slice[int64](src)) + default: + unpackInt64Default(dst, src, bitWidth) + } +} diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_arm64.s b/vendor/github.com/parquet-go/bitpack/unpack_int64_arm64.s new file mode 100644 index 00000000000..2c638fbfe6d --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_arm64.s @@ -0,0 +1,943 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// func unpackInt64Default(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64Default(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth + + // Initialize registers + MOVD $0, R5 // R5 = bitOffset + MOVD $0, R6 // R6 = index + + // Check if length >= 4 for unrolled loop + CMP $4, R1 + BLT scalar_loop_start + + // Calculate bitMask = (1 << bitWidth) - 1 + MOVD $1, R4 + LSL R3, R4, R4 + SUB $1, R4, R4 // R4 = bitMask + + // Calculate unrolled iterations: (length / 4) * 4 + LSR $2, R1, R16 // R16 = length / 4 + CBZ R16, scalar_loop_start + LSL $2, R16, R16 // R16 = (length / 4) * 4 + +unrolled_loop: + // Process 4 
elements with instruction-level parallelism + // Use 64-bit loads for better performance + + // === Element 0 === + LSR $6, R5, R7 // i = bitOffset / 64 + AND $63, R5, R8 // j = bitOffset % 64 + MOVD (R2)(R7<<3), R9 // load 64-bit word from src[i] + LSL R8, R4, R10 + AND R10, R9, R9 + LSR R8, R9, R9 + + // Check if value spans into next word + ADD R8, R3, R11 + CMP $64, R11 + BLE store0 + MOVD $64, R12 + SUB R8, R12, R12 + ADD $1, R7, R13 + MOVD (R2)(R13<<3), R14 + LSR R12, R4, R15 + AND R15, R14, R14 + LSL R12, R14, R14 + ORR R14, R9, R9 + +store0: + ADD R3, R5, R5 // bitOffset += bitWidth + MOVD R9, (R0)(R6<<3) + ADD $1, R6, R6 + + // === Element 1 === + LSR $6, R5, R7 + AND $63, R5, R8 + MOVD (R2)(R7<<3), R9 + LSL R8, R4, R10 + AND R10, R9, R9 + LSR R8, R9, R9 + + ADD R8, R3, R11 + CMP $64, R11 + BLE store1 + MOVD $64, R12 + SUB R8, R12, R12 + ADD $1, R7, R13 + MOVD (R2)(R13<<3), R14 + LSR R12, R4, R15 + AND R15, R14, R14 + LSL R12, R14, R14 + ORR R14, R9, R9 + +store1: + ADD R3, R5, R5 + MOVD R9, (R0)(R6<<3) + ADD $1, R6, R6 + + // === Element 2 === + LSR $6, R5, R7 + AND $63, R5, R8 + MOVD (R2)(R7<<3), R9 + LSL R8, R4, R10 + AND R10, R9, R9 + LSR R8, R9, R9 + + ADD R8, R3, R11 + CMP $64, R11 + BLE store2 + MOVD $64, R12 + SUB R8, R12, R12 + ADD $1, R7, R13 + MOVD (R2)(R13<<3), R14 + LSR R12, R4, R15 + AND R15, R14, R14 + LSL R12, R14, R14 + ORR R14, R9, R9 + +store2: + ADD R3, R5, R5 + MOVD R9, (R0)(R6<<3) + ADD $1, R6, R6 + + // === Element 3 === + LSR $6, R5, R7 + AND $63, R5, R8 + MOVD (R2)(R7<<3), R9 + LSL R8, R4, R10 + AND R10, R9, R9 + LSR R8, R9, R9 + + ADD R8, R3, R11 + CMP $64, R11 + BLE store3 + MOVD $64, R12 + SUB R8, R12, R12 + ADD $1, R7, R13 + MOVD (R2)(R13<<3), R14 + LSR R12, R4, R15 + AND R15, R14, R14 + LSL R12, R14, R14 + ORR R14, R9, R9 + +store3: + ADD R3, R5, R5 + MOVD R9, (R0)(R6<<3) + ADD $1, R6, R6 + + CMP R16, R6 + BLT unrolled_loop + + // Check if done + CMP R1, R6 + BEQ done + +scalar_loop_start: + // Fallback scalar loop for 
remaining elements + MOVD $1, R4 + LSL R3, R4, R4 + SUB $1, R4, R4 // R4 = bitMask + +scalar_loop: + LSR $6, R5, R7 // i = bitOffset / 64 + AND $63, R5, R8 // j = bitOffset % 64 + MOVD (R2)(R7<<3), R9 // load 64-bit word + LSL R8, R4, R10 // bitMask << j + AND R10, R9, R9 + LSR R8, R9, R9 // extracted value + + // Check for span + ADD R8, R3, R11 + CMP $64, R11 + BLE scalar_next + MOVD $64, R12 + SUB R8, R12, R12 // k = 64 - j + ADD $1, R7, R13 + MOVD (R2)(R13<<3), R14 + LSR R12, R4, R15 + AND R15, R14, R14 + LSL R12, R14, R14 + ORR R14, R9, R9 + +scalar_next: + MOVD R9, (R0)(R6<<3) // dst[index] = d + ADD R3, R5, R5 // bitOffset += bitWidth + ADD $1, R6, R6 // index++ + +scalar_test: + CMP R1, R6 + BNE scalar_loop + +done: + RET + +// unpackInt64x1to32bitsARM64 implements optimized unpacking for bit widths 1-32 +// Uses optimized scalar ARM64 operations with batched processing +// +// func unpackInt64x1to32bitsARM64(dst []int64, src []byte, bitWidth uint) +TEXT ·unpackInt64x1to32bitsARM64(SB), NOSPLIT, $0-56 + MOVD dst_base+0(FP), R0 // R0 = dst pointer + MOVD dst_len+8(FP), R1 // R1 = dst length + MOVD src_base+24(FP), R2 // R2 = src pointer + MOVD bitWidth+48(FP), R3 // R3 = bitWidth + + // Check if we have at least 4 values to process + CMP $4, R1 + BLT scalar_fallback_int64 + + // Determine which path to use based on bitWidth + CMP $1, R3 + BEQ int64_1bit + CMP $2, R3 + BEQ int64_2bit + CMP $3, R3 + BEQ int64_3bit + CMP $4, R3 + BEQ int64_4bit + CMP $5, R3 + BEQ int64_5bit + CMP $6, R3 + BEQ int64_6bit + CMP $7, R3 + BEQ int64_7bit + CMP $8, R3 + BEQ int64_8bit + CMP $16, R3 + BEQ int64_16bit + CMP $32, R3 + BEQ int64_32bit + + // For other bit widths, fall back to scalar + B scalar_fallback_int64 + +int64_1bit: + // BitWidth 1: 8 int64 values packed in 1 byte + // Process 8 values at a time + + // Round down to multiple of 8 for processing + MOVD R1, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length (multiple of 8) + + MOVD $0, R5 + 
CMP $0, R4 + BEQ scalar_fallback_int64 + +int64_1bit_loop: + // Load 1 byte (contains 8 values, 1 bit each) + MOVBU (R2), R6 + + // Extract 8 bits + AND $1, R6, R7 + MOVD R7, (R0) + LSR $1, R6, R7 + AND $1, R7, R7 + MOVD R7, 8(R0) + LSR $2, R6, R7 + AND $1, R7, R7 + MOVD R7, 16(R0) + LSR $3, R6, R7 + AND $1, R7, R7 + MOVD R7, 24(R0) + LSR $4, R6, R7 + AND $1, R7, R7 + MOVD R7, 32(R0) + LSR $5, R6, R7 + AND $1, R7, R7 + MOVD R7, 40(R0) + LSR $6, R6, R7 + AND $1, R7, R7 + MOVD R7, 48(R0) + LSR $7, R6, R7 + AND $1, R7, R7 + MOVD R7, 56(R0) + + ADD $1, R2, R2 + ADD $64, R0, R0 + ADD $8, R5, R5 + + CMP R4, R5 + BLT int64_1bit_loop + + CMP R1, R5 + BEQ int64_done + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_2bit: + // BitWidth 2: 8 int64 values packed in 2 bytes + MOVD R1, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback_int64 + +int64_2bit_loop: + MOVHU (R2), R6 + + AND $3, R6, R7 + MOVD R7, (R0) + LSR $2, R6, R7 + AND $3, R7, R7 + MOVD R7, 8(R0) + LSR $4, R6, R7 + AND $3, R7, R7 + MOVD R7, 16(R0) + LSR $6, R6, R7 + AND $3, R7, R7 + MOVD R7, 24(R0) + LSR $8, R6, R7 + AND $3, R7, R7 + MOVD R7, 32(R0) + LSR $10, R6, R7 + AND $3, R7, R7 + MOVD R7, 40(R0) + LSR $12, R6, R7 + AND $3, R7, R7 + MOVD R7, 48(R0) + LSR $14, R6, R7 + AND $3, R7, R7 + MOVD R7, 56(R0) + + ADD $2, R2, R2 + ADD $64, R0, R0 + ADD $8, R5, R5 + + CMP R4, R5 + BLT int64_2bit_loop + + CMP R1, R5 + BEQ int64_done + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_3bit: + // BitWidth 3: 8 int64 values packed in 3 bytes + MOVD R1, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback_int64 + +int64_3bit_loop: + MOVWU (R2), R6 + + AND $7, R6, R7 + MOVD R7, (R0) + LSR $3, R6, R7 + AND $7, R7, R7 + MOVD R7, 8(R0) + LSR $6, R6, R7 + AND $7, R7, R7 + MOVD R7, 16(R0) + LSR $9, R6, R7 + AND $7, R7, R7 + MOVD R7, 24(R0) + LSR $12, R6, R7 + AND $7, R7, R7 + MOVD R7, 32(R0) + LSR $15, R6, R7 + AND $7, R7, R7 + MOVD R7, 40(R0) + 
LSR $18, R6, R7 + AND $7, R7, R7 + MOVD R7, 48(R0) + LSR $21, R6, R7 + AND $7, R7, R7 + MOVD R7, 56(R0) + + ADD $3, R2, R2 + ADD $64, R0, R0 + ADD $8, R5, R5 + + CMP R4, R5 + BLT int64_3bit_loop + + CMP R1, R5 + BEQ int64_done + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_4bit: + // BitWidth 4: 8 int64 values packed in 4 bytes + MOVD R1, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback_int64 + +int64_4bit_loop: + MOVWU (R2), R6 + + AND $15, R6, R7 + MOVD R7, (R0) + LSR $4, R6, R7 + AND $15, R7, R7 + MOVD R7, 8(R0) + LSR $8, R6, R7 + AND $15, R7, R7 + MOVD R7, 16(R0) + LSR $12, R6, R7 + AND $15, R7, R7 + MOVD R7, 24(R0) + LSR $16, R6, R7 + AND $15, R7, R7 + MOVD R7, 32(R0) + LSR $20, R6, R7 + AND $15, R7, R7 + MOVD R7, 40(R0) + LSR $24, R6, R7 + AND $15, R7, R7 + MOVD R7, 48(R0) + LSR $28, R6, R7 + AND $15, R7, R7 + MOVD R7, 56(R0) + + ADD $4, R2, R2 + ADD $64, R0, R0 + ADD $8, R5, R5 + + CMP R4, R5 + BLT int64_4bit_loop + + CMP R1, R5 + BEQ int64_done + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_5bit: + // BitWidth 5: 8 int64 values packed in 5 bytes + MOVD R1, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback_int64 + +int64_5bit_loop: + MOVD (R2), R6 + + AND $31, R6, R7 + MOVD R7, (R0) + LSR $5, R6, R7 + AND $31, R7, R7 + MOVD R7, 8(R0) + LSR $10, R6, R7 + AND $31, R7, R7 + MOVD R7, 16(R0) + LSR $15, R6, R7 + AND $31, R7, R7 + MOVD R7, 24(R0) + LSR $20, R6, R7 + AND $31, R7, R7 + MOVD R7, 32(R0) + LSR $25, R6, R7 + AND $31, R7, R7 + MOVD R7, 40(R0) + LSR $30, R6, R7 + AND $31, R7, R7 + MOVD R7, 48(R0) + LSR $35, R6, R7 + AND $31, R7, R7 + MOVD R7, 56(R0) + + ADD $5, R2, R2 + ADD $64, R0, R0 + ADD $8, R5, R5 + + CMP R4, R5 + BLT int64_5bit_loop + + CMP R1, R5 + BEQ int64_done + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_6bit: + // BitWidth 6: 8 int64 values packed in 6 bytes + MOVD R1, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + + MOVD $0, R5 + CMP $0, R4 + 
BEQ scalar_fallback_int64 + +int64_6bit_loop: + MOVD (R2), R6 + + AND $63, R6, R7 + MOVD R7, (R0) + LSR $6, R6, R7 + AND $63, R7, R7 + MOVD R7, 8(R0) + LSR $12, R6, R7 + AND $63, R7, R7 + MOVD R7, 16(R0) + LSR $18, R6, R7 + AND $63, R7, R7 + MOVD R7, 24(R0) + LSR $24, R6, R7 + AND $63, R7, R7 + MOVD R7, 32(R0) + LSR $30, R6, R7 + AND $63, R7, R7 + MOVD R7, 40(R0) + LSR $36, R6, R7 + AND $63, R7, R7 + MOVD R7, 48(R0) + LSR $42, R6, R7 + AND $63, R7, R7 + MOVD R7, 56(R0) + + ADD $6, R2, R2 + ADD $64, R0, R0 + ADD $8, R5, R5 + + CMP R4, R5 + BLT int64_6bit_loop + + CMP R1, R5 + BEQ int64_done + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_7bit: + // BitWidth 7: 8 int64 values packed in 7 bytes + MOVD R1, R4 + LSR $3, R4, R4 + LSL $3, R4, R4 + + MOVD $0, R5 + CMP $0, R4 + BEQ scalar_fallback_int64 + +int64_7bit_loop: + MOVD (R2), R6 + + AND $127, R6, R7 + MOVD R7, (R0) + LSR $7, R6, R7 + AND $127, R7, R7 + MOVD R7, 8(R0) + LSR $14, R6, R7 + AND $127, R7, R7 + MOVD R7, 16(R0) + LSR $21, R6, R7 + AND $127, R7, R7 + MOVD R7, 24(R0) + LSR $28, R6, R7 + AND $127, R7, R7 + MOVD R7, 32(R0) + LSR $35, R6, R7 + AND $127, R7, R7 + MOVD R7, 40(R0) + LSR $42, R6, R7 + AND $127, R7, R7 + MOVD R7, 48(R0) + LSR $49, R6, R7 + AND $127, R7, R7 + MOVD R7, 56(R0) + + ADD $7, R2, R2 + ADD $64, R0, R0 + ADD $8, R5, R5 + + CMP R4, R5 + BLT int64_7bit_loop + + CMP R1, R5 + BEQ int64_done + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_8bit: + // BitWidth 8: 8 int64 values packed in 8 bytes + // Process 8 values at a time + + // Round down to multiple of 8 for processing + MOVD R1, R4 + LSR $3, R4, R4 // R4 = len / 8 + LSL $3, R4, R4 // R4 = aligned length (multiple of 8) + + MOVD $0, R5 // R5 = index + CMP $0, R4 + BEQ scalar_fallback_int64 + +int64_8bit_loop: + // Load 8 bytes (contains 8 values, 1 byte each) + MOVD (R2), R6 + + // Extract 8 bytes and store as int64 + // Value 0: byte 0 + AND $0xFF, R6, R7 + MOVD R7, (R0) + + // Value 1: byte 1 + LSR $8, R6, R7 + AND 
$0xFF, R7, R7 + MOVD R7, 8(R0) + + // Value 2: byte 2 + LSR $16, R6, R7 + AND $0xFF, R7, R7 + MOVD R7, 16(R0) + + // Value 3: byte 3 + LSR $24, R6, R7 + AND $0xFF, R7, R7 + MOVD R7, 24(R0) + + // Value 4: byte 4 + LSR $32, R6, R7 + AND $0xFF, R7, R7 + MOVD R7, 32(R0) + + // Value 5: byte 5 + LSR $40, R6, R7 + AND $0xFF, R7, R7 + MOVD R7, 40(R0) + + // Value 6: byte 6 + LSR $48, R6, R7 + AND $0xFF, R7, R7 + MOVD R7, 48(R0) + + // Value 7: byte 7 + LSR $56, R6, R7 + MOVD R7, 56(R0) + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes (8 values) + ADD $64, R0, R0 // dst += 8 int64 (64 bytes) + ADD $8, R5, R5 // index += 8 + + CMP R4, R5 + BLT int64_8bit_loop + + // Handle tail with scalar + CMP R1, R5 + BEQ int64_done + + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_16bit: + // BitWidth 16: 4 int64 values packed in 8 bytes + // Process 4 values at a time + + MOVD R1, R4 + LSR $2, R4, R4 // R4 = len / 4 + LSL $2, R4, R4 // R4 = aligned length (multiple of 4) + + MOVD $0, R5 // R5 = index + CMP $0, R4 + BEQ scalar_fallback_int64 + +int64_16bit_loop: + // Load 8 bytes as 4 uint16 values + MOVD (R2), R6 + + // Extract 16-bit values and write as int64 + // Value 0 (bits 0-15) + AND $0xFFFF, R6, R7 + MOVD R7, (R0) + + // Value 1 (bits 16-31) + LSR $16, R6, R7 + AND $0xFFFF, R7, R7 + MOVD R7, 8(R0) + + // Value 2 (bits 32-47) + LSR $32, R6, R7 + AND $0xFFFF, R7, R7 + MOVD R7, 16(R0) + + // Value 3 (bits 48-63) + LSR $48, R6, R7 + MOVD R7, 24(R0) + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes (4 values) + ADD $32, R0, R0 // dst += 4 int64 (32 bytes) + ADD $4, R5, R5 // index += 4 + + CMP R4, R5 + BLT int64_16bit_loop + + // Handle tail with scalar + CMP R1, R5 + BEQ int64_done + + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_32bit: + // BitWidth 32: 2 int64 values packed in 8 bytes + // Process 2 values at a time + + MOVD R1, R4 + LSR $1, R4, R4 // R4 = len / 2 + LSL $1, R4, R4 // R4 = aligned length (multiple of 2) + + MOVD $0, R5 // 
R5 = index + CMP $0, R4 + BEQ scalar_fallback_int64 + +int64_32bit_loop: + // Load 8 bytes as 2 uint32 values + MOVD (R2), R6 + + // Extract 32-bit values and write as int64 + // Value 0 (bits 0-31) + AND $0xFFFFFFFF, R6, R7 + MOVD R7, (R0) + + // Value 1 (bits 32-63) + LSR $32, R6, R7 + MOVD R7, 8(R0) + + // Advance pointers + ADD $8, R2, R2 // src += 8 bytes (2 values) + ADD $16, R0, R0 // dst += 2 int64 (16 bytes) + ADD $2, R5, R5 // index += 2 + + CMP R4, R5 + BLT int64_32bit_loop + + // Handle tail with scalar + CMP R1, R5 + BEQ int64_done + + SUB R5, R1, R1 + B scalar_fallback_entry_int64 + +int64_done: + RET + +scalar_fallback_int64: + MOVD $0, R5 // Start from beginning + +scalar_fallback_entry_int64: + // R0 = current dst position (already advanced) + // R1 = remaining elements + // R2 = current src position (already advanced) + // R3 = bitWidth + // R5 = elements already processed + + // Fall back to optimized implementation for remaining elements + CMP $0, R1 + BEQ scalar_done_int64 // No remaining elements + + // Check if we can do 4-way unrolled loop + CMP $4, R1 + BLT scalar_single_int64 + + // Calculate bitMask + MOVD $1, R4 + LSL R3, R4, R4 + SUB $1, R4, R4 // R4 = bitMask + + // Calculate unrolled iterations: (remaining / 4) * 4 + LSR $2, R1, R16 + CBZ R16, scalar_single_int64 + LSL $2, R16, R16 // R16 = (len / 4) * 4 + + MOVD $0, R6 // R6 = bitOffset + MOVD $0, R7 // R7 = index + +scalar_unrolled_loop_int64: + // === Element 0 === + LSR $6, R6, R8 // i = bitOffset / 64 + AND $63, R6, R9 // j = bitOffset % 64 + MOVD (R2)(R8<<3), R11 // load 64-bit word + LSL R9, R4, R12 + AND R12, R11, R11 + LSR R9, R11, R11 + + ADD R9, R3, R12 + CMP $64, R12 + BLE scalar_store0_int64 + MOVD $64, R13 + SUB R9, R13, R13 + ADD $1, R8, R14 + MOVD (R2)(R14<<3), R15 + LSR R13, R4, R10 + AND R10, R15, R15 + LSL R13, R15, R15 + ORR R15, R11, R11 + +scalar_store0_int64: + ADD R3, R6, R6 + LSL $3, R7, R10 + MOVD R11, (R0)(R10) + ADD $1, R7, R7 + + // === Element 1 === + LSR 
$6, R6, R8 + AND $63, R6, R9 + MOVD (R2)(R8<<3), R11 + LSL R9, R4, R12 + AND R12, R11, R11 + LSR R9, R11, R11 + + ADD R9, R3, R12 + CMP $64, R12 + BLE scalar_store1_int64 + MOVD $64, R13 + SUB R9, R13, R13 + ADD $1, R8, R14 + MOVD (R2)(R14<<3), R15 + LSR R13, R4, R10 + AND R10, R15, R15 + LSL R13, R15, R15 + ORR R15, R11, R11 + +scalar_store1_int64: + ADD R3, R6, R6 + LSL $3, R7, R10 + MOVD R11, (R0)(R10) + ADD $1, R7, R7 + + // === Element 2 === + LSR $6, R6, R8 + AND $63, R6, R9 + MOVD (R2)(R8<<3), R11 + LSL R9, R4, R12 + AND R12, R11, R11 + LSR R9, R11, R11 + + ADD R9, R3, R12 + CMP $64, R12 + BLE scalar_store2_int64 + MOVD $64, R13 + SUB R9, R13, R13 + ADD $1, R8, R14 + MOVD (R2)(R14<<3), R15 + LSR R13, R4, R10 + AND R10, R15, R15 + LSL R13, R15, R15 + ORR R15, R11, R11 + +scalar_store2_int64: + ADD R3, R6, R6 + LSL $3, R7, R10 + MOVD R11, (R0)(R10) + ADD $1, R7, R7 + + // === Element 3 === + LSR $6, R6, R8 + AND $63, R6, R9 + MOVD (R2)(R8<<3), R11 + LSL R9, R4, R12 + AND R12, R11, R11 + LSR R9, R11, R11 + + ADD R9, R3, R12 + CMP $64, R12 + BLE scalar_store3_int64 + MOVD $64, R13 + SUB R9, R13, R13 + ADD $1, R8, R14 + MOVD (R2)(R14<<3), R15 + LSR R13, R4, R10 + AND R10, R15, R15 + LSL R13, R15, R15 + ORR R15, R11, R11 + +scalar_store3_int64: + ADD R3, R6, R6 + LSL $3, R7, R10 + MOVD R11, (R0)(R10) + ADD $1, R7, R7 + + CMP R16, R7 + BLT scalar_unrolled_loop_int64 + + // Check if done + CMP R1, R7 + BEQ scalar_done_int64 + + // Preserve R6 (bitOffset), R4 (bitMask), and R7 (index) for tail processing + // R1 still contains total count, R7 has current index + B scalar_loop_single_int64 + +scalar_single_int64: + // Process remaining elements one at a time + MOVD $1, R4 + LSL R3, R4, R4 + SUB $1, R4, R4 // R4 = bitMask + + MOVD $0, R6 // R6 = bitOffset + MOVD $0, R7 // R7 = index + +scalar_loop_single_int64: + LSR $6, R6, R8 // i = bitOffset / 64 + AND $63, R6, R9 // j = bitOffset % 64 + MOVD (R2)(R8<<3), R11 // load 64-bit word + LSL R9, R4, R12 + AND R12, R11, R11 
+ LSR R9, R11, R11 + + ADD R9, R3, R12 + CMP $64, R12 + BLE scalar_next_single_int64 + MOVD $64, R13 + SUB R9, R13, R13 + ADD $1, R8, R14 + MOVD (R2)(R14<<3), R15 + LSR R13, R4, R10 + AND R10, R15, R15 + LSL R13, R15, R15 + ORR R15, R11, R11 + +scalar_next_single_int64: + LSL $3, R7, R10 + MOVD R11, (R0)(R10) + ADD R3, R6, R6 + ADD $1, R7, R7 + + CMP R1, R7 + BLT scalar_loop_single_int64 + +scalar_done_int64: + RET + +// Macro definitions for unsupported NEON instructions using WORD encodings +// USHLL Vd.8H, Vn.8B, #0 - widen 8x8-bit to 8x16-bit +#define USHLL_8H_8B(vd, vn) WORD $(0x2f08a400 | (vd) | ((vn)<<5)) + +// USHLL2 Vd.8H, Vn.16B, #0 - widen upper 8x8-bit to 8x16-bit +#define USHLL2_8H_16B(vd, vn) WORD $(0x6f08a400 | (vd) | ((vn)<<5)) + +// USHLL Vd.4S, Vn.4H, #0 - widen 4x16-bit to 4x32-bit +#define USHLL_4S_4H(vd, vn) WORD $(0x2f10a400 | (vd) | ((vn)<<5)) + +// USHLL2 Vd.4S, Vn.8H, #0 - widen upper 4x16-bit to 4x32-bit +#define USHLL2_4S_8H(vd, vn) WORD $(0x6f10a400 | (vd) | ((vn)<<5)) + +// USHLL Vd.2D, Vn.2S, #0 - widen 2x32-bit to 2x64-bit +#define USHLL_2D_2S(vd, vn) WORD $(0x2f20a400 | (vd) | ((vn)<<5)) + +// USHLL2 Vd.2D, Vn.4S, #0 - widen upper 2x32-bit to 2x64-bit +#define USHLL2_2D_4S(vd, vn) WORD $(0x6f20a400 | (vd) | ((vn)<<5)) + +// unpackInt64x1bitNEON implements table-based NEON unpacking for int64 bitWidth=1 +// Similar to int32 version but with additional widening to 64-bit +// +// func unpackInt64x1bitNEON(dst []int64, src []byte, bitWidth uint) diff --git a/vendor/github.com/parquet-go/bitpack/unpack_int64_purego.go b/vendor/github.com/parquet-go/bitpack/unpack_int64_purego.go new file mode 100644 index 00000000000..3d7b1183282 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_int64_purego.go @@ -0,0 +1,25 @@ +//go:build purego || (!amd64 && !arm64) + +package bitpack + +func unpackInt64(dst []int64, src []byte, bitWidth uint) { + bits := unsafecastBytesToUint32(src) + bitMask := uint64(1<> j + if j+bitWidth > 32 { + k := 
32 - j + d |= (uint64(bits[i+1]) & (bitMask >> k)) << k + if j+bitWidth > 64 { + k := 64 - j + d |= (uint64(bits[i+2]) & (bitMask >> k)) << k + } + } + dst[n] = int64(d) + bitOffset += bitWidth + } +} diff --git a/vendor/github.com/parquet-go/bitpack/unpack_neon_macros_arm64.h b/vendor/github.com/parquet-go/bitpack/unpack_neon_macros_arm64.h new file mode 100644 index 00000000000..7ba1c8f7b01 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unpack_neon_macros_arm64.h @@ -0,0 +1,18 @@ +// Macro definitions for unsupported NEON instructions using WORD encodings +// USHLL Vd.8H, Vn.8B, #0 - widen 8x8-bit to 8x16-bit +#define USHLL_8H_8B(vd, vn) WORD $(0x2f08a400 | (vd) | ((vn)<<5)) + +// USHLL2 Vd.8H, Vn.16B, #0 - widen upper 8x8-bit to 8x16-bit +#define USHLL2_8H_16B(vd, vn) WORD $(0x6f08a400 | (vd) | ((vn)<<5)) + +// USHLL Vd.4S, Vn.4H, #0 - widen 4x16-bit to 4x32-bit +#define USHLL_4S_4H(vd, vn) WORD $(0x2f10a400 | (vd) | ((vn)<<5)) + +// USHLL2 Vd.4S, Vn.8H, #0 - widen upper 4x16-bit to 4x32-bit +#define USHLL2_4S_8H(vd, vn) WORD $(0x6f10a400 | (vd) | ((vn)<<5)) + +// USHLL Vd.2D, Vn.2S, #0 - widen 2x32-bit to 2x64-bit +#define USHLL_2D_2S(vd, vn) WORD $(0x2f20a400 | (vd) | ((vn)<<5)) + +// USHLL2 Vd.2D, Vn.4S, #0 - widen upper 2x32-bit to 2x64-bit +#define USHLL2_2D_4S(vd, vn) WORD $(0x6f20a400 | (vd) | ((vn)<<5)) diff --git a/vendor/github.com/parquet-go/bitpack/unsafecast/unsafecast.go b/vendor/github.com/parquet-go/bitpack/unsafecast/unsafecast.go new file mode 100644 index 00000000000..0838fd10e68 --- /dev/null +++ b/vendor/github.com/parquet-go/bitpack/unsafecast/unsafecast.go @@ -0,0 +1,54 @@ +// Package unsafecast exposes functions to bypass the Go type system and perform +// conversions between types that would otherwise not be possible. 
+// +// The functions of this package are mostly useful as optimizations to avoid +// memory copies when converting between compatible memory layouts; for example, +// casting a [][16]byte to a []byte in order to use functions of the standard +// bytes package on the slices. +// +// With great power comes great responsibility. +package unsafecast + +import "unsafe" + +// The slice type represents the memory layout of slices in Go. It is similar to +// reflect.SliceHeader but uses a unsafe.Pointer instead of uintptr to for the +// backing array to allow the garbage collector to track track the reference. +type slice struct { + ptr unsafe.Pointer + len int + cap int +} + +// Slice converts the data slice of type []From to a slice of type []To sharing +// the same backing array. The length and capacity of the returned slice are +// scaled according to the size difference between the source and destination +// types. +// +// Note that the function does not perform any checks to ensure that the memory +// layouts of the types are compatible, it is possible to cause memory +// corruption if the layouts mismatch (e.g. the pointers in the From are +// different than the pointers in To). +func Slice[To, From any](data []From) []To { + // This function could use unsafe.Slice but it would drop the capacity + // information, so instead we implement the type conversion. + var zf From + var zt To + var s = slice{ + ptr: unsafe.Pointer(unsafe.SliceData(data)), + len: int((uintptr(len(data)) * unsafe.Sizeof(zf)) / unsafe.Sizeof(zt)), + cap: int((uintptr(cap(data)) * unsafe.Sizeof(zf)) / unsafe.Sizeof(zt)), + } + return *(*[]To)(unsafe.Pointer(&s)) +} + +// String converts a byte slice to a string value. The returned string shares +// the backing array of the byte slice. 
+// +// Programs using this function are responsible for ensuring that the data slice +// is not modified while the returned string is in use, otherwise the guarantee +// of immutability of Go string values will be violated, resulting in undefined +// behavior. +func String(data []byte) string { + return unsafe.String(unsafe.SliceData(data), len(data)) +} diff --git a/vendor/github.com/parquet-go/jsonlite/.gitignore b/vendor/github.com/parquet-go/jsonlite/.gitignore new file mode 100644 index 00000000000..b3584c8d4de --- /dev/null +++ b/vendor/github.com/parquet-go/jsonlite/.gitignore @@ -0,0 +1,21 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.py + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Emacs +*~ +#*# +.# diff --git a/vendor/github.com/parquet-go/jsonlite/LICENSE b/vendor/github.com/parquet-go/jsonlite/LICENSE new file mode 100644 index 00000000000..ef8a76d1d24 --- /dev/null +++ b/vendor/github.com/parquet-go/jsonlite/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 parquet-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/parquet-go/jsonlite/README.md b/vendor/github.com/parquet-go/jsonlite/README.md new file mode 100644 index 00000000000..18c4e911ca2 --- /dev/null +++ b/vendor/github.com/parquet-go/jsonlite/README.md @@ -0,0 +1,54 @@ +# jsonlite + +[![Go Reference](https://pkg.go.dev/badge/github.com/parquet-go/jsonlite.svg)](https://pkg.go.dev/github.com/parquet-go/jsonlite) + +A lightweight JSON parser for Go, optimized for performance through careful +memory management. + +## Motivation + +Go's standard `encoding/json` package is designed for marshaling and +unmarshaling into Go structs. This works well when you know the schema ahead of +time, but becomes awkward when you need to traverse arbitrary JSON +structures—you end up with `map[string]interface{}` and type assertions +everywhere. + +Packages like `github.com/tidwall/gjson` solve this with path-based queries, +but they re-scan the input for each query. If you're recursively walking a JSON +document, you pay the parsing cost repeatedly. + +`jsonlite` takes a different approach: parse once, traverse many times. The +parser builds a lightweight in-memory index of the entire document in a single +pass. Once parsed, you can navigate the structure freely without re-parsing. + +## Design + +The core insight is that most JSON strings don't contain escape sequences. When +a string like `"hello"` appears in the input, we don't need to copy it—we can +just point directly into the original input buffer. Only strings with escapes +(like `"hello\nworld"`) require allocation for the unescaped result. + +Each JSON value is represented by a `Value` struct containing just two machine +words: a pointer and a packed integer. 
The integer uses its high bits to store +the value's type (null, boolean, number, string, array, or object) and its low +bits for length information. This bit-packing means the overhead per value is +minimal—16 bytes on 64-bit systems regardless of the value type. + +Arrays and objects are stored as contiguous slices, so iteration is +cache-friendly. Object fields are sorted by key during parsing, enabling binary +search for lookups. + +## Trade-offs + +This design optimizes for read-heavy workloads where you parse once and query +multiple times. If you only need to extract a single value from a large +document, a streaming parser or path-based query might be more appropriate. + +The parser assumes the input remains valid for the lifetime of the parsed +values. If you're parsing from a buffer that gets reused, you'll need to copy +strings out before the buffer changes. + +Numeric values are stored as their original string representation and parsed on +demand when you call `Int()`, `Float()`, etc. This avoids precision loss for +large integers and defers parsing cost until needed, but means repeated numeric +access will re-parse each time. diff --git a/vendor/github.com/parquet-go/jsonlite/convert.go b/vendor/github.com/parquet-go/jsonlite/convert.go new file mode 100644 index 00000000000..31843da80ba --- /dev/null +++ b/vendor/github.com/parquet-go/jsonlite/convert.go @@ -0,0 +1,352 @@ +package jsonlite + +import ( + "encoding/json" + "math" + "strconv" + "time" +) + +// Convertible is the type constraint for the As function, defining all supported +// conversion types. 
type Convertible interface {
	any |
		bool |
		int64 |
		uint64 |
		float64 |
		json.Number |
		string |
		time.Duration |
		time.Time |
		[]any |
		[]bool |
		[]int64 |
		[]uint64 |
		[]float64 |
		[]json.Number |
		[]string |
		[]time.Duration |
		[]time.Time |
		map[string]any |
		map[string]bool |
		map[string]int64 |
		map[string]uint64 |
		map[string]float64 |
		map[string]json.Number |
		map[string]string |
		map[string]time.Duration |
		map[string]time.Time
}

// As converts a JSON value to the specified Go type.
//
// For primitive types (bool, int64, uint64, float64, string, time.Duration, time.Time):
//   - Follows the same conversion rules as the corresponding As* function
//   - Returns zero value for nil or incompatible types
//
// For json.Number:
//   - Returns the number as a json.Number string
//   - Returns empty string for non-number values
//
// For slice types ([]T):
//   - Converts JSON arrays where each element is converted using the primitive T logic
//   - Returns nil for non-array values
//   - Returns empty slice for empty arrays
//
// For map types (map[string]T):
//   - Converts JSON objects where each value is converted using the primitive T logic
//   - Returns nil for non-object values
//   - Returns empty map for empty objects
//
// For any types:
//   - any: Returns the most natural Go representation (bool, int64, uint64, float64, string, []any, map[string]any)
//   - []any: Recursively converts array elements to any
//   - map[string]any: Recursively converts object values to any
//
// Examples:
//
//	val, _ := Parse(`[1, 2, 3]`)
//	nums := As[[]int64](val) // []int64{1, 2, 3}
//
//	val, _ := Parse(`{"a": 1, "b": 2}`)
//	m := As[map[string]int64](val) // map[string]int64{"a": 1, "b": 2}
func As[T Convertible](v *Value) T {
	// Dispatch on the dynamic type of T's zero value. The cases are mutually
	// exclusive concrete types, so case order is irrelevant.
	switch any(*new(T)).(type) {
	case bool:
		return any(asBool(v)).(T)
	case int64:
		return any(asInt(v)).(T)
	case uint64:
		return any(asUint(v)).(T)
	case float64:
		return any(asFloat(v)).(T)
	case json.Number:
		return any(asNumber(v)).(T)
	case string:
		return any(asString(v)).(T)
	case time.Duration:
		return any(asDuration(v)).(T)
	case time.Time:
		return any(asTime(v)).(T)
	case []any:
		return any(asSlice(v, asAny)).(T)
	case []bool:
		return any(asSlice(v, asBool)).(T)
	case []int64:
		return any(asSlice(v, asInt)).(T)
	case []uint64:
		return any(asSlice(v, asUint)).(T)
	case []float64:
		return any(asSlice(v, asFloat)).(T)
	case []json.Number:
		return any(asSlice(v, asNumber)).(T)
	case []string:
		return any(asSlice(v, asString)).(T)
	case []time.Duration:
		return any(asSlice(v, asDuration)).(T)
	case []time.Time:
		return any(asSlice(v, asTime)).(T)
	case map[string]any:
		return any(asMap(v, asAny)).(T)
	case map[string]bool:
		return any(asMap(v, asBool)).(T)
	case map[string]int64:
		return any(asMap(v, asInt)).(T)
	case map[string]uint64:
		return any(asMap(v, asUint)).(T)
	case map[string]float64:
		return any(asMap(v, asFloat)).(T)
	case map[string]json.Number:
		return any(asMap(v, asNumber)).(T)
	case map[string]string:
		return any(asMap(v, asString)).(T)
	case map[string]time.Duration:
		return any(asMap(v, asDuration)).(T)
	case map[string]time.Time:
		return any(asMap(v, asTime)).(T)
	default:
		// T is `any`: *new(T) is a nil interface and matches none of the
		// concrete cases above. A direct asAny(v).(T) assertion would panic
		// when asAny returns nil, so use the comma-ok form.
		r, _ := asAny(v).(T)
		return r
	}
}

// asBool coerces the value to a boolean.
// True -> true; numbers -> true when they parse as a non-zero float;
// strings, objects and arrays -> true when non-empty.
// Everything else (nil, Null, False) -> false.
func asBool(v *Value) bool {
	if v != nil {
		switch v.Kind() {
		case True:
			return true
		case Number:
			f, err := strconv.ParseFloat(v.json(), 64)
			return err == nil && f != 0
		case String, Object, Array:
			return v.Len() > 0
		}
	}
	return false
}

// asString coerces the value to a string. Non-null values delegate to
// v.String(); nil and null yield "".
func asString(v *Value) string {
	if v != nil && v.Kind() != Null {
		return v.String()
	}
	return ""
}

// asInt coerces the value to a signed 64-bit integer.
+func asInt(v *Value) int64 { + if v != nil { + switch v.Kind() { + case True: + return 1 + case Number: + if i, err := strconv.ParseInt(v.json(), 10, 64); err == nil { + return i + } + if f, err := strconv.ParseFloat(v.json(), 64); err == nil { + return int64(f) + } + case String: + // Strip surrounding quotes - no escapes in valid number strings + s := v.json() + s = s[1 : len(s)-1] + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + if f, err := strconv.ParseFloat(s, 64); err == nil { + return int64(f) + } + } + } + return 0 +} + +// asUint coerces the value to an unsigned 64-bit integer. +func asUint(v *Value) uint64 { + if v != nil { + switch v.Kind() { + case True: + return 1 + case Number: + if u, err := strconv.ParseUint(v.json(), 10, 64); err == nil { + return u + } + if f, err := strconv.ParseFloat(v.json(), 64); err == nil { + if f >= 0 { + return uint64(f) + } + } + case String: + // Strip surrounding quotes - no escapes in valid number strings + s := v.json() + s = s[1 : len(s)-1] + if u, err := strconv.ParseUint(s, 10, 64); err == nil { + return u + } + if f, err := strconv.ParseFloat(s, 64); err == nil { + if f >= 0 { + return uint64(f) + } + } + } + } + return 0 +} + +// asFloat coerces the value to a 64-bit floating point number. +func asFloat(v *Value) float64 { + if v != nil { + switch v.Kind() { + case True: + return 1 + case Number: + if f, err := strconv.ParseFloat(v.json(), 64); err == nil { + return f + } + case String: + // Strip surrounding quotes - no escapes in valid number strings + s := v.json() + s = s[1 : len(s)-1] + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + } + return 0 +} + +// asDuration coerces the value to a time.Duration. 
+func asDuration(v *Value) time.Duration { + if v != nil { + switch v.Kind() { + case True: + return time.Second + case Number: + if f, err := strconv.ParseFloat(v.json(), 64); err == nil { + return time.Duration(f * float64(time.Second)) + } + case String: + // Strip surrounding quotes - no escapes in valid duration strings + s := v.json() + s = s[1 : len(s)-1] + if d, err := time.ParseDuration(s); err == nil { + return d + } + } + } + return 0 +} + +// asTime coerces the value to a time.Time. +func asTime(v *Value) time.Time { + if v != nil { + switch v.Kind() { + case Number: + if f, err := strconv.ParseFloat(v.json(), 64); err == nil { + sec, frac := math.Modf(f) + return time.Unix(int64(sec), int64(frac*1e9)).UTC() + } + case String: + // Strip surrounding quotes - no escapes in valid RFC3339 time strings + s := v.json() + s = s[1 : len(s)-1] + if t, err := time.ParseInLocation(time.RFC3339, s, time.UTC); err == nil { + return t + } + } + } + return time.Time{} +} + +// asNumber coerces the value to a json.Number. +func asNumber(v *Value) json.Number { + if v != nil && v.Kind() == Number { + return json.Number(v.json()) + } + return "" +} + +// asSlice converts a JSON array to a Go slice by applying the converter +// function to each element. Returns nil for non-array values. +func asSlice[E any](v *Value, converter func(*Value) E) []E { + if v == nil || v.Kind() != Array { + return nil + } + result := make([]E, 0, v.Len()) + for elem := range v.Array { + result = append(result, converter(elem)) + } + return result +} + +// asMap converts a JSON object to a Go map by applying the converter +// function to each value. Returns nil for non-object values. 
func asMap[V any](v *Value, converter func(*Value) V) map[string]V {
	if v == nil || v.Kind() != Object {
		return nil
	}
	result := make(map[string]V, v.Len())
	for k, val := range v.Object {
		result[k] = converter(val)
	}
	return result
}

// asAny converts a JSON value to its most natural Go representation:
// nil, bool, int64, uint64, float64, string, []any, or map[string]any.
func asAny(v *Value) any {
	if v == nil {
		return nil
	}
	switch v.Kind() {
	case Null:
		return nil
	case True:
		return true
	case False:
		return false
	case Number:
		switch v.NumberType() {
		case Int:
			return v.Int()
		case Uint:
			// Unsigned values that fit in int64 are normalized to int64 so
			// callers see one integer type; larger values stay uint64.
			u := v.Uint()
			if u <= math.MaxInt64 {
				return int64(u)
			}
			return u
		default:
			return v.Float()
		}
	case String:
		return v.String()
	case Array:
		// Recurse: each element is itself converted with asAny.
		return asSlice(v, asAny)
	case Object:
		return asMap(v, asAny)
	default:
		return nil
	}
}
diff --git a/vendor/github.com/parquet-go/jsonlite/doc.go b/vendor/github.com/parquet-go/jsonlite/doc.go
new file mode 100644
index 00000000000..e5fd4e55720
--- /dev/null
+++ b/vendor/github.com/parquet-go/jsonlite/doc.go
@@ -0,0 +1,8 @@
// Package jsonlite provides a lightweight JSON parser optimized for
// performance through careful memory management. It parses JSON into
// a tree of Value nodes that can be inspected and serialized back to JSON.
//
// The parser handles all standard JSON types: null, booleans, numbers,
// strings, arrays, and objects. It properly handles UTF-16 surrogate
// pairs for emoji and extended Unicode characters.
package jsonlite
diff --git a/vendor/github.com/parquet-go/jsonlite/iterator.go b/vendor/github.com/parquet-go/jsonlite/iterator.go
new file mode 100644
index 00000000000..f2b5d82d7ae
--- /dev/null
+++ b/vendor/github.com/parquet-go/jsonlite/iterator.go
@@ -0,0 +1,562 @@
package jsonlite

import (
	"fmt"
	"math"
	"strconv"
	"time"
)

// Iterator provides a streaming interface for traversing JSON values.
+// It automatically handles control tokens (braces, brackets, colons, commas) +// and presents only the logical JSON values to the caller. +type Iterator struct { + tokens Tokenizer + json string // Original JSON for computing pre-token positions + token string + kind Kind + key string + err error + state []byte // stack of states: 'a' for array, 'o' for object (expecting key), 'v' for object (expecting value) + bytes [16]byte + consumed bool // whether the current value has been consumed +} + +// Iterate creates a new Iterator for the given JSON string. +func Iterate(json string) *Iterator { + it := &Iterator{ + tokens: Tokenizer{json: json}, + json: json, // Store original JSON + } + it.state = it.bytes[:0] + return it +} + +// Reset resets the iterator to parse a new JSON string. +func (it *Iterator) Reset(json string) { + it.tokens = Tokenizer{json: json} + it.json = json + it.token = "" + it.kind = 0 + it.key = "" + it.err = nil + it.consumed = false +} + +// Next advances the iterator to the next JSON value. +// Returns true if there is a value to process, false when done or on error. 
func (it *Iterator) Next() bool {
	for {
		token, ok := it.tokens.Next()
		if !ok {
			// Input exhausted: if we are still inside an array/object, the
			// document was truncated.
			if len(it.state) > 0 {
				if it.top() == 'a' {
					it.err = errUnexpectedEndOfArray
				} else {
					it.err = errUnexpectedEndOfObject
				}
			}
			return false
		}

		if len(it.state) > 0 {
			s := it.top()
			switch s {
			case 'a': // in array, expecting value or ]
				if token == "]" {
					it.pop()
					continue
				}
				if token == "," {
					continue
				}
			case 'o': // in object, expecting key or }
				if token == "}" {
					it.pop()
					continue
				}
				if token == "," {
					continue
				}
				// The token must be a quoted key; consume the ':' that
				// follows it before yielding the value.
				key, err := Unquote(token)
				if err != nil {
					it.setErrorf("invalid key: %q: %w", token, err)
					return false
				}
				it.setKey(key)
				colon, ok := it.tokens.Next()
				if !ok {
					it.err = errUnexpectedEndOfObject
					return false
				}
				if colon != ":" {
					it.setErrorf("expected ':', got %q", colon)
					return false
				}
				// Change state to expect value
				it.set('v')
				continue
			case 'v': // in object, expecting value
				// Change state back to expect key/}
				it.set('o')
			}
		}

		return it.setToken(token)
	}
}

// push pushes a nesting state ('a', 'o' or 'v') onto the stack.
func (it *Iterator) push(state byte) {
	it.state = append(it.state, state)
}

// pop removes the top nesting state. Callers must ensure the stack is
// non-empty.
func (it *Iterator) pop() {
	it.state = it.state[:len(it.state)-1]
}

// top returns the current nesting state without removing it.
func (it *Iterator) top() byte {
	return it.state[len(it.state)-1]
}

// set replaces the top nesting state in place.
func (it *Iterator) set(state byte) {
	it.state[len(it.state)-1] = state
}

// setError records err as the iterator's terminal error.
func (it *Iterator) setError(err error) {
	it.err = err
}

// setErrorf records a formatted terminal error (supports %w wrapping).
func (it *Iterator) setErrorf(msg string, args ...any) {
	it.setError(fmt.Errorf(msg, args...))
}

// setKey records the object key for the upcoming value.
func (it *Iterator) setKey(key string) {
	it.key = key
}

// setToken installs token as the current value, classifying it and pushing
// a nesting state for arrays/objects. Returns false (after recording the
// error) if the token is invalid.
func (it *Iterator) setToken(token string) bool {
	kind, err := tokenKind(token)
	it.token = token
	it.kind = kind
	it.err = err
	it.consumed = false

	if err != nil {
		return false
	}

	switch kind {
	case Array:
		it.push('a')
	case Object:
		it.push('o')
	}

	return true
}

// skipArray consumes tokens until the array opened by the current '['
// closes, then pops its state. Brackets inside strings are safe: the
// tokenizer returns a whole quoted string as a single token.
func (it *Iterator) skipArray() {
	depth := 1
	for depth > 0 {
		token, ok := it.tokens.Next()
		if !ok {
			it.setError(errUnexpectedEndOfArray)
			return
		}
		switch token {
		case "[":
			depth++
		case "]":
			depth--
		}
	}
	it.pop()
}

// skipObject consumes tokens until the object opened by the current '{'
// closes, then pops its state. See skipArray for why string contents
// cannot confuse the depth count.
func (it *Iterator) skipObject() {
	depth := 1
	for depth > 0 {
		token, ok := it.tokens.Next()
		if !ok {
			it.setError(errUnexpectedEndOfObject)
			return
		}
		switch token {
		case "{":
			depth++
		case "}":
			depth--
		}
	}
	it.pop()
}

// tokenKind classifies a raw token by its first byte (the tokenizer never
// yields an empty token). Literals are checked in full; numbers are
// validated with validNumber; string contents are validated later.
func tokenKind(token string) (Kind, error) {
	switch token[0] {
	case 'n':
		if token != "null" {
			return Null, fmt.Errorf("invalid token: %q", token)
		}
		return Null, nil
	case 't':
		if token != "true" {
			return Null, fmt.Errorf("invalid token: %q", token)
		}
		return True, nil
	case 'f':
		if token != "false" {
			return Null, fmt.Errorf("invalid token: %q", token)
		}
		return False, nil
	case '"':
		return String, nil
	case '[':
		return Array, nil
	case '{':
		return Object, nil
	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		if !validNumber(token) {
			return Number, fmt.Errorf("invalid number: %q", token)
		}
		return Number, nil
	default:
		return Null, fmt.Errorf("invalid token: %q", token)
	}
}

// Kind returns the kind of the current value.
func (it *Iterator) Kind() Kind { return it.kind }

// Key returns the object key for the current value, if inside an object.
// Returns an empty string if not inside an object or at the top level.
func (it *Iterator) Key() string { return it.key }

// Err returns any error that occurred during iteration.
func (it *Iterator) Err() error { return it.err }

// Depth returns the current nesting depth (0 at top level).
func (it *Iterator) Depth() int { return len(it.state) }

// Value parses and returns the current value.
// For arrays and objects, this consumes all nested tokens and returns the
// complete parsed structure.
func (it *Iterator) Value() (*Value, error) {
	val, err := it.value()
	return &val, err
}

// value parses the current token into a Value. For arrays and objects it
// re-parses from the original input buffer starting at the opening
// delimiter, then advances the tokenizer past the consumed structure.
func (it *Iterator) value() (Value, error) {
	if it.err != nil {
		return Value{}, it.err
	}

	switch it.kind {
	case Null, True, False, Number:
		return makeValue(it.kind, it.token), nil
	case String:
		// Validate the quoted string but store the quoted token
		if !validString(it.token) {
			return Value{}, fmt.Errorf("invalid string: %q", it.token)
		}
		return makeStringValue(it.token), nil
	case Array:
		// Recover the byte offset of the opening '[': total length minus
		// what the tokenizer has left, minus the delimiter token itself.
		delimi := len(it.token)
		offset := len(it.json) - len(it.tokens.json) - delimi
		val, rest, err := parseArray(it.json[offset:], it.tokens.json, DefaultMaxDepth)
		it.tokens.json, it.consumed = rest, true
		if err != nil {
			it.setError(err)
		}
		it.pop()
		return val, err
	case Object:
		// Same offset recovery as the Array case, for the opening '{'.
		delimi := len(it.token)
		offset := len(it.json) - len(it.tokens.json) - delimi
		val, rest, err := parseObject(it.json[offset:], it.tokens.json, DefaultMaxDepth)
		it.tokens.json, it.consumed = rest, true
		if err != nil {
			it.setError(err)
		}
		it.pop()
		return val, err
	default:
		return Value{}, fmt.Errorf("unexpected kind: %v", it.kind)
	}
}

// Null returns true if the current value is null.
func (it *Iterator) Null() bool { return it.kind == Null }

// Bool returns the current value as a boolean.
// Returns false for null values.
// Returns an error if the value is not a boolean, null, or a string that can be parsed as a boolean.
func (it *Iterator) Bool() (bool, error) {
	switch it.kind {
	case Null, False:
		return false, nil
	case True:
		return true, nil
	case String:
		s, err := Unquote(it.token)
		if err != nil {
			return false, fmt.Errorf("invalid string: %q", it.token)
		}
		return strconv.ParseBool(s)
	default:
		return false, fmt.Errorf("cannot convert %v to bool", it.kind)
	}
}

// Int returns the current value as a signed 64-bit integer.
// Returns 0 for null values.
// Returns an error if the value is not a number, null, or a string that can be parsed as an integer.
func (it *Iterator) Int() (int64, error) {
	switch it.kind {
	case Null:
		return 0, nil
	case Number:
		return strconv.ParseInt(it.token, 10, 64)
	case String:
		s, err := Unquote(it.token)
		if err != nil {
			return 0, fmt.Errorf("invalid string: %q", it.token)
		}
		return strconv.ParseInt(s, 10, 64)
	default:
		return 0, fmt.Errorf("cannot convert %v to int", it.kind)
	}
}

// Float returns the current value as a 64-bit floating point number.
// Returns 0 for null values.
// Returns an error if the value is not a number, null, or a string that can be parsed as a float.
func (it *Iterator) Float() (float64, error) {
	switch it.kind {
	case Null:
		return 0, nil
	case Number:
		return strconv.ParseFloat(it.token, 64)
	case String:
		s, err := Unquote(it.token)
		if err != nil {
			return 0, fmt.Errorf("invalid string: %q", it.token)
		}
		return strconv.ParseFloat(s, 64)
	default:
		return 0, fmt.Errorf("cannot convert %v to float", it.kind)
	}
}

// String returns the current value as a string.
// Returns "" for null values.
// Returns an error if the value is not a string or null.
func (it *Iterator) String() (string, error) {
	switch it.kind {
	case Null:
		return "", nil
	case String:
		return Unquote(it.token)
	default:
		return "", fmt.Errorf("cannot convert %v to string", it.kind)
	}
}

// Duration returns the current value as a time.Duration.
// Returns 0 for null values.
// For numbers, the value is interpreted as seconds.
// For strings, the value is parsed using time.ParseDuration.
// Returns an error if the value cannot be converted to a duration.
+func (it *Iterator) Duration() (time.Duration, error) { + switch it.kind { + case Null: + return 0, nil + case Number: + f, err := strconv.ParseFloat(it.token, 64) + if err != nil { + return 0, err + } + return time.Duration(f * float64(time.Second)), nil + case String: + s, err := Unquote(it.token) + if err != nil { + return 0, fmt.Errorf("invalid string: %q", it.token) + } + return time.ParseDuration(s) + default: + return 0, fmt.Errorf("cannot convert %v to duration", it.kind) + } +} + +// Time returns the current value as a time.Time. +// Returns the zero time for null values. +// For numbers, the value is interpreted as seconds since Unix epoch. +// For strings, the value is parsed using RFC3339 format. +// Returns an error if the value cannot be converted to a time. +func (it *Iterator) Time() (time.Time, error) { + switch it.kind { + case Null: + return time.Time{}, nil + case Number: + f, err := strconv.ParseFloat(it.token, 64) + if err != nil { + return time.Time{}, err + } + sec, frac := math.Modf(f) + return time.Unix(int64(sec), int64(frac*1e9)).UTC(), nil + case String: + s, err := Unquote(it.token) + if err != nil { + return time.Time{}, fmt.Errorf("invalid string: %q", it.token) + } + return time.Parse(time.RFC3339, s) + default: + return time.Time{}, fmt.Errorf("cannot convert %v to time", it.kind) + } +} + +// Object iterates over the key-value pairs of the current object. +// The iterator yields the key for each field, and the Iterator is positioned +// on the field's value. Call Kind(), Value(), Object(), or Array() to process +// the value. If the value is not consumed before the next iteration, it will +// be automatically skipped. +// For null values, no iterations occur. +// +// Must only be called when Kind() == Object or Kind() == Null. 
func (it *Iterator) Object(yield func(string, error) bool) {
	if it.kind == Null {
		return // null is treated as empty object
	}
	it.consumed = true // mark the object itself as consumed
	for i := 0; ; i++ {
		// Auto-consume the previous value if it wasn't consumed
		if !it.consumed {
			switch it.kind {
			case Array:
				it.consumed = true
				it.skipArray()
			case Object:
				it.consumed = true
				it.skipObject()
			}
		}

		token, ok := it.tokens.Next()
		if !ok {
			it.setError(errUnexpectedEndOfObject)
			yield("", it.err)
			return
		}

		if token == "}" {
			it.pop()
			return
		}

		// After the first field, a ',' separator is required before the
		// next key.
		if i != 0 {
			if token != "," {
				it.setErrorf("expected ',', got %q", token)
				yield("", it.err)
				return
			}
			token, ok = it.tokens.Next()
			if !ok {
				it.setError(errUnexpectedEndOfObject)
				yield("", it.err)
				return
			}
		}

		key, err := Unquote(token)
		if err != nil {
			it.setErrorf("invalid key: %q: %w", token, err)
			yield("", it.err)
			return
		}

		colon, ok := it.tokens.Next()
		if !ok {
			it.setError(errUnexpectedEndOfObject)
			yield("", it.err)
			return
		}
		if colon != ":" {
			it.setErrorf("expected ':', got %q", colon)
			yield("", it.err)
			return
		}

		value, ok := it.tokens.Next()
		if !ok {
			it.setError(errUnexpectedEndOfObject)
			yield("", it.err)
			return
		}

		// Position the iterator on the field's value before yielding; the
		// callback inspects it via Kind()/Value()/Object()/Array().
		it.setKey(key)
		it.setToken(value)

		if !yield(key, nil) {
			return
		}
	}
}

// Array iterates over the elements of the current array.
// The iterator yields the index for each element, and the Iterator is
// positioned on the element's value. Call Kind(), Value(), Object(), or
// Array() to process the value. If the value is not consumed before the
// next iteration, it will be automatically skipped.
// For null values, no iterations occur.
//
// Must only be called when Kind() == Array or Kind() == Null.
func (it *Iterator) Array(yield func(int, error) bool) {
	if it.kind == Null {
		return // null is treated as empty array
	}
	it.consumed = true // mark the array itself as consumed
	for i := 0; ; i++ {
		// Auto-consume the previous value if it wasn't consumed
		if !it.consumed {
			switch it.kind {
			case Array:
				it.consumed = true
				it.skipArray()
			case Object:
				it.consumed = true
				it.skipObject()
			}
		}

		token, ok := it.tokens.Next()
		if !ok {
			it.setError(errUnexpectedEndOfArray)
			yield(i, it.err)
			return
		}

		if token == "]" {
			it.pop()
			return
		}

		// After the first element, a ',' separator is required before the
		// next value.
		if i != 0 {
			if token != "," {
				it.setErrorf("expected ',', got %q", token)
				yield(i, it.err)
				return
			}
			token, ok = it.tokens.Next()
			if !ok {
				it.setError(errUnexpectedEndOfArray)
				yield(i, it.err)
				return
			}
		}

		// Position the iterator on the element before yielding.
		it.setToken(token)

		if !yield(i, nil) {
			return
		}
	}
}
diff --git a/vendor/github.com/parquet-go/jsonlite/parse.go b/vendor/github.com/parquet-go/jsonlite/parse.go
new file mode 100644
index 00000000000..7292cdc1a53
--- /dev/null
+++ b/vendor/github.com/parquet-go/jsonlite/parse.go
@@ -0,0 +1,305 @@
package jsonlite

import (
	"errors"
	"fmt"
	"hash/maphash"
	"strings"
	"unsafe"
)

const (
	// DefaultMaxDepth is the default maximum depth for parsing JSON objects.
	DefaultMaxDepth = 100
)

var (
	errEndOfObject           = errors.New("}")
	errEndOfArray            = errors.New("]")
	errUnexpectedEndOfObject = errors.New("unexpected end of object")
	errUnexpectedEndOfArray  = errors.New("unexpected end of array")
)

// whitespaceMap is a 256-bit lookup table for ASCII whitespace characters.
// Bit i is set if byte i is whitespace (space, tab, newline, carriage return).
var whitespaceMap = func() [4]uint64 {
	var m [4]uint64
	for _, c := range []byte{' ', '\t', '\n', '\r'} {
		m[c/64] |= 1 << (c % 64)
	}
	return m
}()

// isWhitespace returns true if c is a JSON whitespace character.
+func isWhitespace(c byte) bool { + return (whitespaceMap[c/64] & (1 << (c % 64))) != 0 +} + +// delimiterMap is a 256-bit lookup table for JSON delimiters and whitespace. +// Used to quickly find the end of numbers/literals. +var delimiterMap = func() [4]uint64 { + var m [4]uint64 + for _, c := range []byte{' ', '\t', '\n', '\r', '[', ']', '{', '}', ':', ',', '"'} { + m[c/64] |= 1 << (c % 64) + } + return m +}() + +// isDelimiter returns true if c is a JSON delimiter or whitespace. +func isDelimiter(c byte) bool { + return (delimiterMap[c/64] & (1 << (c % 64))) != 0 +} + +// Tokenizer is a JSON tokenizer that splits input into tokens. +// It skips whitespace and returns individual JSON tokens one at a time. +type Tokenizer struct { + json string +} + +// Tokenize creates a new Tokenizer for the given JSON string. +func Tokenize(json string) *Tokenizer { + return &Tokenizer{json: json} +} + +// Next returns the next token from the input. +// Returns an empty string and false when there are no more tokens. +func (t *Tokenizer) Next() (token string, ok bool) { + token, t.json, ok = nextToken(t.json) + return token, ok +} + +// nextToken extracts the next JSON token from s. +// Returns the token, the remaining string after the token, and whether a token was found. +// All values are kept in registers - no heap allocation for tokenizer state. 
+func nextToken(s string) (token, rest string, ok bool) { + // Skip leading whitespace using lookup table + switch { + case len(s) == 0: + return "", "", false + case s[0] <= ' ': + for isWhitespace(s[0]) { + if s = s[1:]; len(s) == 0 { + return "", "", false + } + } + } + + switch s[0] { + case '"': + // Find closing quote, handling escapes + j := 1 + for { + k := strings.IndexByte(s[j:], '"') + if k < 0 { + return s, "", true + } + j += k + 1 + // Count preceding backslashes to check if quote is escaped + n := 0 + for i := j - 2; i > 0 && s[i] == '\\'; i-- { + n++ + } + if n%2 == 0 { + return s[:j], s[j:], true + } + } + case ',', ':', '[', ']', '{', '}': + return s[:1], s[1:], true + default: + // Numbers and literals: scan until delimiter using lookup table + j := 1 + for j < len(s) && !isDelimiter(s[j]) { + j++ + } + return s[:j], s[j:], true + } +} + +// ParseMaxDepth parses JSON data with a maximum nesting depth for objects. +// Objects at maxDepth <= 0 are stored unparsed and will be lazily parsed +// when accessed via Lookup(), Array(), or Object() methods. +// Depth is only decremented for objects, not arrays. +// Returns an error if the JSON is malformed or empty. +func ParseMaxDepth(data string, maxDepth int) (*Value, error) { + v, rest, err := parseValue(data, max(0, maxDepth)) + if err != nil { + return nil, err + } + // Check for trailing content after the root value + if extra, _, ok := nextToken(rest); ok { + return nil, fmt.Errorf("unexpected token after root value: %q", extra) + } + return &v, nil +} + +// Parse parses JSON data and returns a pointer to the root Value. +// Returns an error if the JSON is malformed or empty. +func Parse(data string) (*Value, error) { return ParseMaxDepth(data, DefaultMaxDepth) } + +// parseValue parses a JSON value from s. +// Returns the parsed value, the remaining unparsed string, and any error. +// The string is passed by value to keep it in registers. 
+func parseValue(s string, maxDepth int) (Value, string, error) { + token, rest, ok := nextToken(s) + if !ok { + return Value{}, rest, errUnexpectedEndOfObject + } + switch token[0] { + case 'n': + if token != "null" { + return Value{}, rest, fmt.Errorf("invalid token: %q", token) + } + return makeNullValue(token[:4]), rest, nil + case 't': + if token != "true" { + return Value{}, rest, fmt.Errorf("invalid token: %q", token) + } + return makeTrueValue(token[:4]), rest, nil + case 'f': + if token != "false" { + return Value{}, rest, fmt.Errorf("invalid token: %q", token) + } + return makeFalseValue(token[:5]), rest, nil + case '"': + // Validate the quoted string but store the quoted token + if !validString(token) { + return Value{}, rest, fmt.Errorf("invalid token: %q", token) + } + return makeStringValue(token), rest, nil + case '[': + return parseArray(s, rest, maxDepth) + case '{': + return parseObject(s, rest, maxDepth) + case ']': + return Value{}, rest, errEndOfArray + case '}': + return Value{}, rest, errEndOfObject + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if !validNumber(token) { + return Value{}, rest, fmt.Errorf("invalid number: %q", token) + } + return makeNumberValue(token), rest, nil + default: + return Value{}, rest, fmt.Errorf("invalid token: %q", token) + } +} + +func parseArray(start, json string, maxDepth int) (Value, string, error) { + elements := make([]Value, 0, 32) + + for i := 0; ; i++ { + if i != 0 { + token, rest, ok := nextToken(json) + if !ok { + return Value{}, json, errUnexpectedEndOfArray + } + if token == "]" { + cached := start[:len(start)-len(rest)] + result := make([]Value, len(elements)+1) + result[0] = makeStringValue(cached) + copy(result[1:], elements) + return makeArrayValue(result), rest, nil + } + if token != "," { + return Value{}, json, fmt.Errorf("expected ',' or ']', got %q", token) + } + json = rest + } + + v, rest, err := parseValue(json, maxDepth) + if err != nil { + if i == 0 && err == 
errEndOfArray { + cached := start[:len(start)-len(rest)] + result := make([]Value, len(elements)+1) + result[0] = makeStringValue(cached) + copy(result[1:], elements) + return makeArrayValue(result), rest, nil + } + if err == errEndOfArray { + return Value{}, json, fmt.Errorf("unexpected ']' after ','") + } + return Value{}, json, err + } + json = rest + elements = append(elements, v) + } +} + +func parseObject(start, json string, maxDepth int) (Value, string, error) { + if maxDepth == 0 { + depth, remain := 1, json + for depth > 0 { + token, next, ok := nextToken(remain) + if !ok { + return Value{}, remain, errUnexpectedEndOfObject + } + remain = next + switch token { + case "{": + depth++ + case "}": + depth-- + } + } + json := start[:len(start)-len(remain)] + return makeUnparsedObjectValue(json), remain, nil + } + + maxDepth-- + fields := make([]field, 0, 16) + + for i := 0; ; i++ { + token, rest, ok := nextToken(json) + if !ok { + return Value{}, json, errUnexpectedEndOfObject + } + if token == "}" { + cached := start[:len(start)-len(rest)] + result := make([]field, len(fields)+1) + copy(result[1:], fields) + + fields := result[1:] + hashes := make([]byte, len(fields), (len(fields)*8+1)/8) + for i := range fields { + hashes[i] = byte(maphash.String(hashseed, fields[i].k)) + } + + result[0].v = makeStringValue(cached) + result[0].k = unsafe.String(unsafe.SliceData(hashes), cap(hashes)) + return makeObjectValue(result), rest, nil + } + json = rest + + if i != 0 { + if token != "," { + return Value{}, json, fmt.Errorf("expected ',' or '}', got %q", token) + } + token, rest, ok = nextToken(json) + if !ok { + return Value{}, json, errUnexpectedEndOfObject + } + json = rest + } + + key, err := Unquote(token) + if err != nil { + return Value{}, json, fmt.Errorf("invalid key: %q: %w", token, err) + } + + token, rest, ok = nextToken(json) + if !ok { + return Value{}, json, errUnexpectedEndOfObject + } + if token != ":" { + return Value{}, json, fmt.Errorf("%q → expected 
':', got %q", key, token) + } + json = rest + + val, rest, err := parseValue(json, maxDepth) + if err != nil { + return Value{}, json, fmt.Errorf("%q → %w", key, err) + } + json = rest + fields = append(fields, field{k: key, v: val}) + } +} diff --git a/vendor/github.com/parquet-go/jsonlite/quote.go b/vendor/github.com/parquet-go/jsonlite/quote.go new file mode 100644 index 00000000000..0b849354a19 --- /dev/null +++ b/vendor/github.com/parquet-go/jsonlite/quote.go @@ -0,0 +1,121 @@ +package jsonlite + +import ( + "math/bits" + "unsafe" +) + +// Quote returns s as a JSON quoted string. +func Quote(s string) string { + return string(AppendQuote(make([]byte, 0, 2+len(s)), s)) +} + +// AppendQuote appends a JSON quoted string to b and returns the result. +func AppendQuote(b []byte, s string) []byte { + b = append(b, '"') + + for { + i := escapeIndex(s) + if i < 0 { + b = append(b, s...) + break + } + + b = append(b, s[:i]...) + switch c := s[i]; c { + case '\\', '"': + b = append(b, '\\', c) + case '\b': + b = append(b, '\\', 'b') + case '\f': + b = append(b, '\\', 'f') + case '\n': + b = append(b, '\\', 'n') + case '\r': + b = append(b, '\\', 'r') + case '\t': + b = append(b, '\\', 't') + default: + const hex = "0123456789abcdef" + b = append(b, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xf]) + } + + s = s[i+1:] + } + + return append(b, '"') +} + +/* +MIT License + +Copyright (c) 2019 Segment.io, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +const ( + lsb = 0x0101010101010101 + msb = 0x8080808080808080 +) + +// escapeIndex finds the index of the first char in `s` that requires escaping. +// A char requires escaping if it's outside of the range of [0x20, 0x7F] or if +// it includes a double quote or backslash. If no chars in `s` require escaping, +// the return value is -1. +func escapeIndex(s string) int { + var i int + if len(s) >= 8 { + chunks := unsafe.Slice((*uint64)(unsafe.Pointer(unsafe.StringData(s))), len(s)/8) + for j, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | below(n, 0x20) | contains(n, '"') | contains(n, '\\') + if (mask & msb) != 0 { + return j*8 + bits.TrailingZeros64(mask&msb)/8 + } + } + i = len(chunks) * 8 + } + + for ; i < len(s); i++ { + c := s[i] + if c < 0x20 || c > 0x7f || c == '"' || c == '\\' { + return i + } + } + + return -1 +} + +// below return a mask that can be used to determine if any of the bytes +// in `n` are below `b`. If a byte's MSB is set in the mask then that byte was +// below `b`. The result is only valid if `b`, and each byte in `n`, is below +// 0x80. +func below(n uint64, b byte) uint64 { return n - expand(b) } + +// contains returns a mask that can be used to determine if any of the +// bytes in `n` are equal to `b`. If a byte's MSB is set in the mask then +// that byte is equal to `b`. 
The result is only valid if `b`, and each +// byte in `n`, is below 0x80. +func contains(n uint64, b byte) uint64 { return (n ^ expand(b)) - lsb } + +// expand puts the specified byte into each of the 8 bytes of a uint64. +func expand(b byte) uint64 { return lsb * uint64(b) } diff --git a/vendor/github.com/parquet-go/jsonlite/unquote.go b/vendor/github.com/parquet-go/jsonlite/unquote.go new file mode 100644 index 00000000000..63e3f329820 --- /dev/null +++ b/vendor/github.com/parquet-go/jsonlite/unquote.go @@ -0,0 +1,218 @@ +package jsonlite + +import ( + "fmt" + "math/bits" + "unicode/utf16" + "unicode/utf8" + "unsafe" +) + +const ( + // UTF-16 surrogate pair boundaries (from Unicode standard) + surrogateMin = 0xD800 // Start of high surrogate range + lowSurrogateMin = 0xDC00 // Start of low surrogate range + lowSurrogateMax = 0xDFFF // End of low surrogate range +) + +// Unquote removes quotes from a JSON string and processes escape sequences. +// Returns an error if the string is not properly quoted or contains invalid escapes. +// When the string contains no escape sequences, returns a zero-copy substring. +func Unquote(s string) (string, error) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return "", fmt.Errorf("invalid quoted string: %s", s) + } + s = s[1 : len(s)-1] + // Fast path: check if string needs unescaping (has backslash or control chars) + if !escaped(s) { + return s, nil + } + b, err := unquote(make([]byte, 0, len(s)), s) + return string(b), err +} + +// AppendUnquote appends the unquoted string to the buffer. +// Returns an error if the string is not properly quoted or contains invalid escapes. 
+func AppendUnquote(b []byte, s string) ([]byte, error) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return b, fmt.Errorf("invalid quoted string: %s", s) + } + s = s[1 : len(s)-1] + // Fast path: check if string needs unescaping + if !escaped(s) { + return append(b, s...), nil + } + return unquote(b, s) +} + +func escaped(s string) bool { + // SIMD-like scanning for backslash or control characters. + // The bit tricks only work correctly when all bytes are < 0x80, + // so we also check for high bytes and fall back to byte-by-byte. + var i int + if len(s) >= 8 { + chunks := unsafe.Slice((*uint64)(unsafe.Pointer(unsafe.StringData(s))), len(s)/8) + for _, n := range chunks { + // Check for high bytes (>= 0x80), backslash, or control chars + mask := n | below(n, 0x20) | contains(n, '\\') + if (mask & msb) != 0 { + return true + } + } + i = len(chunks) * 8 + } + + for ; i < len(s); i++ { + c := s[i] + if c < 0x20 || c == '\\' { + return true + } + } + + return false +} + +// unescapeIndex checks if the string content needs unescaping. +// Returns -1 if no unescaping needed, or the index of the first problematic byte. +// A string needs unescaping if it contains backslash or control characters (< 0x20). +func unescapeIndex(s string) int { + // SIMD-like scanning for backslash or control characters. + // The bit tricks only work correctly when all bytes are < 0x80, + // so we also check for high bytes and fall back to byte-by-byte. 
+ var i int + if len(s) >= 8 { + chunks := unsafe.Slice((*uint64)(unsafe.Pointer(unsafe.StringData(s))), len(s)/8) + for j, n := range chunks { + // Check for high bytes (>= 0x80), backslash, or control chars + mask := n | below(n, 0x20) | contains(n, '\\') + if (mask & msb) != 0 { + // Found something in this chunk - check byte at the position + k := j*8 + bits.TrailingZeros64(mask&msb)/8 + c := s[k] + switch { + case c < 0x20, c == '\\': + return k + default: + // High byte (>= 0x80) - scan rest of chunk byte by byte + for k++; k < (j+1)*8; k++ { + c := s[k] + if c < 0x20 || c == '\\' { + return k + } + } + } + } + } + i = len(chunks) * 8 + } + + for ; i < len(s); i++ { + c := s[i] + if c < 0x20 || c == '\\' { + return i + } + } + + return -1 +} + +// unquote processes escape sequences in content and appends to b. +// content should not include the surrounding quotes. +func unquote(b []byte, s string) ([]byte, error) { + for len(s) > 0 { + i := unescapeIndex(s) + if i < 0 { + return append(b, s...), nil + } + + b = append(b, s[:i]...) 
+ c := s[i] + if c < 0x20 { + return b, fmt.Errorf("invalid control character in string") + } + if i+1 >= len(s) { + return b, fmt.Errorf("invalid escape sequence at end of string") + } + + switch c := s[i+1]; c { + case '"', '\\', '/': + b = append(b, c) + s = s[i+2:] + case 'b': + b = append(b, '\b') + s = s[i+2:] + case 'f': + b = append(b, '\f') + s = s[i+2:] + case 'n': + b = append(b, '\n') + s = s[i+2:] + case 'r': + b = append(b, '\r') + s = s[i+2:] + case 't': + b = append(b, '\t') + s = s[i+2:] + case 'u': + if i+6 > len(s) { + return b, fmt.Errorf("invalid unicode escape sequence") + } + r1, ok := parseHex4(s[i+2 : i+6]) + if !ok { + return b, fmt.Errorf("invalid unicode escape sequence") + } + + // Check for UTF-16 surrogate pair + if utf16.IsSurrogate(r1) { + // Low surrogate without high surrogate is an error + if r1 >= lowSurrogateMin { + return b, fmt.Errorf("invalid surrogate pair: unexpected low surrogate") + } + // High surrogate, look for low surrogate + if i+12 > len(s) || s[i+6] != '\\' || s[i+7] != 'u' { + return b, fmt.Errorf("invalid surrogate pair: missing low surrogate") + } + r2, ok := parseHex4(s[i+8 : i+12]) + if !ok { + return b, fmt.Errorf("invalid unicode escape sequence in surrogate pair") + } + if r2 < lowSurrogateMin || r2 > lowSurrogateMax { + return b, fmt.Errorf("invalid surrogate pair: low surrogate out of range") + } + // Decode the surrogate pair + b = utf8.AppendRune(b, utf16.DecodeRune(r1, r2)) + s = s[i+12:] + } else { + b = utf8.AppendRune(b, r1) + s = s[i+6:] + } + default: + return b, fmt.Errorf("invalid escape character: %q", c) + } + } + return b, nil +} + +// parseHex4 parses a 4-character hex string into a rune. +// Returns the rune and true on success, or 0 and false on failure. 
+func parseHex4(s string) (rune, bool) { + if len(s) < 4 { + return 0, false + } + var r rune + for i := range 4 { + r <<= 4 + c := s[i] + switch { + case c >= '0' && c <= '9': + r |= rune(c - '0') + case c >= 'a' && c <= 'f': + r |= rune(c - 'a' + 10) + case c >= 'A' && c <= 'F': + r |= rune(c - 'A' + 10) + default: + return 0, false + } + } + return r, true +} diff --git a/vendor/github.com/parquet-go/jsonlite/valid.go b/vendor/github.com/parquet-go/jsonlite/valid.go new file mode 100644 index 00000000000..e16bc9c3930 --- /dev/null +++ b/vendor/github.com/parquet-go/jsonlite/valid.go @@ -0,0 +1,244 @@ +package jsonlite + +// Valid reports whether json is a valid JSON string. +// This is similar to encoding/json.Valid but uses the jsonlite tokenizer +// for efficient zero-allocation validation. +func Valid(json string) bool { + tok := Tokenize(json) + if !valid(tok) { + return false + } + // Ensure no trailing content after root value + _, ok := tok.Next() + return !ok +} + +// valid validates a single JSON value and returns true if valid. +func valid(tok *Tokenizer) bool { + token, ok := tok.Next() + if !ok { + return false + } + return validToken(tok, token) +} + +// validToken validates a token and any nested structure it may contain. +func validToken(tok *Tokenizer, token string) bool { + switch token[0] { + case 'n': + return token == "null" + case 't': + return token == "true" + case 'f': + return token == "false" + case '"': + return validString(token) + case '[': + return validArray(tok) + case '{': + return validObject(tok) + default: + return validNumber(token) + } +} + +// validString checks if a token is a valid JSON string. +// The token must start and end with quotes and contain valid escape sequences. 
+func validString(s string) bool { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return false + } + // Check for valid escape sequences and no unescaped control characters + content := s[1 : len(s)-1] + + // Fast path: use SIMD-like scanning from unquote.go to check if we need + // detailed validation. If no backslashes or control chars, string is valid. + if !escaped(content) { + return true + } + + // Slow path: validate escape sequences + return validStringEscapes(content) +} + +// validStringEscapes validates escape sequences in a string that contains +// at least one backslash or control character. +func validStringEscapes(content string) bool { + for i := 0; i < len(content); i++ { + c := content[i] + if c < 0x20 { + // Control characters must be escaped + return false + } + if c == '\\' { + i++ + if i >= len(content) { + return false + } + switch content[i] { + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + // Valid single-character escape + case 'u': + // Unicode escape: must be followed by 4 hex digits + if i+4 >= len(content) { + return false + } + if !isHexDigit(content[i+1]) || !isHexDigit(content[i+2]) || + !isHexDigit(content[i+3]) || !isHexDigit(content[i+4]) { + return false + } + i += 4 + default: + return false + } + } + } + return true +} + +// isHexDigit returns true if c is a valid hexadecimal digit. +func isHexDigit(c byte) bool { + return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') +} + +// validArray validates a JSON array starting after the '[' token. 
+func validArray(tok *Tokenizer) bool { + // Check for empty array + token, ok := tok.Next() + if !ok { + return false + } + if token == "]" { + return true + } + + // Parse first element + if !validToken(tok, token) { + return false + } + + // Parse remaining elements + for { + token, ok = tok.Next() + if !ok { + return false + } + if token == "]" { + return true + } + if token != "," { + return false + } + // Expect value after comma + token, ok = tok.Next() + if !ok { + return false + } + if token == "]" { + // Trailing comma is not valid JSON + return false + } + if !validToken(tok, token) { + return false + } + } +} + +// validObject validates a JSON object starting after the '{' token. +func validObject(tok *Tokenizer) bool { + for i := 0; ; i++ { + token, ok := tok.Next() + if !ok { + return false + } + if token == "}" { + return true // Empty object or end of object + } + if i > 0 { + // After first field, expect comma then key + if token != "," { + return false + } + token, ok = tok.Next() + if !ok { + return false + } + if token == "}" { + // Trailing comma is not valid JSON + return false + } + } + // Expect string key + if len(token) == 0 || token[0] != '"' || !validString(token) { + return false + } + // Expect colon + token, ok = tok.Next() + if !ok || token != ":" { + return false + } + // Expect value + if !valid(tok) { + return false + } + } +} + +// validNumber checks if a string is a valid JSON number. +// JSON numbers: -?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][+-]?[0-9]+)? +func validNumber(s string) bool { + if len(s) == 0 { + return false + } + i := 0 + + // Optional minus sign + if s[i] == '-' { + i++ + if i >= len(s) { + return false + } + } + + // Integer part + if s[i] == '0' { + i++ + } else if s[i] >= '1' && s[i] <= '9' { + i++ + for i < len(s) && s[i] >= '0' && s[i] <= '9' { + i++ + } + } else { + return false + } + + // Fractional part + if i < len(s) && s[i] == '.' 
{ + i++ + if i >= len(s) || s[i] < '0' || s[i] > '9' { + return false + } + for i < len(s) && s[i] >= '0' && s[i] <= '9' { + i++ + } + } + + // Exponent part + if i < len(s) && (s[i] == 'e' || s[i] == 'E') { + i++ + if i >= len(s) { + return false + } + if s[i] == '+' || s[i] == '-' { + i++ + } + if i >= len(s) || s[i] < '0' || s[i] > '9' { + return false + } + for i < len(s) && s[i] >= '0' && s[i] <= '9' { + i++ + } + } + + return i == len(s) +} diff --git a/vendor/github.com/parquet-go/jsonlite/value.go b/vendor/github.com/parquet-go/jsonlite/value.go new file mode 100644 index 00000000000..5aa5fad8d26 --- /dev/null +++ b/vendor/github.com/parquet-go/jsonlite/value.go @@ -0,0 +1,415 @@ +package jsonlite + +import ( + "encoding/json" + "fmt" + "hash/maphash" + "strconv" + "strings" + "unsafe" +) + +const ( + // kindShift is calculated based on pointer size to use the high bits + // for the kind field. We have 7 Kind values (0-6), requiring 3 bits. + // On 64-bit systems this is 61 (top 3 bits for kind, bottom 61 for length), + // on 32-bit systems this is 29 (top 3 bits for kind, bottom 29 for length). + kindShift = (unsafe.Sizeof(uintptr(0))*8 - 3) + kindMask = (1 << kindShift) - 1 + // unparsedBit is set for objects/arrays that haven't been parsed yet (lazy parsing). + // On 64-bit systems this is bit 60, on 32-bit systems this is bit 28. + unparsedBit = uintptr(1) << (kindShift - 1) +) + +var ( + // hashseed is the seed used for hashing object keys. + hashseed = maphash.MakeSeed() +) + +// Kind represents the type of a JSON value. +type Kind int + +const ( + // Null represents a JSON null value. + Null Kind = iota + // True represents a JSON true boolean value. + True + // False represents a JSON false boolean value. + False + // Number represents a JSON number value. + Number + // String represents a JSON string value. + String + // Object represents a JSON object value. + Object + // Array represents a JSON array value. 
+ Array +) + +// Value represents a JSON value of any type. +// +// Value instances as immutable, they can be safely accessed from multiple +// goroutines. +// +// The zero-value of Value is invalid, all Value instances must be acquired +// form Parse or from an Iterator. +type Value struct { + p unsafe.Pointer + n uintptr +} + +type field struct { + k string + v Value +} + +// Kind returns the type of the JSON value. +func (v *Value) Kind() Kind { return Kind(v.n >> kindShift) } + +// Len returns the length of the value. +// For strings, it returns the number of bytes. +// For arrays, it returns the number of elements. +// For objects, it returns the number of fields. +// Panics if called on other types. +func (v *Value) Len() int { + switch v.Kind() { + case String: + // String values now store quoted JSON - subtract 2 for quotes + return int(v.n&kindMask) - 2 + case Number: + return int(v.n & kindMask) + case Array, Object: + parsed := v + if v.unparsed() { + parsed = v.parse() + } + // First element/field is always cached JSON + return int(parsed.n&kindMask) - 1 + default: + panic("jsonlite: Len called on non-string/array/object value") + } +} + +// Int returns the value as a signed 64-bit integer. +// Panics if the value is not a number or if parsing fails. +func (v *Value) Int() int64 { + if v.Kind() != Number { + panic("jsonlite: Int called on non-number value") + } + i, err := strconv.ParseInt(v.json(), 10, 64) + if err != nil { + panic(err) + } + return i +} + +// Uint returns the value as an unsigned 64-bit integer. +// Panics if the value is not a number or if parsing fails. +func (v *Value) Uint() uint64 { + if v.Kind() != Number { + panic("jsonlite: Uint called on non-number value") + } + u, err := strconv.ParseUint(v.json(), 10, 64) + if err != nil { + panic(err) + } + return u +} + +// Float returns the value as a 64-bit floating point number. +// Panics if the value is not a number or if parsing fails. 
+func (v *Value) Float() float64 { + if v.Kind() != Number { + panic("jsonlite: Float called on non-number value") + } + f, err := strconv.ParseFloat(v.json(), 64) + if err != nil { + panic(err) + } + return f +} + +// String returns the value as a string. +// For string and number values, returns the raw value. +// For other types, returns the JSON representation. +func (v *Value) String() string { + switch v.Kind() { + case Null: + return "" + case String: + s, _ := Unquote(v.json()) + return s + case Number, True, False: + return v.json() + case Array: + return (*Value)(v.p).json() + default: + if v.unparsed() { + return v.json() + } + return (*field)(v.p).v.json() + } +} + +// JSON returns the JSON representation of the value. +func (v *Value) JSON() string { + switch v.Kind() { + case String, Number, Null, True, False: + return v.json() + case Array: + return (*Value)(v.p).json() + default: + if v.unparsed() { + return v.json() + } + return (*field)(v.p).v.json() + } +} + +// Array iterates over the array elements. +// Panics if the value is not an array. +func (v *Value) Array(yield func(*Value) bool) { + if v.Kind() != Array { + panic("jsonlite: Array called on non-array value") + } + parsed := v + if v.unparsed() { + parsed = v.parse() + } + elems := unsafe.Slice((*Value)(parsed.p), parsed.len())[1:] + for i := range elems { + if !yield(&elems[i]) { + return + } + } +} + +// Object iterates over the object's key/value pairs. +// Panics if the value is not an object. +func (v *Value) Object(yield func(string, *Value) bool) { + if v.Kind() != Object { + panic("jsonlite: Object called on non-object value") + } + parsed := v + if v.unparsed() { + parsed = v.parse() + } + fields := unsafe.Slice((*field)(parsed.p), parsed.len())[1:] + for i := range fields { + if !yield(fields[i].k, &fields[i].v) { + return + } + } +} + +// Lookup searches for a field by key in an object and returns a pointer to its value. +// Returns nil if the key is not found. 
+// Panics if the value is not an object. +func (v *Value) Lookup(k string) *Value { + if v.Kind() != Object { + panic("jsonlite: Lookup called on non-object value") + } + parsed := v + if v.unparsed() { + parsed = v.parse() + } + fields := unsafe.Slice((*field)(parsed.p), parsed.len()) + hashes := fields[0].k + refkey := byte(maphash.String(hashseed, k)) + offset := 0 + for { + i := strings.IndexByte(hashes[offset:], refkey) + if i < 0 { + return nil + } + j := offset + i + 1 + f := &fields[j] + if f.k == k { + return &f.v + } + offset = j + } +} + +// LookupPath searches for a nested field by following a path of keys. +// Returns nil if any key in the path is not found. +// Panics if any intermediate value is not an object. +// If path is empty, returns the value itself. +func (v *Value) LookupPath(path ...string) *Value { + for _, key := range path { + if v == nil { + return nil + } + v = v.Lookup(key) + } + return v +} + +// Index returns the value at index i in an array. +// Panics if the value is not an array or if the index is out of range. +func (v *Value) Index(i int) *Value { + if v.Kind() != Array { + panic("jsonlite: Index called on non-array value") + } + parsed := v + if v.unparsed() { + parsed = v.parse() + } + elems := unsafe.Slice((*Value)(parsed.p), parsed.len())[1:] + return &elems[i] +} + +// NumberType returns the classification of the number (int, uint, or float). +// Panics if the value is not a number. +func (v *Value) NumberType() NumberType { + if v.Kind() != Number { + panic("jsonlite: NumberType called on non-number value") + } + return NumberTypeOf(v.json()) +} + +// Number returns the value as a json.Number. +// Panics if the value is not a number. 
+func (v *Value) Number() json.Number { + if v.Kind() != Number { + panic("jsonlite: Number called on non-number value") + } + return json.Number(v.json()) +} + +func (v *Value) json() string { + return unsafe.String((*byte)(v.p), v.len()) +} + +func (v *Value) len() int { + return int(v.n & (kindMask &^ unparsedBit)) +} + +func (v *Value) unparsed() bool { + return (v.n & unparsedBit) != 0 +} + +func (v *Value) parse() *Value { + parsed, err := Parse(v.JSON()) + if err != nil { + panic(fmt.Errorf("jsonlite: lazy parse failed: %w", err)) + } + return parsed +} + +// NumberType represents the classification of a JSON number. +type NumberType int + +const ( + // Int indicates a signed integer number (has a leading minus sign, no decimal point or exponent). + Int NumberType = iota + // Uint indicates an unsigned integer number (no minus sign, no decimal point or exponent). + Uint + // Float indicates a floating point number (has decimal point or exponent). + Float +) + +// NumberTypeOf returns the classification of a number string. 
+func NumberTypeOf(s string) NumberType { + if len(s) == 0 { + return Float + } + t := Uint + if s[0] == '-' { + s = s[1:] + t = Int + } + for i := range len(s) { + switch s[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + continue + default: + return Float + } + } + return t +} + +func makeValue(k Kind, s string) Value { + return Value{ + p: unsafe.Pointer(unsafe.StringData(s)), + n: (uintptr(k) << kindShift) | uintptr(len(s)), + } +} + +func makeNullValue(s string) Value { return makeValue(Null, s) } + +func makeTrueValue(s string) Value { return makeValue(True, s) } + +func makeFalseValue(s string) Value { return makeValue(False, s) } + +func makeNumberValue(s string) Value { return makeValue(Number, s) } + +func makeStringValue(s string) Value { return makeValue(String, s) } + +func makeArrayValue(elements []Value) Value { + return Value{ + p: unsafe.Pointer(unsafe.SliceData(elements)), + n: (uintptr(Array) << kindShift) | uintptr(len(elements)), + } +} + +func makeObjectValue(fields []field) Value { + return Value{ + p: unsafe.Pointer(unsafe.SliceData(fields)), + n: (uintptr(Object) << kindShift) | uintptr(len(fields)), + } +} + +func makeUnparsedObjectValue(json string) Value { + return Value{ + p: unsafe.Pointer(unsafe.StringData(json)), + n: (uintptr(Object) << kindShift) | uintptr(len(json)) | unparsedBit, + } +} + +// Append serializes the Value to JSON and appends it to the buffer. +// Returns the extended buffer. +func (v *Value) Append(buf []byte) []byte { return append(buf, v.JSON()...) } + +// Compact appends a compacted JSON representation of the value to buf by recursively +// reconstructing it from the parsed structure. Unlike Append, this method does not +// use cached JSON and always regenerates the output. +func (v *Value) Compact(buf []byte) []byte { + switch v.Kind() { + case String, Null, True, False, Number: + return append(buf, v.json()...) 
+ case Array: + parsed := v + if v.unparsed() { + parsed = v.parse() + } + buf = append(buf, '[') + var count int + for elem := range parsed.Array { + if count > 0 { + buf = append(buf, ',') + } + buf = elem.Compact(buf) + count++ + } + return append(buf, ']') + default: + parsed := v + if v.unparsed() { + parsed = v.parse() + } + buf = append(buf, '{') + var count int + for k, v := range parsed.Object { + if count > 0 { + buf = append(buf, ',') + } + buf = AppendQuote(buf, k) + buf = append(buf, ':') + buf = v.Compact(buf) + count++ + } + return append(buf, '}') + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/.gitattributes b/vendor/github.com/parquet-go/parquet-go/.gitattributes new file mode 100644 index 00000000000..75b22ee8ecc --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/.gitattributes @@ -0,0 +1,2 @@ +internal/gen-go/* linguist-generated=true +testdata/gen/** binary linguist-generated=true \ No newline at end of file diff --git a/vendor/github.com/parquet-go/parquet-go/.gitignore b/vendor/github.com/parquet-go/parquet-go/.gitignore new file mode 100644 index 00000000000..b3584c8d4de --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/.gitignore @@ -0,0 +1,21 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.py + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Emacs +*~ +#*# +.# diff --git a/vendor/github.com/parquet-go/parquet-go/.mailmap b/vendor/github.com/parquet-go/parquet-go/.mailmap new file mode 100644 index 00000000000..09d89739a51 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/.mailmap @@ -0,0 +1,2 @@ +Achille Roussel Achille +Thomas Pelletier Thomas Pelletier diff --git a/vendor/github.com/parquet-go/parquet-go/.words b/vendor/github.com/parquet-go/parquet-go/.words new file mode 100644 index 
00000000000..d7ff3655600 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/.words @@ -0,0 +1,27 @@ + +RowType +Twilio +bottlenecked +decompressors +int96 +millis +nanos +reindexing +repositions +schemas +ColumnPages +PageIndex +Zstandard +xxHash +cardinality +enums +32bit +dic +Blart +Versenwald +purego +stdlib +unscaled +cespare +bitset +checksumming diff --git a/vendor/github.com/parquet-go/parquet-go/AUTHORS.txt b/vendor/github.com/parquet-go/parquet-go/AUTHORS.txt new file mode 100644 index 00000000000..26669538c14 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/AUTHORS.txt @@ -0,0 +1,5 @@ +Achille Roussel +Frederic Branczyk +Julien Fabre +Kevin Burke +Thomas Pelletier diff --git a/vendor/github.com/parquet-go/parquet-go/CHANGELOG.md b/vendor/github.com/parquet-go/parquet-go/CHANGELOG.md new file mode 100644 index 00000000000..987597104ca --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/CHANGELOG.md @@ -0,0 +1,16 @@ + # v0.17.0 + + ## Breaking Changes + + - migrate to module github.com/parquet-go/parquet-go [#3](https://github.com/parquet-go/parquet-go/pull/3) @kevinburke + - drop support for `go1.17` [#16](https://github.com/parquet-go/parquet-go/pull/16) @gernest + + ## Bug fixes + + - fix error handling when reading from io.ReaderAt [#18](https://github.com/parquet-go/parquet-go/pull/18) @gernest + - fix zero value of nested field point [#9](https://github.com/parquet-go/parquet-go/pull/9) @gernest + - fix memory corruption in `MergeRowGroups` [#31](https://github.com/parquet-go/parquet-go/pull/31) @gernest + + ## Enhancements + - performance improvement on GenericReader [#17](https://github.com/parquet-go/parquet-go/pull/17) @gernest, @zolstein + - stabilize flakey `TestOpenFile` [#11](https://github.com/parquet-go/parquet-go/pull/11) @gernest diff --git a/vendor/github.com/parquet-go/parquet-go/CODEOWNERS b/vendor/github.com/parquet-go/parquet-go/CODEOWNERS new file mode 100644 index 00000000000..45632dfe361 --- /dev/null 
+++ b/vendor/github.com/parquet-go/parquet-go/CODEOWNERS @@ -0,0 +1 @@ +* @achille-roussel @fpetkovski @joe-elliott @thorfour diff --git a/vendor/github.com/parquet-go/parquet-go/CODE_OF_CONDUCT.md b/vendor/github.com/parquet-go/parquet-go/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..2f0727ed54e --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at open-source@twilio.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org diff --git a/vendor/github.com/parquet-go/parquet-go/CONTRIBUTING.md b/vendor/github.com/parquet-go/parquet-go/CONTRIBUTING.md new file mode 100644 index 00000000000..185d9581184 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/CONTRIBUTING.md @@ -0,0 +1,52 @@ +# Contributing to segmentio/parquet + +## Code of Conduct + +Help us keep the project open and inclusive. Please be kind to and +considerate of other developers, as we all have the same goal: make +the project as good as it can be. + +* [Code of Conduct](./CODE_OF_CONDUCT.md) + +## Licensing + +All third party contributors acknowledge that any contributions they provide +will be made under the same open source license that the open source project +is provided under. + +## Contributing + +* Open an Issue to report bugs or discuss non-trivial changes. +* Open a Pull Request to submit a code change for review. + +### Guidelines for code review + +It's good to do code review but we are busy and it's bad to wait for consensus +or opinions that might not arrive. 
Here are some guidelines + +#### Changes where code review is optional + +- Documentation changes +- Bug fixes where a reproducible test case exists +- Keeping the lights on style work (compat with new Parquet versions, new Go + versions, for example) +- Updates to the CI environment +- Adding additional benchmarks or test cases +- Pull requests that have been open for 30 days, where an attempt has been made + to contact another code owner, and no one has expressly requested changes + +#### Changes that should get at least one code review from an owner + +- Changes that may affect the performance of core library functionality + (serializing, deserializing Parquet data) by more than 2% +- Behavior changes +- New API or changes to existing API + +### Coding Rules + +To ensure consistency throughout the source code, keep these rules in mind +when submitting contributions: + +* All features or bug fixes must be tested by one or more tests. +* All exported types, functions, and symbols must be documented. +* All code must be formatted with `go fmt`. diff --git a/vendor/github.com/parquet-go/parquet-go/LICENSE b/vendor/github.com/parquet-go/parquet-go/LICENSE new file mode 100644 index 00000000000..251c7e5154f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/LICENSE @@ -0,0 +1,213 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2023 Twilio, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +This product includes code from Apache Parquet. + +* deprecated/parquet.go is based on Apache Parquet's thrift file +* format/parquet.go is based on Apache Parquet's thrift file + +Copyright: 2014 The Apache Software Foundation. +Home page: https://github.com/apache/parquet-format +License: http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/parquet-go/parquet-go/Makefile b/vendor/github.com/parquet-go/parquet-go/Makefile new file mode 100644 index 00000000000..e3bfd8bc2d0 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/Makefile @@ -0,0 +1,15 @@ +.PHONY: format + +AUTHORS.txt: .mailmap + go install github.com/kevinburke/write_mailmap@latest + write_mailmap > AUTHORS.txt + +tools: + go mod tidy -modfile go.tools.mod + +format: tools + go fmt ./... + go tool -modfile go.tools.mod modernize -fix -test ./... + +test: + go test -v -trimpath -race -cover -tags= ./... diff --git a/vendor/github.com/parquet-go/parquet-go/README.md b/vendor/github.com/parquet-go/parquet-go/README.md new file mode 100644 index 00000000000..be0415bdf7b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/README.md @@ -0,0 +1,615 @@ +
+ +
+

parquet-go/parquet-go

+

+High-performance Go library to manipulate parquet files, initially developed at +Twilio Segment. +

+ +
+ +## Motivation + +Parquet has been established as a powerful solution to represent columnar data +on persistent storage mediums, achieving levels of compression and query +performance that enable managing data sets at scales that reach the petabytes. +In addition, having intensive data applications sharing a common format creates +opportunities for interoperation in our tool kits, providing greater leverage +and value to engineers maintaining and operating those systems. + +The creation and evolution of large scale data management systems, combined with +realtime expectations come with challenging maintenance and performance +requirements, that existing solutions to use parquet with Go were not addressing. + +The `parquet-go/parquet-go` package was designed and developed to respond to those +challenges, offering high level APIs to read and write parquet files, while +keeping a low compute and memory footprint in order to be used in environments +where data volumes and cost constraints require software to achieve high levels +of efficiency. + +## Specification + +Columnar storage allows Parquet to store data more efficiently than, say, +using JSON or Protobuf. For more information, refer to the [Parquet Format Specification](https://github.com/apache/parquet-format). + +## Installation + +The package is distributed as a standard Go module that programs can take a +dependency on and install with the following command: + +```bash +go get github.com/parquet-go/parquet-go +``` + +Go 1.22 or later is required to use the package. + +### Compatibility Guarantees + +The package is currently released as a pre-v1 version, which gives maintainers +the freedom to break backward compatibility to help improve the APIs as we learn +which initial design decisions would need to be revisited to better support the +use cases that the library solves for. 
These occurrences are expected to be rare +in frequency and documentation will be produce to guide users on how to adapt +their programs to breaking changes. + +## Usage + +The following sections describe how to use APIs exposed by the library, +highlighting the use cases with code examples to demonstrate how they are used +in practice. + +### Writing Parquet Files: [parquet.GenericWriter[T]](https://pkg.go.dev/github.com/parquet-go/parquet-go#GenericWriter) + +A parquet file is a collection of rows sharing the same schema, arranged in +columns to support faster scan operations on subsets of the data set. + +For simple use cases, the `parquet.WriteFile[T]` function allows the creation +of parquet files on the file system from a slice of Go values representing the +rows to write to the file. + +```go +type RowType struct { FirstName, LastName string } + +if err := parquet.WriteFile("file.parquet", []RowType{ + {FirstName: "Bob"}, + {FirstName: "Alice"}, +}); err != nil { + ... +} +``` + +The `parquet.GenericWriter[T]` type denormalizes rows into columns, then encodes +the columns into a parquet file, generating row groups, column chunks, and pages +based on configurable heuristics. + +```go +type RowType struct { FirstName, LastName string } + +writer := parquet.NewGenericWriter[RowType](output) + +_, err := writer.Write([]RowType{ + ... +}) +if err != nil { + ... +} + +// Closing the writer is necessary to flush buffers and write the file footer. +if err := writer.Close(); err != nil { + ... +} +``` + +Explicit declaration of the parquet schema on a writer is useful when the +application needs to ensure that data written to a file adheres to a predefined +schema, which may differ from the schema derived from the writer's type +parameter. The `parquet.Schema` type is a in-memory representation of the schema +of parquet rows, translated from the type of Go values, and can be used for this +purpose. 
+ +```go +schema := parquet.SchemaOf(new(RowType)) +writer := parquet.NewGenericWriter[any](output, schema) +... +``` + +### Reading Parquet Files: [parquet.GenericReader[T]](https://pkg.go.dev/github.com/parquet-go/parquet-go#GenericReader) + +For simple use cases where the data set fits in memory and the program will +read most rows of the file, the `parquet.ReadFile[T]` function returns a slice +of Go values representing the rows read from the file. + +```go +type RowType struct { FirstName, LastName string } + +rows, err := parquet.ReadFile[RowType]("file.parquet") +if err != nil { + ... +} + +for _, c := range rows { + fmt.Printf("%+v\n", c) +} +``` + +The expected schema of rows can be explicitly declared when the reader is +constructed, which is useful to ensure that the program receives rows matching +an specific format; for example, when dealing with files from remote storage +sources that applications cannot trust to have used an expected schema. + +Configuring the schema of a reader is done by passing a `parquet.Schema` +instance as argument when constructing a reader. When the schema is declared, +conversion rules implemented by the package are applied to ensure that rows +read by the application match the desired format (see **Evolving Parquet Schemas**). + +```go +schema := parquet.SchemaOf(new(RowType)) +reader := parquet.NewReader(file, schema) +... +``` + +### Inspecting Parquet Files: [parquet.File](https://pkg.go.dev/github.com/parquet-go/parquet-go#File) + +Sometimes, lower-level APIs can be useful to leverage the columnar layout of +parquet files. The `parquet.File` type is intended to provide such features to +Go applications, by exposing APIs to iterate over the various parts of a +parquet file. + +```go +f, err := parquet.OpenFile(file, size) +if err != nil { + ... +} + +for _, rowGroup := range f.RowGroups() { + for _, columnChunk := range rowGroup.ColumnChunks() { + ... 
+ } +} +``` + +### Evolving Parquet Schemas: [parquet.Convert](https://pkg.go.dev/github.com/parquet-go/parquet-go#Convert) + +Parquet files embed all the metadata necessary to interpret their content, +including a description of the schema of the tables represented by the rows and +columns they contain. + +Parquet files are also immutable; once written, there is no mechanism for +_updating_ a file. If their contents need to be changed, rows must be read, +modified, and written to a new file. + +Because applications evolve, the schema written to parquet files also tends to +evolve over time. Those requirements create challenges when applications need +to operate on parquet files with heterogeneous schemas: algorithms that expect +new columns to exist may have issues dealing with rows that come from files with +mismatching schema versions. + +To help build applications that can handle evolving schemas, `parquet-go/parquet-go` +implements conversion rules that create views of row groups to translate between +schema versions. + +The `parquet.Convert` function is the low-level routine constructing conversion +rules from a source to a target schema. The function is used to build converted +views of `parquet.RowReader` or `parquet.RowGroup`, for example: + +```go +type RowTypeV1 struct { ID int64; FirstName string } +type RowTypeV2 struct { ID int64; FirstName, LastName string } + +source := parquet.SchemaOf(RowTypeV1{}) +target := parquet.SchemaOf(RowTypeV2{}) + +conversion, err := parquet.Convert(target, source) +if err != nil { + ... +} + +targetRowGroup := parquet.ConvertRowGroup(sourceRowGroup, conversion) +... +``` + +Conversion rules are automatically applied by the `parquet.CopyRows` function +when the reader and writers passed to the function also implement the +`parquet.RowReaderWithSchema` and `parquet.RowWriterWithSchema` interfaces.
+The copy determines whether the reader and writer schemas can be converted from +one to the other, and automatically applies the conversion rules to facilitate +the translation between schemas. + +At this time, conversion rules only supports adding or removing columns from +the schemas, there are no type conversions performed, nor ways to rename +columns, etc... More advanced conversion rules may be added in the future. + +### Sorting Row Groups: [parquet.GenericBuffer[T]](https://pkg.go.dev/github.com/parquet-go/parquet-go#Buffer) + +The `parquet.GenericWriter[T]` type is optimized for minimal memory usage, +keeping the order of rows unchanged and flushing pages as soon as they are filled. + +Parquet supports expressing columns by which rows are sorted through the +declaration of _sorting columns_ on row groups. Sorting row groups requires +buffering all rows before ordering and writing them to a parquet file. + +To help with those use cases, the `parquet-go/parquet-go` package exposes the +`parquet.GenericBuffer[T]` type which acts as a buffer of rows and implements +`sort.Interface` to allow applications to sort rows prior to writing them +to a file. + +The columns that rows are ordered by are configured when creating +`parquet.GenericBuffer[T]` instances using the `parquet.SortingColumns` function +to construct row group options configuring the buffer. The type of parquet +columns defines how values are compared, see [Parquet Logical Types](https://github.com/apache/parquet-format/blob/master/LogicalTypes.md) +for details. + +When written to a file, the buffer is materialized into a single row group with +the declared sorting columns. After being written, buffers can be reused by +calling their `Reset` method. 
+ +The following example shows how to use a `parquet.GenericBuffer[T]` to order rows +written to a parquet file: + +```go +type RowType struct { FirstName, LastName string } + +buffer := parquet.NewGenericBuffer[RowType]( + parquet.SortingRowGroupConfig( + parquet.SortingColumns( + parquet.Ascending("LastName"), + parquet.Ascending("FirstName"), + ), + ), +) + +buffer.Write([]RowType{ + {FirstName: "Luke", LastName: "Skywalker"}, + {FirstName: "Han", LastName: "Solo"}, + {FirstName: "Anakin", LastName: "Skywalker"}, +}) + +sort.Sort(buffer) + +writer := parquet.NewGenericWriter[RowType](output) +_, err := parquet.CopyRows(writer, buffer.Rows()) +if err != nil { + ... +} +if err := writer.Close(); err != nil { + ... +} +``` + +### Merging Row Groups: [parquet.MergeRowGroups](https://pkg.go.dev/github.com/parquet-go/parquet-go#MergeRowGroups) + +Parquet files are often used as part of the underlying engine for data +processing or storage layers, in which cases merging multiple row groups +into one that contains more rows can be a useful operation to improve query +performance; for example, bloom filters in parquet files are stored for each +row group, the larger the row group, the fewer filters need to be stored and +the more effective they become. + +The `parquet-go/parquet-go` package supports creating merged views of row groups, +where the view contains all the rows of the merged groups, maintaining the order +defined by the sorting columns of the groups. + +There are a few constraints when merging row groups: + +- The sorting columns of all the row groups must be the same, or the merge + operation must be explicitly configured with a set of sorting columns which are + a prefix of the sorting columns of all merged row groups. + +- The schemas of row groups must all be equal, or the merge operation must + be explicitly configured with a schema that all row groups can be converted + to, in which case the limitations of schema conversions apply.
+
+Once a merged view is created, it may be written to a new parquet file or buffer
+in order to create a larger row group:
+
+```go
+merge, err := parquet.MergeRowGroups(rowGroups)
+if err != nil {
+	...
+}
+
+writer := parquet.NewGenericWriter[RowType](output)
+_, err := parquet.CopyRows(writer, merge.Rows())
+if err != nil {
+	...
+}
+if err := writer.Close(); err != nil {
+	...
+}
+```
+
+### Using Bloom Filters: [parquet.BloomFilter](https://pkg.go.dev/github.com/parquet-go/parquet-go#BloomFilter)
+
+Parquet files can embed bloom filters to help improve the performance of point
+lookups in the files. The format of parquet bloom filters is documented in
+the parquet specification: [Parquet Bloom Filter](https://github.com/apache/parquet-format/blob/master/BloomFilter.md)
+
+By default, no bloom filters are created in parquet files, but applications can
+configure the list of columns to create filters for using the `parquet.BloomFilters`
+option when instantiating writers; for example:
+
+```go
+type RowType struct {
+	FirstName string `parquet:"first_name"`
+	LastName  string `parquet:"last_name"`
+}
+
+const filterBitsPerValue = 10
+writer := parquet.NewGenericWriter[RowType](output,
+	parquet.BloomFilters(
+		// Configures the write to generate split-block bloom filters for the
+		// "first_name" and "last_name" columns of the parquet schema of rows
+		// written by the application.
+		parquet.SplitBlockFilter(filterBitsPerValue, "first_name"),
+		parquet.SplitBlockFilter(filterBitsPerValue, "last_name"),
+	),
+)
+...
+```
+
+Generating bloom filters requires knowing how many values exist in a column
+chunk in order to properly size the filter, which requires buffering all the
+values written to the column in memory. Because of it, the memory footprint
+of `parquet.GenericWriter[T]` increases linearly with the number of columns
+that the writer needs to generate filters for.
This extra cost is optimized
+away when rows are copied from a `parquet.GenericBuffer[T]` to a writer, since
+in this case the number of values per column is known since the buffer already
+holds all the values in memory.
+
+When reading parquet files, column chunks expose the generated bloom filters
+with the `parquet.ColumnChunk.BloomFilter` method, returning a
+`parquet.BloomFilter` instance if a filter was available, or `nil` when there
+were no filters.
+
+Using bloom filters in parquet files is useful when performing point-lookups in
+parquet files; searching for column rows matching a given value. Programs can
+quickly eliminate column chunks that they know do not contain the value they
+search for by checking the filter first, which is often multiple orders of
+magnitude faster than scanning the column.
+
+The following code snippet highlights how filters are typically used:
+
+```go
+var candidateChunks []parquet.ColumnChunk
+
+for _, rowGroup := range file.RowGroups() {
+	columnChunk := rowGroup.ColumnChunks()[columnIndex]
+	bloomFilter := columnChunk.BloomFilter()
+
+	if bloomFilter != nil {
+		if ok, err := bloomFilter.Check(value); err != nil {
+			...
+		} else if !ok {
+			// Bloom filters may return false positives, but never return false
+			// negatives, we know this column chunk does not contain the value.
+			continue
+		}
+	}
+
+	candidateChunks = append(candidateChunks, columnChunk)
+}
+```
+
+## Optimizations
+
+The following sections describe common optimization techniques supported by the
+library.
+
+### Optimizing Reads
+
+Lower level APIs used to read parquet files offer more efficient ways to access
+column values. Consecutive sequences of values are grouped into pages which are
+represented by the `parquet.Page` interface.
+
+A column chunk may contain multiple pages, each holding a section of the column
+values.
Applications can retrieve the column values either by reading them into +buffers of `parquet.Value`, or type asserting the pages to read arrays of +primitive Go values. The following example demonstrates how to use both +mechanisms to read column values: + +```go +pages := column.Pages() +defer func() { + checkErr(pages.Close()) +}() + +for { + p, err := pages.ReadPage() + if err != nil { + ... // io.EOF when there are no more pages + } + + switch page := p.Values().(type) { + case parquet.Int32Reader: + values := make([]int32, page.NumValues()) + _, err := page.ReadInt32s(values) + ... + case parquet.Int64Reader: + values := make([]int64, page.NumValues()) + _, err := page.ReadInt64s(values) + ... + default: + values := make([]parquet.Value, page.NumValues()) + _, err := page.ReadValues(values) + ... + } +} +``` + +Reading arrays of typed values is often preferable when performing aggregations +on the values as this model offers a more compact representation of the values +in memory, and pairs well with the use of optimizations like SIMD vectorization. + +### Optimizing Writes + +Applications that deal with columnar storage are sometimes designed to work with +columnar data throughout the abstraction layers; it then becomes possible to +write columns of values directly instead of reconstructing rows from the column +values. The package offers two main mechanisms to satisfy those use cases: + +#### A. Writing Columns of Typed Arrays + +The first solution assumes that the program works with in-memory arrays of typed +values, for example slices of primitive Go types like `[]float32`; this would be +the case if the application is built on top of a framework like +[Apache Arrow](https://pkg.go.dev/github.com/apache/arrow/go/arrow). + +`parquet.GenericBuffer[T]` is an implementation of the `parquet.RowGroup` +interface which maintains in-memory buffers of column values. 
Rows can be
+written by either boxing primitive values into arrays of `parquet.Value`,
+or type asserting the columns to access specialized versions of the write
+methods accepting arrays of Go primitive types.
+
+When using either of these models, the application is responsible for ensuring
+that the same number of rows are written to each column or the resulting parquet
+file will be malformed.
+
+The following examples demonstrate how to use these two models to write columns
+of Go values:
+
+```go
+type RowType struct { FirstName, LastName string }
+
+func writeColumns(buffer *parquet.GenericBuffer[RowType], firstNames []string) error {
+	values := make([]parquet.Value, len(firstNames))
+	for i := range firstNames {
+		values[i] = parquet.ValueOf(firstNames[i])
+	}
+	_, err := buffer.ColumnBuffers()[0].WriteValues(values)
+	return err
+}
+```
+
+```go
+type RowType struct { ID int64; Value float32 }
+
+func writeColumns(buffer *parquet.GenericBuffer[RowType], ids []int64, values []float32) error {
+	if len(ids) != len(values) {
+		return fmt.Errorf("number of ids and values mismatch: ids=%d values=%d", len(ids), len(values))
+	}
+	columns := buffer.ColumnBuffers()
+	if err := columns[0].(parquet.Int64Writer).WriteInt64s(ids); err != nil {
+		return err
+	}
+	if err := columns[1].(parquet.FloatWriter).WriteFloats(values); err != nil {
+		return err
+	}
+	return nil
+}
+```
+
+The latter is more efficient as it does not require boxing the input into an
+intermediary array of `parquet.Value`. However, it may not always be the right
+model depending on the situation, sometimes the generic abstraction can be a
+more expressive model.
+
+#### B. Implementing parquet.RowGroup
+
+Programs that need full control over the construction of row groups can choose
+to provide their own implementation of the `parquet.RowGroup` interface, which
+includes defining implementations of `parquet.ColumnChunk` and `parquet.Page`
+to expose column values of the row group.
+ +This model can be preferable when the underlying storage or in-memory +representation of the data needs to be optimized further than what can be +achieved by using an intermediary buffering layer with `parquet.GenericBuffer[T]`. + +See [parquet.RowGroup](https://pkg.go.dev/github.com/parquet-go/parquet-go#RowGroup) +for the full interface documentation. + +#### C. Using on-disk page buffers + +When generating parquet files, the writer needs to buffer all pages before it +can create the row group. This may require significant amounts of memory as the +entire file content must be buffered prior to generating it. In some cases, the +files might even be larger than the amount of memory available to the program. + +The `parquet.GenericWriter[T]` can be configured to use disk storage instead as +a scratch buffer when generating files, by configuring a different page buffer +pool using the `parquet.ColumnPageBuffers` option and `parquet.PageBufferPool` +interface. + +The `parquet-go/parquet-go` package provides an implementation of the interface +which uses temporary files to store pages while a file is generated, allowing +programs to use local storage as swap space to hold pages and keep memory +utilization to a minimum. The following example demonstrates how to configure +a parquet writer to use on-disk page buffers: + +```go +type RowType struct { ... } + +writer := parquet.NewGenericWriter[RowType](output, + parquet.ColumnPageBuffers( + parquet.NewFileBufferPool("", "buffers.*"), + ), +) +``` + +When a row group is complete, pages buffered to disk need to be copied back to +the output file. This results in doubling I/O operations and storage space +requirements (the system needs to have enough free disk space to hold two copies +of the file). 
The resulting write amplification can often be optimized away by
+the kernel if the file system supports copy-on-write of disk pages since copies
+between `os.File` instances are optimized using `copy_file_range(2)` (on linux).
+
+See [parquet.PageBufferPool](https://pkg.go.dev/github.com/parquet-go/parquet-go#PageBufferPool)
+for the full interface documentation.
+
+#### D. Parallel Column Writes
+
+For applications that need to maximize throughput when writing large columnar datasets, the library supports writing columns in parallel. This is especially useful when each column can be prepared independently and written concurrently, leveraging multiple CPU cores.
+
+You can use Go's goroutines to write to each column's `ColumnWriter` in parallel. Each column's values can be written using `WriteRowValues`, and the column must be closed after writing. It is the application's responsibility to ensure that all columns receive the same number of rows, as mismatched row counts will result in malformed files.
+
+Example:
+
+```go
+columns := writer.ColumnWriters()
+var (
+	wg        sync.WaitGroup
+	errs      = make([]error, len(columns))
+	rowCounts = make([]int, len(columns))
+)
+for i, col := range columns {
+	wg.Add(1)
+	go func(i int, col parquet.ColumnWriter) {
+		defer wg.Done()
+		n, err := col.WriteRowValues(values[i]) // values[i] is []parquet.Value for column i
+		if err != nil {
+			errs[i] = err
+			return
+		}
+		rowCounts[i] = n
+		errs[i] = col.Close()
+	}(i, col)
+}
+wg.Wait()
+// Check errs and rowCounts for consistency
+```
+
+This approach can significantly reduce the time required to write wide tables or large datasets, especially on multi-core systems. However, you should ensure proper error handling and synchronization, as shown above.
+
+## Maintenance
+
+While initial design and development occurred at Twilio Segment, the project is now maintained by the open source community. We welcome external contributors
+to participate in the form of discussions or code changes.
Please refer to the
+[Contribution](./CONTRIBUTING.md) guidelines as well as the [Code of Conduct](./CODE_OF_CONDUCT.md)
+before submitting contributions.
+
+### Continuous Integration
+
+The project uses [Github Actions](https://github.com/parquet-go/parquet-go/actions) for CI.
+
+### Debugging
+
+The package has debugging capabilities built in which can be turned on using the
+`PARQUETGODEBUG` environment variable. The value follows a model similar to
+`GODEBUG`, it must be formatted as a comma-separated list of `key=value` pairs.
+
+The following debug flags are currently supported:
+
+- `tracebuf=1` turns on tracing of internal buffers, which validates that
+  reference counters are set to zero when buffers are reclaimed by the garbage
+  collector. When the package detects that a buffer was leaked, it logs an error
+  message along with the stack trace captured when the buffer was last used.
diff --git a/vendor/github.com/parquet-go/parquet-go/allocator.go b/vendor/github.com/parquet-go/parquet-go/allocator.go
new file mode 100644
index 00000000000..0c1c3ae261b
--- /dev/null
+++ b/vendor/github.com/parquet-go/parquet-go/allocator.go
@@ -0,0 +1,64 @@
+package parquet
+
+import (
+	"unsafe"
+
+	"github.com/parquet-go/bitpack/unsafecast"
+)
+
+type allocator struct{ buffer []byte }
+
+func (a *allocator) makeBytes(n int) []byte {
+	if free := cap(a.buffer) - len(a.buffer); free < n {
+		newCap := 2 * cap(a.buffer)
+		if newCap == 0 {
+			newCap = 4096
+		}
+		for newCap < n {
+			newCap *= 2
+		}
+		a.buffer = make([]byte, 0, newCap)
+	}
+
+	i := len(a.buffer)
+	j := len(a.buffer) + n
+	a.buffer = a.buffer[:j]
+	return a.buffer[i:j:j]
+}
+
+func (a *allocator) copyBytes(v []byte) []byte {
+	b := a.makeBytes(len(v))
+	copy(b, v)
+	return b
+}
+
+func (a *allocator) copyString(v string) string {
+	b := a.makeBytes(len(v))
+	copy(b, v)
+	return unsafecast.String(b)
+}
+
+func (a *allocator) reset() {
+	a.buffer = a.buffer[:0]
+}
+
+// rowAllocator is a memory allocator used to
make a copy of rows referencing
+// memory buffers that parquet-go does not have ownership of.
+//
+// This type is used in the implementation of various readers and writers that
+// need to capture rows passed to the ReadRows/WriteRows methods. Copies to a
+// local buffer is necessary in those cases to respect the reader/writer
+// contracts that do not allow the implementations to retain the rows they
+// are passed as arguments.
+//
+// See: RowBuffer, NewRowGroupRowReader, NewColumnChunkRowReader
+type rowAllocator struct{ allocator }
+
+func (a *rowAllocator) capture(row Row) {
+	for i, v := range row {
+		switch v.Kind() {
+		case ByteArray, FixedLenByteArray:
+			row[i].ptr = unsafe.SliceData(a.copyBytes(v.byteArray()))
+		}
+	}
+}
diff --git a/vendor/github.com/parquet-go/parquet-go/array.go b/vendor/github.com/parquet-go/parquet-go/array.go
new file mode 100644
index 00000000000..4b9dea766b8
--- /dev/null
+++ b/vendor/github.com/parquet-go/parquet-go/array.go
@@ -0,0 +1,27 @@
+package parquet
+
+import (
+	"unsafe"
+
+	"github.com/parquet-go/parquet-go/sparse"
+)
+
+// makeArrayValue constructs a sparse.Array from a slice of Value,
+// using the offset to locate the field within the Value struct that
+// the sparse array is indexing.
+func makeArrayValue(values []Value, offset uintptr) sparse.Array { + ptr := unsafe.Pointer(unsafe.SliceData(values)) + return sparse.UnsafeArray(unsafe.Add(ptr, offset), len(values), unsafe.Sizeof(Value{})) +} + +func makeArray(base unsafe.Pointer, length int, offset uintptr) sparse.Array { + return sparse.UnsafeArray(base, length, offset) +} + +func makeArrayFromSlice[T any](s []T) sparse.Array { + return makeArray(unsafe.Pointer(unsafe.SliceData(s)), len(s), unsafe.Sizeof(*new(T))) +} + +func makeArrayFromPointer[T any](v *T) sparse.Array { + return makeArray(unsafe.Pointer(v), 1, unsafe.Sizeof(*v)) +} diff --git a/vendor/github.com/parquet-go/parquet-go/bitmap.go b/vendor/github.com/parquet-go/parquet-go/bitmap.go new file mode 100644 index 00000000000..5f2c121b2b3 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bitmap.go @@ -0,0 +1,38 @@ +package parquet + +import "github.com/parquet-go/parquet-go/internal/memory" + +type bitmap struct { + bits []uint64 +} + +func (m *bitmap) reset(size int) { + size = (size + 63) / 64 + if cap(m.bits) < size { + m.bits = make([]uint64, size, 2*size) + } else { + m.bits = m.bits[:size] + m.clear() + } +} + +func (m *bitmap) clear() { + for i := range m.bits { + m.bits[i] = 0 + } +} + +var ( + bitmapPool memory.Pool[bitmap] +) + +func acquireBitmap(n int) *bitmap { + return bitmapPool.Get( + func() *bitmap { return &bitmap{bits: make([]uint64, n, 2*n)} }, + func(b *bitmap) { b.reset(n) }, + ) +} + +func releaseBitmap(b *bitmap) { + bitmapPool.Put(b) +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom.go b/vendor/github.com/parquet-go/parquet-go/bloom.go new file mode 100644 index 00000000000..550cf21192f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom.go @@ -0,0 +1,280 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/bloom" + "github.com/parquet-go/parquet-go/bloom/xxhash" + 
"github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// BloomFilter is an interface allowing applications to test whether a key +// exists in a bloom filter. +type BloomFilter interface { + // Implement the io.ReaderAt interface as a mechanism to allow reading the + // raw bits of the filter. + io.ReaderAt + + // Returns the size of the bloom filter (in bytes). + Size() int64 + + // Tests whether the given value is present in the filter. + // + // A non-nil error may be returned if reading the filter failed. This may + // happen if the filter was lazily loaded from a storage medium during the + // call to Check for example. Applications that can guarantee that the + // filter was in memory at the time Check was called can safely ignore the + // error, which would always be nil in this case. + Check(value Value) (bool, error) +} + +type errorBloomFilter struct{ err error } + +func (f *errorBloomFilter) Size() int64 { return 0 } +func (f *errorBloomFilter) ReadAt([]byte, int64) (int, error) { return 0, f.err } +func (f *errorBloomFilter) Check(Value) (bool, error) { return false, f.err } + +type FileBloomFilter struct { + io.SectionReader + hash bloom.Hash + check func(io.ReaderAt, int64, uint64) (bool, error) +} + +func (f *FileBloomFilter) Check(v Value) (bool, error) { + return f.check(&f.SectionReader, f.Size(), v.hash(f.hash)) +} + +func (v Value) hash(h bloom.Hash) uint64 { + switch v.Kind() { + case Boolean: + return h.Sum64Uint8(v.byte()) + case Int32, Float: + return h.Sum64Uint32(v.uint32()) + case Int64, Double: + return h.Sum64Uint64(v.uint64()) + default: // Int96, ByteArray, FixedLenByteArray, or null + return h.Sum64(v.byteArray()) + } +} + +func newBloomFilter(file io.ReaderAt, offset int64, header *format.BloomFilterHeader) *FileBloomFilter { + if header.Algorithm.Block != nil { + if header.Hash.XxHash != nil { + if header.Compression.Uncompressed != nil { + 
return &FileBloomFilter{ + SectionReader: *io.NewSectionReader(file, offset, int64(header.NumBytes)), + hash: bloom.XXH64{}, + check: bloom.CheckSplitBlock, + } + } + } + } + return nil +} + +// The BloomFilterColumn interface is a declarative representation of bloom filters +// used when configuring filters on a parquet writer. +type BloomFilterColumn interface { + // Returns the path of the column that the filter applies to. + Path() []string + + // Returns the hashing algorithm used when inserting values into a bloom + // filter. + Hash() bloom.Hash + + // Returns an encoding which can be used to write columns of values to the + // filter. + Encoding() encoding.Encoding + + // Returns the size of the filter needed to encode values in the filter, + // assuming each value will be encoded with the given number of bits. + Size(numValues int64) int +} + +// SplitBlockFilter constructs a split block bloom filter object for the column +// at the given path, with the given bitsPerValue. +// +// If you are unsure what number of bitsPerValue to use, 10 is a reasonable +// tradeoff between size and error rate for common datasets. +// +// For more information on the tradeoff between size and error rate, consult +// this website: https://hur.st/bloomfilter/?n=4000&p=0.1&m=&k=1 +func SplitBlockFilter(bitsPerValue uint, path ...string) BloomFilterColumn { + return splitBlockFilter{ + bitsPerValue: bitsPerValue, + path: path, + } +} + +type splitBlockFilter struct { + bitsPerValue uint + path []string +} + +func (f splitBlockFilter) Path() []string { return f.path } +func (f splitBlockFilter) Hash() bloom.Hash { return bloom.XXH64{} } +func (f splitBlockFilter) Encoding() encoding.Encoding { return splitBlockEncoding{} } + +func (f splitBlockFilter) Size(numValues int64) int { + return bloom.BlockSize * bloom.NumSplitBlocksOf(numValues, f.bitsPerValue) +} + +// Creates a header from the given bloom filter. 
+// +// For now there is only one type of filter supported, but we provide this +// function to suggest a model for extending the implementation if new filters +// are added to the parquet specs. +func bloomFilterHeader(filter BloomFilterColumn) (header format.BloomFilterHeader) { + switch filter.(type) { + case splitBlockFilter: + header.Algorithm.Block = &format.SplitBlockAlgorithm{} + } + switch filter.Hash().(type) { + case bloom.XXH64: + header.Hash.XxHash = &format.XxHash{} + } + header.Compression.Uncompressed = &format.BloomFilterUncompressed{} + return header +} + +func searchBloomFilterColumn(filters []BloomFilterColumn, path columnPath) BloomFilterColumn { + for _, f := range filters { + if path.equal(f.Path()) { + return f + } + } + return nil +} + +const ( + // Size of the stack buffer used to perform bulk operations on bloom filters. + // + // This value was determined as being a good default empirically, + // 128 x uint64 makes a 1KiB buffer which amortizes the cost of calling + // methods of bloom filters while not causing too much stack growth either. 
+ filterEncodeBufferSize = 128 +) + +type splitBlockEncoding struct { + encoding.NotSupported +} + +func (splitBlockEncoding) EncodeBoolean(dst []byte, src []byte) ([]byte, error) { + splitBlockEncodeUint8(bloom.MakeSplitBlockFilter(dst), src) + return dst, nil +} + +func (splitBlockEncoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + splitBlockEncodeUint32(bloom.MakeSplitBlockFilter(dst), unsafecast.Slice[uint32](src)) + return dst, nil +} + +func (splitBlockEncoding) EncodeInt64(dst []byte, src []int64) ([]byte, error) { + splitBlockEncodeUint64(bloom.MakeSplitBlockFilter(dst), unsafecast.Slice[uint64](src)) + return dst, nil +} + +func (e splitBlockEncoding) EncodeInt96(dst []byte, src []deprecated.Int96) ([]byte, error) { + splitBlockEncodeFixedLenByteArray(bloom.MakeSplitBlockFilter(dst), unsafecastInt96ToBytes(src), 12) + return dst, nil +} + +func (splitBlockEncoding) EncodeFloat(dst []byte, src []float32) ([]byte, error) { + splitBlockEncodeUint32(bloom.MakeSplitBlockFilter(dst), unsafecast.Slice[uint32](src)) + return dst, nil +} + +func (splitBlockEncoding) EncodeDouble(dst []byte, src []float64) ([]byte, error) { + splitBlockEncodeUint64(bloom.MakeSplitBlockFilter(dst), unsafecast.Slice[uint64](src)) + return dst, nil +} + +func (splitBlockEncoding) EncodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, error) { + filter := bloom.MakeSplitBlockFilter(dst) + buffer := make([]uint64, 0, filterEncodeBufferSize) + baseOffset := offsets[0] + + for _, endOffset := range offsets[1:] { + value := src[baseOffset:endOffset:endOffset] + baseOffset = endOffset + + if len(buffer) == cap(buffer) { + filter.InsertBulk(buffer) + buffer = buffer[:0] + } + + buffer = append(buffer, xxhash.Sum64(value)) + } + + filter.InsertBulk(buffer) + return dst, nil +} + +func (splitBlockEncoding) EncodeFixedLenByteArray(dst []byte, src []byte, size int) ([]byte, error) { + filter := bloom.MakeSplitBlockFilter(dst) + if size == 16 { + 
splitBlockEncodeUint128(filter, unsafecast.Slice[[16]byte](src)) + } else { + splitBlockEncodeFixedLenByteArray(filter, src, size) + } + return dst, nil +} + +func splitBlockEncodeFixedLenByteArray(filter bloom.SplitBlockFilter, data []byte, size int) { + buffer := make([]uint64, 0, filterEncodeBufferSize) + + for i, j := 0, size; j <= len(data); { + if len(buffer) == cap(buffer) { + filter.InsertBulk(buffer) + buffer = buffer[:0] + } + buffer = append(buffer, xxhash.Sum64(data[i:j])) + i += size + j += size + } + + filter.InsertBulk(buffer) +} + +func splitBlockEncodeUint8(filter bloom.SplitBlockFilter, values []uint8) { + buffer := make([]uint64, filterEncodeBufferSize) + + for i := 0; i < len(values); { + n := xxhash.MultiSum64Uint8(buffer, values[i:]) + filter.InsertBulk(buffer[:n]) + i += n + } +} + +func splitBlockEncodeUint32(filter bloom.SplitBlockFilter, values []uint32) { + buffer := make([]uint64, filterEncodeBufferSize) + + for i := 0; i < len(values); { + n := xxhash.MultiSum64Uint32(buffer, values[i:]) + filter.InsertBulk(buffer[:n]) + i += n + } +} + +func splitBlockEncodeUint64(filter bloom.SplitBlockFilter, values []uint64) { + buffer := make([]uint64, filterEncodeBufferSize) + + for i := 0; i < len(values); { + n := xxhash.MultiSum64Uint64(buffer, values[i:]) + filter.InsertBulk(buffer[:n]) + i += n + } +} + +func splitBlockEncodeUint128(filter bloom.SplitBlockFilter, values [][16]byte) { + buffer := make([]uint64, filterEncodeBufferSize) + + for i := 0; i < len(values); { + n := xxhash.MultiSum64Uint128(buffer, values[i:]) + filter.InsertBulk(buffer[:n]) + i += n + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/block.go b/vendor/github.com/parquet-go/parquet-go/bloom/block.go new file mode 100644 index 00000000000..193dec721de --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/block.go @@ -0,0 +1,28 @@ +package bloom + +import "unsafe" + +// Word represents 32 bits words of bloom filter blocks. 
+type Word uint32 + +// Block represents bloom filter blocks which contain eight 32 bits words. +type Block [8]Word + +// Bytes returns b as a byte slice. +func (b *Block) Bytes() []byte { + return unsafe.Slice((*byte)(unsafe.Pointer(b)), BlockSize) +} + +const ( + // BlockSize is the size of bloom filter blocks in bytes. + BlockSize = 32 + + salt0 = 0x47b6137b + salt1 = 0x44974d91 + salt2 = 0x8824ad5b + salt3 = 0xa2b7289d + salt4 = 0x705495c7 + salt5 = 0x2df1424b + salt6 = 0x9efc4947 + salt7 = 0x5c6bfb31 +) diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/block_amd64.go b/vendor/github.com/parquet-go/parquet-go/bloom/block_amd64.go new file mode 100644 index 00000000000..5edf9d5d901 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/block_amd64.go @@ -0,0 +1,39 @@ +//go:build !purego + +package bloom + +import "golang.org/x/sys/cpu" + +// The functions in this file are SIMD-optimized versions of the functions +// declared in block_optimized.go for x86 targets. +// +// The optimization yields measurable improvements over the pure Go versions: +// +// goos: darwin +// goarch: amd64 +// pkg: github.com/parquet-go/parquet-go/bloom +// cpu: Intel(R) Core(TM) i9-8950HK CPU @ 2.90GHz +// +// name old time/op new time/op delta +// BlockInsert 11.6ns ± 4% 2.0ns ± 3% -82.37% (p=0.000 n=8+8) +// BlockCheck 12.6ns ±28% 2.1ns ± 4% -83.12% (p=0.000 n=10+8) +// +// name old speed new speed delta +// BlockInsert 2.73GB/s ±13% 15.70GB/s ± 3% +475.96% (p=0.000 n=9+8) +// BlockCheck 2.59GB/s ±23% 15.06GB/s ± 4% +482.25% (p=0.000 n=10+8) +// +// Note that the numbers above are a comparison to the routines implemented in +// block_optimized.go; the delta comparing to functions in block_default.go is +// significantly larger but not very interesting since those functions have no +// practical use cases. 
+var hasAVX2 = cpu.X86.HasAVX2 + +//go:noescape +func blockInsert(b *Block, x uint32) + +//go:noescape +func blockCheck(b *Block, x uint32) bool + +func (b *Block) Insert(x uint32) { blockInsert(b, x) } + +func (b *Block) Check(x uint32) bool { return blockCheck(b, x) } diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/block_amd64.s b/vendor/github.com/parquet-go/parquet-go/bloom/block_amd64.s new file mode 100644 index 00000000000..713aea2cd7e --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/block_amd64.s @@ -0,0 +1,129 @@ +//go:build !purego + +#include "textflag.h" + +#define salt0 0x47b6137b +#define salt1 0x44974d91 +#define salt2 0x8824ad5b +#define salt3 0xa2b7289d +#define salt4 0x705495c7 +#define salt5 0x2df1424b +#define salt6 0x9efc4947 +#define salt7 0x5c6bfb31 + +DATA ones+0(SB)/4, $1 +DATA ones+4(SB)/4, $1 +DATA ones+8(SB)/4, $1 +DATA ones+12(SB)/4, $1 +DATA ones+16(SB)/4, $1 +DATA ones+20(SB)/4, $1 +DATA ones+24(SB)/4, $1 +DATA ones+28(SB)/4, $1 +GLOBL ones(SB), RODATA|NOPTR, $32 + +DATA salt+0(SB)/4, $salt0 +DATA salt+4(SB)/4, $salt1 +DATA salt+8(SB)/4, $salt2 +DATA salt+12(SB)/4, $salt3 +DATA salt+16(SB)/4, $salt4 +DATA salt+20(SB)/4, $salt5 +DATA salt+24(SB)/4, $salt6 +DATA salt+28(SB)/4, $salt7 +GLOBL salt(SB), RODATA|NOPTR, $32 + +// This initial block is a SIMD implementation of the mask function declared in +// block_default.go and block_optimized.go. For each of the 8 x 32 bits words of +// the bloom filter block, the operation performed is: +// +// block[i] = 1 << ((x * salt[i]) >> 27) +// +// Arguments +// --------- +// +// * src is a memory location where the value to use when computing the mask is +// located. The memory location is not modified. +// +// * tmp is a YMM register used as scratch space to hold intermediary results in +// the algorithm. +// +// * dst is a YMM register where the final mask is written. 
+// +#define generateMask(src, tmp, dst) \ + VMOVDQA ones(SB), dst \ + VPBROADCASTD src, tmp \ + VPMULLD salt(SB), tmp, tmp \ + VPSRLD $27, tmp, tmp \ + VPSLLVD tmp, dst, dst + +#define insert(salt, src, dst) \ + MOVL src, CX \ + IMULL salt, CX \ + SHRL $27, CX \ + MOVL $1, DX \ + SHLL CX, DX \ + ORL DX, dst + +#define check(salt, b, x) \ + MOVL b, CX \ + MOVL x, DX \ + IMULL salt, DX \ + SHRL $27, DX \ + BTL DX, CX \ + JAE notfound + +// func blockInsert(b *Block, x uint32) +TEXT ·blockInsert(SB), NOSPLIT, $0-16 + MOVQ b+0(FP), AX + CMPB ·hasAVX2(SB), $0 + JE fallback +avx2: + generateMask(x+8(FP), Y1, Y0) + // Set all 1 bits of the mask in the bloom filter block. + VPOR (AX), Y0, Y0 + VMOVDQU Y0, (AX) + VZEROUPPER + RET +fallback: + MOVL x+8(FP), BX + insert($salt0, BX, 0(AX)) + insert($salt1, BX, 4(AX)) + insert($salt2, BX, 8(AX)) + insert($salt3, BX, 12(AX)) + insert($salt4, BX, 16(AX)) + insert($salt5, BX, 20(AX)) + insert($salt6, BX, 24(AX)) + insert($salt7, BX, 28(AX)) + RET + +// func blockCheck(b *Block, x uint32) bool +TEXT ·blockCheck(SB), NOSPLIT, $0-17 + MOVQ b+0(FP), AX + CMPB ·hasAVX2(SB), $0 + JE fallback +avx2: + generateMask(x+8(FP), Y1, Y0) + // Compare the 1 bits of the mask with the bloom filter block, then compare + // the result with the mask, expecting equality if the value `x` was present + // in the block. 
+ VPAND (AX), Y0, Y1 // Y0 = block & mask + VPTEST Y0, Y1 // if (Y0 & ^Y1) != 0 { CF = 1 } + SETCS ret+16(FP) // return CF == 1 + VZEROUPPER + RET +fallback: + MOVL x+8(FP), BX + check($salt0, 0(AX), BX) + check($salt1, 4(AX), BX) + check($salt2, 8(AX), BX) + check($salt3, 12(AX), BX) + check($salt4, 16(AX), BX) + check($salt5, 20(AX), BX) + check($salt6, 24(AX), BX) + check($salt7, 28(AX), BX) + MOVB $1, CX + JMP done +notfound: + XORB CX, CX +done: + MOVB CX, ret+16(FP) + RET diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/block_default.go b/vendor/github.com/parquet-go/parquet-go/bloom/block_default.go new file mode 100644 index 00000000000..016cb73b442 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/block_default.go @@ -0,0 +1,65 @@ +//go:build purego && parquet.bloom.no_unroll + +package bloom + +// This file contains direct translation of the algorithms described in the +// parquet bloom filter spec: +// https://github.com/apache/parquet-format/blob/master/BloomFilter.md +// +// There are no practical reasons to eable the parquet.bloom.no_unroll build +// tag, the code is left here as a reference to ensure that the optimized +// implementations of block operations behave the same as the functions in this +// file. 
+ +var salt = [8]uint32{ + 0: salt0, + 1: salt1, + 2: salt2, + 3: salt3, + 4: salt4, + 5: salt5, + 6: salt6, + 7: salt7, +} + +func (w *Word) set(i uint) { + *w |= Word(1 << i) +} + +func (w Word) has(i uint) bool { + return ((w >> Word(i)) & 1) != 0 +} + +func mask(x uint32) Block { + var b Block + for i := uint(0); i < 8; i++ { + y := x * salt[i] + b[i].set(uint(y) >> 27) + } + return b +} + +func (b *Block) Insert(x uint32) { + masked := mask(x) + for i := uint(0); i < 8; i++ { + for j := uint(0); j < 32; j++ { + if masked[i].has(j) { + b[i].set(j) + } + } + } +} + +func (b *Block) Check(x uint32) bool { + masked := mask(x) + for i := uint(0); i < 8; i++ { + for j := uint(0); j < 32; j++ { + if masked[i].has(j) { + if !b[i].has(j) { + return false + } + } + } + } + return true +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/block_optimized.go b/vendor/github.com/parquet-go/parquet-go/bloom/block_optimized.go new file mode 100644 index 00000000000..20a9768121c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/block_optimized.go @@ -0,0 +1,53 @@ +//go:build (!amd64 || purego) && !parquet.bloom.no_unroll + +package bloom + +// The functions in this file are optimized versions of the algorithms described +// in https://github.com/apache/parquet-format/blob/master/BloomFilter.md +// +// The functions are manual unrolling of the loops, which yield significant +// performance improvements: +// +// goos: darwin +// goarch: amd64 +// pkg: github.com/parquet-go/parquet-go/bloom +// cpu: Intel(R) Core(TM) i9-8950HK CPU @ 2.90GHz +// +// name old time/op new time/op delta +// BlockInsert 327ns ± 1% 12ns ± 4% -96.47% (p=0.000 n=9+8) +// BlockCheck 240ns ± 4% 13ns ±28% -94.75% (p=0.000 n=8+10) +// +// name old speed new speed delta +// BlockInsert 97.8MB/s ± 1% 2725.0MB/s ±13% +2686.59% (p=0.000 n=9+9) +// BlockCheck 133MB/s ± 4% 2587MB/s ±23% +1838.46% (p=0.000 n=8+10) +// +// The benchmarks measure throughput based on the byte size of a bloom 
filter +// block. + +func (b *Block) Insert(x uint32) { + b[0] |= 1 << ((x * salt0) >> 27) + b[1] |= 1 << ((x * salt1) >> 27) + b[2] |= 1 << ((x * salt2) >> 27) + b[3] |= 1 << ((x * salt3) >> 27) + b[4] |= 1 << ((x * salt4) >> 27) + b[5] |= 1 << ((x * salt5) >> 27) + b[6] |= 1 << ((x * salt6) >> 27) + b[7] |= 1 << ((x * salt7) >> 27) +} + +func (b *Block) Check(x uint32) bool { + return ((b[0] & (1 << ((x * salt0) >> 27))) != 0) && + ((b[1] & (1 << ((x * salt1) >> 27))) != 0) && + ((b[2] & (1 << ((x * salt2) >> 27))) != 0) && + ((b[3] & (1 << ((x * salt3) >> 27))) != 0) && + ((b[4] & (1 << ((x * salt4) >> 27))) != 0) && + ((b[5] & (1 << ((x * salt5) >> 27))) != 0) && + ((b[6] & (1 << ((x * salt6) >> 27))) != 0) && + ((b[7] & (1 << ((x * salt7) >> 27))) != 0) +} + +func (f SplitBlockFilter) insertBulk(x []uint64) { + for i := range x { + f.Insert(x[i]) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/bloom.go b/vendor/github.com/parquet-go/parquet-go/bloom/bloom.go new file mode 100644 index 00000000000..337fe0d7d8f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/bloom.go @@ -0,0 +1,13 @@ +// Package bloom implements parquet bloom filters. 
+package bloom + +func fasthash1x64(value uint64, scale int32) uint64 { + return ((value >> 32) * uint64(scale)) >> 32 +} + +func fasthash4x64(dst, src *[4]uint64, scale int32) { + dst[0] = ((src[0] >> 32) * uint64(scale)) >> 32 + dst[1] = ((src[1] >> 32) * uint64(scale)) >> 32 + dst[2] = ((src[2] >> 32) * uint64(scale)) >> 32 + dst[3] = ((src[3] >> 32) * uint64(scale)) >> 32 +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/filter.go b/vendor/github.com/parquet-go/parquet-go/bloom/filter.go new file mode 100644 index 00000000000..493428a3d01 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/filter.go @@ -0,0 +1,94 @@ +package bloom + +import ( + "io" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/internal/memory" +) + +// Filter is an interface representing read-only bloom filters where programs +// can probe for the possible presence of a hash key. +type Filter interface { + Check(uint64) bool +} + +// SplitBlockFilter is an in-memory implementation of the parquet bloom filters. +// +// This type is useful to construct bloom filters that are later serialized +// to a storage medium. +type SplitBlockFilter []Block + +// MakeSplitBlockFilter constructs a SplitBlockFilter value from the data byte +// slice. +func MakeSplitBlockFilter(data []byte) SplitBlockFilter { + return unsafecast.Slice[Block](data) +} + +// NumSplitBlocksOf returns the number of blocks in a filter intended to hold +// the given number of values and bits of filter per value. +// +// This function is useful to determine the number of blocks when creating bloom +// filters in memory, for example: +// +// f := make(bloom.SplitBlockFilter, bloom.NumSplitBlocksOf(n, 10)) +func NumSplitBlocksOf(numValues int64, bitsPerValue uint) int { + numBytes := ((uint(numValues) * bitsPerValue) + 7) / 8 + numBlocks := (numBytes + (BlockSize - 1)) / BlockSize + return int(numBlocks) +} + +// Reset clears the content of the filter f. 
+func (f SplitBlockFilter) Reset() { + for i := range f { + f[i] = Block{} + } +} + +// Block returns a pointer to the block that the given value hashes to in the +// bloom filter. +func (f SplitBlockFilter) Block(x uint64) *Block { return &f[fasthash1x64(x, int32(len(f)))] } + +// InsertBulk adds all values from x into f. +func (f SplitBlockFilter) InsertBulk(x []uint64) { filterInsertBulk(f, x) } + +// Insert adds x to f. +func (f SplitBlockFilter) Insert(x uint64) { filterInsert(f, x) } + +// Check tests whether x is in f. +func (f SplitBlockFilter) Check(x uint64) bool { return filterCheck(f, x) } + +// Bytes converts f to a byte slice. +// +// The returned slice shares the memory of f. The method is intended to be used +// to serialize the bloom filter to a storage medium. +func (f SplitBlockFilter) Bytes() []byte { + return unsafecast.Slice[byte](f) +} + +// CheckSplitBlock is similar to bloom.SplitBlockFilter.Check but reads the +// bloom filter of n bytes from r. +// +// The size n of the bloom filter is assumed to be a multiple of the block size. +func CheckSplitBlock(r io.ReaderAt, n int64, x uint64) (bool, error) { + block := acquireBlock() + defer releaseBlock(block) + offset := BlockSize * fasthash1x64(x, int32(n/BlockSize)) + _, err := r.ReadAt(block.Bytes(), int64(offset)) + return block.Check(uint32(x)), err +} + +var ( + blockPool memory.Pool[Block] +) + +func acquireBlock() *Block { + return blockPool.Get( + func() *Block { return new(Block) }, + func(b *Block) {}, + ) +} + +func releaseBlock(b *Block) { + blockPool.Put(b) +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/filter_amd64.go b/vendor/github.com/parquet-go/parquet-go/bloom/filter_amd64.go new file mode 100644 index 00000000000..3649da51cae --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/filter_amd64.go @@ -0,0 +1,33 @@ +//go:build !purego + +package bloom + +// This file contains the signatures for bloom filter algorithms implemented in +// filter_amd64.s. 
+// +// The assembly code provides significant speedups on filter inserts and checks, +// with the greatest gains seen on the bulk insert operation where the use of +// vectorized code yields great results. +// +// The following sections record the kind of performance improvements we were +// able to measure, comparing with performing the filter block lookups in Go +// and calling to the block insert and check routines: +// +// name old time/op new time/op delta +// FilterInsertBulk 45.1ns ± 2% 17.8ns ± 3% -60.41% (p=0.000 n=10+10) +// FilterInsert 3.48ns ± 2% 2.55ns ± 1% -26.86% (p=0.000 n=10+8) +// FilterCheck 3.64ns ± 3% 2.66ns ± 2% -26.82% (p=0.000 n=10+9) +// +// name old speed new speed delta +// FilterInsertBulk 11.4GB/s ± 2% 28.7GB/s ± 3% +152.61% (p=0.000 n=10+10) +// FilterInsert 9.19GB/s ± 2% 12.56GB/s ± 1% +36.71% (p=0.000 n=10+8) +// FilterCheck 8.80GB/s ± 3% 12.03GB/s ± 2% +36.61% (p=0.000 n=10+9) + +//go:noescape +func filterInsertBulk(f []Block, x []uint64) + +//go:noescape +func filterInsert(f []Block, x uint64) + +//go:noescape +func filterCheck(f []Block, x uint64) bool diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/filter_amd64.s b/vendor/github.com/parquet-go/parquet-go/bloom/filter_amd64.s new file mode 100644 index 00000000000..cfa75e69fb8 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/filter_amd64.s @@ -0,0 +1,214 @@ +//go:build !purego + +#include "textflag.h" + +#define salt0 0x47b6137b +#define salt1 0x44974d91 +#define salt2 0x8824ad5b +#define salt3 0xa2b7289d +#define salt4 0x705495c7 +#define salt5 0x2df1424b +#define salt6 0x9efc4947 +#define salt7 0x5c6bfb31 + +// See block_amd64.s for a description of this algorithm. 
+#define generateMask(src, dst) \ + VMOVDQA ones(SB), dst \ + VPMULLD salt(SB), src, src \ + VPSRLD $27, src, src \ + VPSLLVD src, dst, dst + +#define applyMask(src, dst) \ + VPOR dst, src, src \ + VMOVDQU src, dst + +#define fasthash1x64(scale, value) \ + SHRQ $32, value \ + IMULQ scale, value \ + SHRQ $32, value \ + SHLQ $5, value + +#define fasthash4x64(scale, value) \ + VPSRLQ $32, value, value \ + VPMULUDQ scale, value, value \ + VPSRLQ $32, value, value \ + VPSLLQ $5, value, value + +#define extract4x64(srcYMM, srcXMM, tmpXMM, r0, r1, r2, r3) \ + VEXTRACTI128 $1, srcYMM, tmpXMM \ + MOVQ srcXMM, r0 \ + VPEXTRQ $1, srcXMM, r1 \ + MOVQ tmpXMM, r2 \ + VPEXTRQ $1, tmpXMM, r3 + +#define insert(salt, src, dst) \ + MOVL src, CX \ + IMULL salt, CX \ + SHRL $27, CX \ + MOVL $1, DX \ + SHLL CX, DX \ + ORL DX, dst + +#define check(salt, b, x) \ + MOVL b, CX \ + MOVL x, DX \ + IMULL salt, DX \ + SHRL $27, DX \ + BTL DX, CX \ + JAE notfound + +// func filterInsertBulk(f []Block, x []uint64) +TEXT ·filterInsertBulk(SB), NOSPLIT, $0-48 + MOVQ f_base+0(FP), AX + MOVQ f_len+8(FP), CX + MOVQ x_base+24(FP), BX + MOVQ x_len+32(FP), DX + CMPB ·hasAVX2(SB), $0 + JE fallback +avx2: + VPBROADCASTQ f_base+8(FP), Y0 + // Loop initialization, SI holds the current index in `x`, DI is the number + // of elements in `x` rounded down to the nearest multiple of 4. + XORQ SI, SI + MOVQ DX, DI + SHRQ $2, DI + SHLQ $2, DI +avx2loop4x64: + CMPQ SI, DI + JAE avx2loop1x64 + + // The masks and indexes for 4 input hashes are computed in each loop + // iteration. The hashes are loaded in Y1 so we can use vector instructions + // to compute all 4 indexes in parallel. The lower 32 bits of the hashes are + // also broadcasted in 4 YMM registers to compute the 4 masks that will then + // be applied to the filter. 
+ VMOVDQU (BX)(SI*8), Y1 + VPBROADCASTD 0(BX)(SI*8), Y2 + VPBROADCASTD 8(BX)(SI*8), Y3 + VPBROADCASTD 16(BX)(SI*8), Y4 + VPBROADCASTD 24(BX)(SI*8), Y5 + + fasthash4x64(Y0, Y1) + generateMask(Y2, Y6) + generateMask(Y3, Y7) + generateMask(Y4, Y8) + generateMask(Y5, Y9) + + // The next block of instructions move indexes from the vector to general + // purpose registers in order to use them as offsets when applying the mask + // to the filter. + extract4x64(Y1, X1, X10, R8, R9, R10, R11) + + // Apply masks to the filter; this operation is sensitive to aliasing, when + // blocks overlap the, CPU has to serialize the reads and writes, which has + // a measurable impact on throughput. This would be frequent for small bloom + // filters which may have only a few blocks, the probability of seeing + // overlapping blocks on large filters should be small enough to make this + // a non-issue though. + applyMask(Y6, (AX)(R8*1)) + applyMask(Y7, (AX)(R9*1)) + applyMask(Y8, (AX)(R10*1)) + applyMask(Y9, (AX)(R11*1)) + + ADDQ $4, SI + JMP avx2loop4x64 +avx2loop1x64: + // Compute trailing elements in `x` if the length was not a multiple of 4. + // This is the same algorithm as the one in the loop4x64 section, working + // on a single mask/block pair at a time. 
+ CMPQ SI, DX + JE avx2done + MOVQ (BX)(SI*8), R8 + VPBROADCASTD (BX)(SI*8), Y0 + fasthash1x64(CX, R8) + generateMask(Y0, Y1) + applyMask(Y1, (AX)(R8*1)) + INCQ SI + JMP avx2loop1x64 +avx2done: + VZEROUPPER + JMP done +fallback: + XORQ SI, SI + MOVQ DX, DI + MOVQ CX, R10 +loop: + CMPQ SI, DI + JE done + MOVLQZX (BX)(SI*8), R8 + MOVQ (BX)(SI*8), R9 + fasthash1x64(R10, R9) + insert($salt0, R8, 0(AX)(R9*1)) + insert($salt1, R8, 4(AX)(R9*1)) + insert($salt2, R8, 8(AX)(R9*1)) + insert($salt3, R8, 12(AX)(R9*1)) + insert($salt4, R8, 16(AX)(R9*1)) + insert($salt5, R8, 20(AX)(R9*1)) + insert($salt6, R8, 24(AX)(R9*1)) + insert($salt7, R8, 28(AX)(R9*1)) + INCQ SI + JMP loop +done: + RET + +// func filterInsert(f []Block, x uint64) +TEXT ·filterInsert(SB), NOSPLIT, $0-32 + MOVQ f_base+0(FP), AX + MOVQ f_len+8(FP), BX + MOVQ x+24(FP), CX + fasthash1x64(BX, CX) + CMPB ·hasAVX2(SB), $0 + JE fallback +avx2: + VPBROADCASTD x+24(FP), Y1 + generateMask(Y1, Y0) + applyMask(Y0, (AX)(CX*1)) + VZEROUPPER + RET +fallback: + ADDQ CX, AX + MOVL x+24(FP), BX + insert($salt0, BX, 0(AX)) + insert($salt1, BX, 4(AX)) + insert($salt2, BX, 8(AX)) + insert($salt3, BX, 12(AX)) + insert($salt4, BX, 16(AX)) + insert($salt5, BX, 20(AX)) + insert($salt6, BX, 24(AX)) + insert($salt7, BX, 28(AX)) + RET + +// func filterCheck(f []Block, x uint64) bool +TEXT ·filterCheck(SB), NOSPLIT, $0-33 + MOVQ f_base+0(FP), AX + MOVQ f_len+8(FP), BX + MOVQ x+24(FP), CX + fasthash1x64(BX, CX) + CMPB ·hasAVX2(SB), $0 + JE fallback +avx2: + VPBROADCASTD x+24(FP), Y1 + generateMask(Y1, Y0) + VPAND (AX)(CX*1), Y0, Y1 + VPTEST Y0, Y1 + SETCS ret+32(FP) + VZEROUPPER + RET +fallback: + ADDQ CX, AX + MOVL x+24(FP), BX + check($salt0, 0(AX), BX) + check($salt1, 4(AX), BX) + check($salt2, 8(AX), BX) + check($salt3, 12(AX), BX) + check($salt4, 16(AX), BX) + check($salt5, 20(AX), BX) + check($salt6, 24(AX), BX) + check($salt7, 28(AX), BX) + MOVB $1, CX + JMP done +notfound: + XORB CX, CX +done: + MOVB CX, ret+32(FP) + RET diff --git 
a/vendor/github.com/parquet-go/parquet-go/bloom/filter_default.go b/vendor/github.com/parquet-go/parquet-go/bloom/filter_default.go new file mode 100644 index 00000000000..38fadd5be83 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/filter_default.go @@ -0,0 +1,17 @@ +//go:build purego || !amd64 + +package bloom + +func filterInsertBulk(f []Block, x []uint64) { + for i := range x { + filterInsert(f, x[i]) + } +} + +func filterInsert(f []Block, x uint64) { + f[fasthash1x64(x, int32(len(f)))].Insert(uint32(x)) +} + +func filterCheck(f []Block, x uint64) bool { + return f[fasthash1x64(x, int32(len(f)))].Check(uint32(x)) +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/hash.go b/vendor/github.com/parquet-go/parquet-go/bloom/hash.go new file mode 100644 index 00000000000..9a3b61a3000 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/hash.go @@ -0,0 +1,77 @@ +package bloom + +import "github.com/parquet-go/parquet-go/bloom/xxhash" + +// Hash is an interface abstracting the hashing algorithm used in bloom filters. +// +// Hash instances must be safe to use concurrently from multiple goroutines. +type Hash interface { + // Returns the 64 bit hash of the value passed as argument. + Sum64(value []byte) uint64 + + // Compute hashes of individual values of primitive types. + Sum64Uint8(value uint8) uint64 + Sum64Uint16(value uint16) uint64 + Sum64Uint32(value uint32) uint64 + Sum64Uint64(value uint64) uint64 + Sum64Uint128(value [16]byte) uint64 + + // Compute hashes of the array of fixed size values passed as arguments, + // returning the number of hashes written to the destination buffer. + MultiSum64Uint8(dst []uint64, src []uint8) int + MultiSum64Uint16(dst []uint64, src []uint16) int + MultiSum64Uint32(dst []uint64, src []uint32) int + MultiSum64Uint64(dst []uint64, src []uint64) int + MultiSum64Uint128(dst []uint64, src [][16]byte) int +} + +// XXH64 is an implementation of the Hash interface using the XXH64 algorithm. 
+type XXH64 struct{} + +func (XXH64) Sum64(b []byte) uint64 { + return xxhash.Sum64(b) +} + +func (XXH64) Sum64Uint8(v uint8) uint64 { + return xxhash.Sum64Uint8(v) +} + +func (XXH64) Sum64Uint16(v uint16) uint64 { + return xxhash.Sum64Uint16(v) +} + +func (XXH64) Sum64Uint32(v uint32) uint64 { + return xxhash.Sum64Uint32(v) +} + +func (XXH64) Sum64Uint64(v uint64) uint64 { + return xxhash.Sum64Uint64(v) +} + +func (XXH64) Sum64Uint128(v [16]byte) uint64 { + return xxhash.Sum64Uint128(v) +} + +func (XXH64) MultiSum64Uint8(h []uint64, v []uint8) int { + return xxhash.MultiSum64Uint8(h, v) +} + +func (XXH64) MultiSum64Uint16(h []uint64, v []uint16) int { + return xxhash.MultiSum64Uint16(h, v) +} + +func (XXH64) MultiSum64Uint32(h []uint64, v []uint32) int { + return xxhash.MultiSum64Uint32(h, v) +} + +func (XXH64) MultiSum64Uint64(h []uint64, v []uint64) int { + return xxhash.MultiSum64Uint64(h, v) +} + +func (XXH64) MultiSum64Uint128(h []uint64, v [][16]byte) int { + return xxhash.MultiSum64Uint128(h, v) +} + +var ( + _ Hash = XXH64{} +) diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/LICENSE b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/LICENSE new file mode 100644 index 00000000000..80bef2ebc4e --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/LICENSE @@ -0,0 +1,27 @@ +The following files in this directory were derived from the open-source +project at https://github.com/cespare/xxhash. A copy of the original +license is provided below. 
+------------------------------------------------------------------------ + +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint.go b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint.go new file mode 100644 index 00000000000..84423d0efba --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint.go @@ -0,0 +1,37 @@ +package xxhash + +func Sum64Uint8(v uint8) uint64 { + h := prime5 + 1 + h ^= uint64(v) * prime5 + return avalanche(rol11(h) * prime1) +} + +func Sum64Uint16(v uint16) uint64 { + h := prime5 + 2 + h ^= uint64(v&0xFF) * prime5 + h = rol11(h) * prime1 + h ^= uint64(v>>8) * prime5 + h = rol11(h) * prime1 + return avalanche(h) +} + +func Sum64Uint32(v uint32) uint64 { + h := prime5 + 4 + h ^= uint64(v) * prime1 + return avalanche(rol23(h)*prime2 + prime3) +} + +func Sum64Uint64(v uint64) uint64 { + h := prime5 + 8 + h ^= round(0, v) + return avalanche(rol27(h)*prime1 + prime4) +} + +func Sum64Uint128(v [16]byte) uint64 { + h := prime5 + 16 + h ^= round(0, u64(v[:8])) + h = rol27(h)*prime1 + prime4 + h ^= round(0, u64(v[8:])) + h = rol27(h)*prime1 + prime4 + return avalanche(h) +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_amd64.go b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_amd64.go new file mode 100644 index 00000000000..03a4d31b159 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_amd64.go @@ -0,0 +1,49 @@ +//go:build !purego + +package xxhash + +import "golang.org/x/sys/cpu" + +// This file contains the declaration of signatures for the multi hashing +// functions implemented in sum64uint_amd64.s, which provides vectorized +// versions of the algorithms written in sum64uint_purego.go. 
+// +// The use of SIMD optimization yields measurable throughput increases when +// computing multiple hash values in parallel compared to hashing values +// individually in loops: +// +// name old speed new speed delta +// MultiSum64Uint8/4KB 4.94GB/s ± 2% 6.82GB/s ± 5% +38.00% (p=0.000 n=10+10) +// MultiSum64Uint16/4KB 3.44GB/s ± 2% 4.63GB/s ± 4% +34.56% (p=0.000 n=10+10) +// MultiSum64Uint32/4KB 4.84GB/s ± 2% 6.39GB/s ± 4% +31.94% (p=0.000 n=10+10) +// MultiSum64Uint64/4KB 3.77GB/s ± 2% 4.95GB/s ± 2% +31.14% (p=0.000 n=9+10) +// MultiSum64Uint128/4KB 1.84GB/s ± 2% 3.11GB/s ± 4% +68.70% (p=0.000 n=9+10) +// +// name old hash/s new hash/s delta +// MultiSum64Uint8/4KB 617M ± 2% 852M ± 5% +38.00% (p=0.000 n=10+10) +// MultiSum64Uint16/4KB 431M ± 2% 579M ± 4% +34.56% (p=0.000 n=10+10) +// MultiSum64Uint32/4KB 605M ± 2% 799M ± 4% +31.94% (p=0.000 n=10+10) +// MultiSum64Uint64/4KB 471M ± 2% 618M ± 2% +31.14% (p=0.000 n=9+10) +// MultiSum64Uint128/4KB 231M ± 2% 389M ± 4% +68.70% (p=0.000 n=9+10) +// +// The benchmarks measure the throughput of hashes produced, as a rate of values +// and bytes. 
+ +var hasAVX512 = cpu.X86.HasAVX512 && + cpu.X86.HasAVX512F && + cpu.X86.HasAVX512CD + +//go:noescape +func MultiSum64Uint8(h []uint64, v []uint8) int + +//go:noescape +func MultiSum64Uint16(h []uint64, v []uint16) int + +//go:noescape +func MultiSum64Uint32(h []uint64, v []uint32) int + +//go:noescape +func MultiSum64Uint64(h []uint64, v []uint64) int + +//go:noescape +func MultiSum64Uint128(h []uint64, v [][16]byte) int diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_amd64.s b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_amd64.s new file mode 100644 index 00000000000..9d420a9d910 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_amd64.s @@ -0,0 +1,755 @@ +//go:build !purego + +#include "textflag.h" + +/* +The algorithms in this file are assembly versions of the Go functions in the +sum64uint_default.go file. + +The implementations are mostly direct translations of the Go code to assembly, +leveraging SIMD instructions to process chunks of the input variables in +parallel at each loop iteration. To maximize utilization of the CPU capacity, +some of the functions unroll two steps of the vectorized loop per iteration, +which yields further throughput because the CPU is able to process some of the +instruction from the two steps in parallel due to having no data dependencies +between the inputs and outputs. + +The use of AVX-512 yields a significant increase in throughput on all the +algorithms, in most part thanks to the VPMULLQ instructions which compute +8 x 64 bits multiplication. There were no equivalent instruction in AVX2, which +required emulating vector multiplication with a combination of 32 bits multiply, +additions, shifts, and masks: the amount of instructions and data dependencies +resulted in the AVX2 code yielding equivalent performance characteristics for a +much higher complexity. 
+ +The benchmark results below showcase the improvements that the AVX-512 code +yields on the XXH64 algorithms: + +name old speed new speed delta +MultiSum64Uint8/4KB 4.97GB/s ± 0% 14.59GB/s ± 1% +193.73% (p=0.000 n=10+10) +MultiSum64Uint16/4KB 3.55GB/s ± 0% 9.46GB/s ± 0% +166.20% (p=0.000 n=10+9) +MultiSum64Uint32/4KB 4.48GB/s ± 0% 13.93GB/s ± 1% +210.93% (p=0.000 n=10+10) +MultiSum64Uint64/4KB 3.57GB/s ± 0% 11.12GB/s ± 1% +211.73% (p=0.000 n=9+10) +MultiSum64Uint128/4KB 2.54GB/s ± 0% 6.49GB/s ± 1% +155.69% (p=0.000 n=10+10) + +name old hash/s new hash/s delta +MultiSum64Uint8/4KB 621M ± 0% 1823M ± 1% +193.73% (p=0.000 n=10+10) +MultiSum64Uint16/4KB 444M ± 0% 1182M ± 0% +166.20% (p=0.000 n=10+9) +MultiSum64Uint32/4KB 560M ± 0% 1742M ± 1% +210.93% (p=0.000 n=10+10) +MultiSum64Uint64/4KB 446M ± 0% 1391M ± 1% +211.73% (p=0.000 n=9+10) +MultiSum64Uint128/4KB 317M ± 0% 811M ± 1% +155.69% (p=0.000 n=10+10) + +The functions perform runtime detection of AVX-512 support by testing the value +of the xxhash.hasAVX512 variable declared and initialized in sum64uint_amd64.go. +Branch mispredictions on those tests are very unlikely since the value is never +modified by the application. The cost of the comparisons are also amortized by +the bulk APIs of the MultiSum64* functions (a single test is required per call). + +If a bug is suspected in the vectorized code, compiling the program or running +the tests with -tags=purego can help verify whether the behavior changes when +the program does not use the assembly versions. + +Maintenance of these functions can be complex; however, the XXH64 algorithm is +unlikely to evolve, and the implementations unlikely to change. The tests in +sum64uint_test.go compare the outputs of MultiSum64* functions with the +reference xxhash.Sum64 function, future maintainers can rely on those tests +passing as a guarantee that they have not introduced regressions. 
+*/ + +#define PRIME1 0x9E3779B185EBCA87 +#define PRIME2 0xC2B2AE3D27D4EB4F +#define PRIME3 0x165667B19E3779F9 +#define PRIME4 0x85EBCA77C2B2AE63 +#define PRIME5 0x27D4EB2F165667C5 + +#define prime1 R12 +#define prime2 R13 +#define prime3 R14 +#define prime4 R11 +#define prime5 R11 // same as prime4 because they are not used together + +#define prime1ZMM Z12 +#define prime2ZMM Z13 +#define prime3ZMM Z14 +#define prime4ZMM Z15 +#define prime5ZMM Z15 + +DATA prime1vec<>+0(SB)/8, $PRIME1 +DATA prime1vec<>+8(SB)/8, $PRIME1 +DATA prime1vec<>+16(SB)/8, $PRIME1 +DATA prime1vec<>+24(SB)/8, $PRIME1 +DATA prime1vec<>+32(SB)/8, $PRIME1 +DATA prime1vec<>+40(SB)/8, $PRIME1 +DATA prime1vec<>+48(SB)/8, $PRIME1 +DATA prime1vec<>+56(SB)/8, $PRIME1 +GLOBL prime1vec<>(SB), RODATA|NOPTR, $64 + +DATA prime2vec<>+0(SB)/8, $PRIME2 +DATA prime2vec<>+8(SB)/8, $PRIME2 +DATA prime2vec<>+16(SB)/8, $PRIME2 +DATA prime2vec<>+24(SB)/8, $PRIME2 +DATA prime2vec<>+32(SB)/8, $PRIME2 +DATA prime2vec<>+40(SB)/8, $PRIME2 +DATA prime2vec<>+48(SB)/8, $PRIME2 +DATA prime2vec<>+56(SB)/8, $PRIME2 +GLOBL prime2vec<>(SB), RODATA|NOPTR, $64 + +DATA prime3vec<>+0(SB)/8, $PRIME3 +DATA prime3vec<>+8(SB)/8, $PRIME3 +DATA prime3vec<>+16(SB)/8, $PRIME3 +DATA prime3vec<>+24(SB)/8, $PRIME3 +DATA prime3vec<>+32(SB)/8, $PRIME3 +DATA prime3vec<>+40(SB)/8, $PRIME3 +DATA prime3vec<>+48(SB)/8, $PRIME3 +DATA prime3vec<>+56(SB)/8, $PRIME3 +GLOBL prime3vec<>(SB), RODATA|NOPTR, $64 + +DATA prime4vec<>+0(SB)/8, $PRIME4 +DATA prime4vec<>+8(SB)/8, $PRIME4 +DATA prime4vec<>+16(SB)/8, $PRIME4 +DATA prime4vec<>+24(SB)/8, $PRIME4 +DATA prime4vec<>+32(SB)/8, $PRIME4 +DATA prime4vec<>+40(SB)/8, $PRIME4 +DATA prime4vec<>+48(SB)/8, $PRIME4 +DATA prime4vec<>+56(SB)/8, $PRIME4 +GLOBL prime4vec<>(SB), RODATA|NOPTR, $64 + +DATA prime5vec<>+0(SB)/8, $PRIME5 +DATA prime5vec<>+8(SB)/8, $PRIME5 +DATA prime5vec<>+16(SB)/8, $PRIME5 +DATA prime5vec<>+24(SB)/8, $PRIME5 +DATA prime5vec<>+32(SB)/8, $PRIME5 +DATA prime5vec<>+40(SB)/8, $PRIME5 +DATA 
prime5vec<>+48(SB)/8, $PRIME5 +DATA prime5vec<>+56(SB)/8, $PRIME5 +GLOBL prime5vec<>(SB), RODATA|NOPTR, $64 + +DATA prime5vec1<>+0(SB)/8, $PRIME5+1 +DATA prime5vec1<>+8(SB)/8, $PRIME5+1 +DATA prime5vec1<>+16(SB)/8, $PRIME5+1 +DATA prime5vec1<>+24(SB)/8, $PRIME5+1 +DATA prime5vec1<>+32(SB)/8, $PRIME5+1 +DATA prime5vec1<>+40(SB)/8, $PRIME5+1 +DATA prime5vec1<>+48(SB)/8, $PRIME5+1 +DATA prime5vec1<>+56(SB)/8, $PRIME5+1 +GLOBL prime5vec1<>(SB), RODATA|NOPTR, $64 + +DATA prime5vec2<>+0(SB)/8, $PRIME5+2 +DATA prime5vec2<>+8(SB)/8, $PRIME5+2 +DATA prime5vec2<>+16(SB)/8, $PRIME5+2 +DATA prime5vec2<>+24(SB)/8, $PRIME5+2 +DATA prime5vec2<>+32(SB)/8, $PRIME5+2 +DATA prime5vec2<>+40(SB)/8, $PRIME5+2 +DATA prime5vec2<>+48(SB)/8, $PRIME5+2 +DATA prime5vec2<>+56(SB)/8, $PRIME5+2 +GLOBL prime5vec2<>(SB), RODATA|NOPTR, $64 + +DATA prime5vec4<>+0(SB)/8, $PRIME5+4 +DATA prime5vec4<>+8(SB)/8, $PRIME5+4 +DATA prime5vec4<>+16(SB)/8, $PRIME5+4 +DATA prime5vec4<>+24(SB)/8, $PRIME5+4 +DATA prime5vec4<>+32(SB)/8, $PRIME5+4 +DATA prime5vec4<>+40(SB)/8, $PRIME5+4 +DATA prime5vec4<>+48(SB)/8, $PRIME5+4 +DATA prime5vec4<>+56(SB)/8, $PRIME5+4 +GLOBL prime5vec4<>(SB), RODATA|NOPTR, $64 + +DATA prime5vec8<>+0(SB)/8, $PRIME5+8 +DATA prime5vec8<>+8(SB)/8, $PRIME5+8 +DATA prime5vec8<>+16(SB)/8, $PRIME5+8 +DATA prime5vec8<>+24(SB)/8, $PRIME5+8 +DATA prime5vec8<>+32(SB)/8, $PRIME5+8 +DATA prime5vec8<>+40(SB)/8, $PRIME5+8 +DATA prime5vec8<>+48(SB)/8, $PRIME5+8 +DATA prime5vec8<>+56(SB)/8, $PRIME5+8 +GLOBL prime5vec8<>(SB), RODATA|NOPTR, $64 + +DATA prime5vec16<>+0(SB)/8, $PRIME5+16 +DATA prime5vec16<>+8(SB)/8, $PRIME5+16 +DATA prime5vec16<>+16(SB)/8, $PRIME5+16 +DATA prime5vec16<>+24(SB)/8, $PRIME5+16 +DATA prime5vec16<>+32(SB)/8, $PRIME5+16 +DATA prime5vec16<>+40(SB)/8, $PRIME5+16 +DATA prime5vec16<>+48(SB)/8, $PRIME5+16 +DATA prime5vec16<>+56(SB)/8, $PRIME5+16 +GLOBL prime5vec16<>(SB), RODATA|NOPTR, $64 + +DATA lowbytemask<>+0(SB)/8, $0xFF +DATA lowbytemask<>+8(SB)/8, $0xFF +DATA 
lowbytemask<>+16(SB)/8, $0xFF +DATA lowbytemask<>+24(SB)/8, $0xFF +DATA lowbytemask<>+32(SB)/8, $0xFF +DATA lowbytemask<>+40(SB)/8, $0xFF +DATA lowbytemask<>+48(SB)/8, $0xFF +DATA lowbytemask<>+56(SB)/8, $0xFF +GLOBL lowbytemask<>(SB), RODATA|NOPTR, $64 + +DATA vpermi2qeven<>+0(SB)/8, $0 +DATA vpermi2qeven<>+8(SB)/8, $2 +DATA vpermi2qeven<>+16(SB)/8, $4 +DATA vpermi2qeven<>+24(SB)/8, $6 +DATA vpermi2qeven<>+32(SB)/8, $(1<<3)|0 +DATA vpermi2qeven<>+40(SB)/8, $(1<<3)|2 +DATA vpermi2qeven<>+48(SB)/8, $(1<<3)|4 +DATA vpermi2qeven<>+56(SB)/8, $(1<<3)|6 +GLOBL vpermi2qeven<>(SB), RODATA|NOPTR, $64 + +DATA vpermi2qodd<>+0(SB)/8, $1 +DATA vpermi2qodd<>+8(SB)/8, $3 +DATA vpermi2qodd<>+16(SB)/8, $5 +DATA vpermi2qodd<>+24(SB)/8, $7 +DATA vpermi2qodd<>+32(SB)/8, $(1<<3)|1 +DATA vpermi2qodd<>+40(SB)/8, $(1<<3)|3 +DATA vpermi2qodd<>+48(SB)/8, $(1<<3)|5 +DATA vpermi2qodd<>+56(SB)/8, $(1<<3)|7 +GLOBL vpermi2qodd<>(SB), RODATA|NOPTR, $64 + +#define round(input, acc) \ + IMULQ prime2, input \ + ADDQ input, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +#define avalanche(tmp, acc) \ + MOVQ acc, tmp \ + SHRQ $33, tmp \ + XORQ tmp, acc \ + IMULQ prime2, acc \ + MOVQ acc, tmp \ + SHRQ $29, tmp \ + XORQ tmp, acc \ + IMULQ prime3, acc \ + MOVQ acc, tmp \ + SHRQ $32, tmp \ + XORQ tmp, acc + +#define round8x64(input, acc) \ + VPMULLQ prime2ZMM, input, input \ + VPADDQ input, acc, acc \ + VPROLQ $31, acc, acc \ + VPMULLQ prime1ZMM, acc, acc + +#define avalanche8x64(tmp, acc) \ + VPSRLQ $33, acc, tmp \ + VPXORQ tmp, acc, acc \ + VPMULLQ prime2ZMM, acc, acc \ + VPSRLQ $29, acc, tmp \ + VPXORQ tmp, acc, acc \ + VPMULLQ prime3ZMM, acc, acc \ + VPSRLQ $32, acc, tmp \ + VPXORQ tmp, acc, acc + +// func MultiSum64Uint8(h []uint64, v []uint8) int +TEXT ·MultiSum64Uint8(SB), NOSPLIT, $0-54 + MOVQ $PRIME1, prime1 + MOVQ $PRIME2, prime2 + MOVQ $PRIME3, prime3 + MOVQ $PRIME5, prime5 + + MOVQ h_base+0(FP), AX + MOVQ h_len+8(FP), CX + MOVQ v_base+24(FP), BX + MOVQ v_len+32(FP), DX + + CMPQ CX, DX + CMOVQGT 
DX, CX + MOVQ CX, ret+48(FP) + + XORQ SI, SI + CMPQ CX, $32 + JB loop + CMPB ·hasAVX512(SB), $0 + JE loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + + VMOVDQU64 prime1vec<>(SB), prime1ZMM + VMOVDQU64 prime2vec<>(SB), prime2ZMM + VMOVDQU64 prime3vec<>(SB), prime3ZMM + VMOVDQU64 prime5vec<>(SB), prime5ZMM + VMOVDQU64 prime5vec1<>(SB), Z6 +loop32x64: + VMOVDQA64 Z6, Z0 + VMOVDQA64 Z6, Z3 + VMOVDQA64 Z6, Z20 + VMOVDQA64 Z6, Z23 + VPMOVZXBQ (BX)(SI*1), Z1 + VPMOVZXBQ 8(BX)(SI*1), Z4 + VPMOVZXBQ 16(BX)(SI*1), Z21 + VPMOVZXBQ 24(BX)(SI*1), Z24 + + VPMULLQ prime5ZMM, Z1, Z1 + VPMULLQ prime5ZMM, Z4, Z4 + VPMULLQ prime5ZMM, Z21, Z21 + VPMULLQ prime5ZMM, Z24, Z24 + VPXORQ Z1, Z0, Z0 + VPXORQ Z4, Z3, Z3 + VPXORQ Z21, Z20, Z20 + VPXORQ Z24, Z23, Z23 + VPROLQ $11, Z0, Z0 + VPROLQ $11, Z3, Z3 + VPROLQ $11, Z20, Z20 + VPROLQ $11, Z23, Z23 + VPMULLQ prime1ZMM, Z0, Z0 + VPMULLQ prime1ZMM, Z3, Z3 + VPMULLQ prime1ZMM, Z20, Z20 + VPMULLQ prime1ZMM, Z23, Z23 + + avalanche8x64(Z1, Z0) + avalanche8x64(Z4, Z3) + avalanche8x64(Z21, Z20) + avalanche8x64(Z24, Z23) + + VMOVDQU64 Z0, (AX)(SI*8) + VMOVDQU64 Z3, 64(AX)(SI*8) + VMOVDQU64 Z20, 128(AX)(SI*8) + VMOVDQU64 Z23, 192(AX)(SI*8) + ADDQ $32, SI + CMPQ SI, DI + JB loop32x64 + VZEROUPPER +loop: + CMPQ SI, CX + JE done + MOVQ $PRIME5+1, R8 + MOVBQZX (BX)(SI*1), R9 + + IMULQ prime5, R9 + XORQ R9, R8 + ROLQ $11, R8 + IMULQ prime1, R8 + avalanche(R9, R8) + + MOVQ R8, (AX)(SI*8) + INCQ SI + JMP loop +done: + RET + +// func MultiSum64Uint16(h []uint64, v []uint16) int +TEXT ·MultiSum64Uint16(SB), NOSPLIT, $0-54 + MOVQ $PRIME1, prime1 + MOVQ $PRIME2, prime2 + MOVQ $PRIME3, prime3 + MOVQ $PRIME5, prime5 + + MOVQ h_base+0(FP), AX + MOVQ h_len+8(FP), CX + MOVQ v_base+24(FP), BX + MOVQ v_len+32(FP), DX + + CMPQ CX, DX + CMOVQGT DX, CX + MOVQ CX, ret+48(FP) + + XORQ SI, SI + CMPQ CX, $16 + JB loop + CMPB ·hasAVX512(SB), $0 + JE loop + + MOVQ CX, DI + SHRQ $4, DI + SHLQ $4, DI + + VMOVDQU64 prime1vec<>(SB), prime1ZMM + VMOVDQU64 prime2vec<>(SB), 
prime2ZMM + VMOVDQU64 prime3vec<>(SB), prime3ZMM + VMOVDQU64 prime5vec<>(SB), prime5ZMM + VMOVDQU64 prime5vec2<>(SB), Z6 + VMOVDQU64 lowbytemask<>(SB), Z7 +loop16x64: + VMOVDQA64 Z6, Z0 + VMOVDQA64 Z6, Z3 + VPMOVZXWQ (BX)(SI*2), Z1 + VPMOVZXWQ 16(BX)(SI*2), Z4 + + VMOVDQA64 Z1, Z8 + VMOVDQA64 Z4, Z9 + VPSRLQ $8, Z8, Z8 + VPSRLQ $8, Z9, Z9 + VPANDQ Z7, Z1, Z1 + VPANDQ Z7, Z4, Z4 + + VPMULLQ prime5ZMM, Z1, Z1 + VPMULLQ prime5ZMM, Z4, Z4 + VPXORQ Z1, Z0, Z0 + VPXORQ Z4, Z3, Z3 + VPROLQ $11, Z0, Z0 + VPROLQ $11, Z3, Z3 + VPMULLQ prime1ZMM, Z0, Z0 + VPMULLQ prime1ZMM, Z3, Z3 + + VPMULLQ prime5ZMM, Z8, Z8 + VPMULLQ prime5ZMM, Z9, Z9 + VPXORQ Z8, Z0, Z0 + VPXORQ Z9, Z3, Z3 + VPROLQ $11, Z0, Z0 + VPROLQ $11, Z3, Z3 + VPMULLQ prime1ZMM, Z0, Z0 + VPMULLQ prime1ZMM, Z3, Z3 + + avalanche8x64(Z1, Z0) + avalanche8x64(Z4, Z3) + + VMOVDQU64 Z0, (AX)(SI*8) + VMOVDQU64 Z3, 64(AX)(SI*8) + ADDQ $16, SI + CMPQ SI, DI + JB loop16x64 + VZEROUPPER +loop: + CMPQ SI, CX + JE done + MOVQ $PRIME5+2, R8 + MOVWQZX (BX)(SI*2), R9 + + MOVQ R9, R10 + SHRQ $8, R10 + ANDQ $0xFF, R9 + + IMULQ prime5, R9 + XORQ R9, R8 + ROLQ $11, R8 + IMULQ prime1, R8 + + IMULQ prime5, R10 + XORQ R10, R8 + ROLQ $11, R8 + IMULQ prime1, R8 + + avalanche(R9, R8) + + MOVQ R8, (AX)(SI*8) + INCQ SI + JMP loop +done: + RET + +// func MultiSum64Uint32(h []uint64, v []uint32) int +TEXT ·MultiSum64Uint32(SB), NOSPLIT, $0-54 + MOVQ $PRIME1, prime1 + MOVQ $PRIME2, prime2 + MOVQ $PRIME3, prime3 + + MOVQ h_base+0(FP), AX + MOVQ h_len+8(FP), CX + MOVQ v_base+24(FP), BX + MOVQ v_len+32(FP), DX + + CMPQ CX, DX + CMOVQGT DX, CX + MOVQ CX, ret+48(FP) + + XORQ SI, SI + CMPQ CX, $32 + JB loop + CMPB ·hasAVX512(SB), $0 + JE loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + + VMOVDQU64 prime1vec<>(SB), prime1ZMM + VMOVDQU64 prime2vec<>(SB), prime2ZMM + VMOVDQU64 prime3vec<>(SB), prime3ZMM + VMOVDQU64 prime5vec4<>(SB), Z6 +loop32x64: + VMOVDQA64 Z6, Z0 + VMOVDQA64 Z6, Z3 + VMOVDQA64 Z6, Z20 + VMOVDQA64 Z6, Z23 + VPMOVZXDQ (BX)(SI*4), Z1 + 
VPMOVZXDQ 32(BX)(SI*4), Z4 + VPMOVZXDQ 64(BX)(SI*4), Z21 + VPMOVZXDQ 96(BX)(SI*4), Z24 + + VPMULLQ prime1ZMM, Z1, Z1 + VPMULLQ prime1ZMM, Z4, Z4 + VPMULLQ prime1ZMM, Z21, Z21 + VPMULLQ prime1ZMM, Z24, Z24 + VPXORQ Z1, Z0, Z0 + VPXORQ Z4, Z3, Z3 + VPXORQ Z21, Z20, Z20 + VPXORQ Z24, Z23, Z23 + VPROLQ $23, Z0, Z0 + VPROLQ $23, Z3, Z3 + VPROLQ $23, Z20, Z20 + VPROLQ $23, Z23, Z23 + VPMULLQ prime2ZMM, Z0, Z0 + VPMULLQ prime2ZMM, Z3, Z3 + VPMULLQ prime2ZMM, Z20, Z20 + VPMULLQ prime2ZMM, Z23, Z23 + VPADDQ prime3ZMM, Z0, Z0 + VPADDQ prime3ZMM, Z3, Z3 + VPADDQ prime3ZMM, Z20, Z20 + VPADDQ prime3ZMM, Z23, Z23 + + avalanche8x64(Z1, Z0) + avalanche8x64(Z4, Z3) + avalanche8x64(Z21, Z20) + avalanche8x64(Z24, Z23) + + VMOVDQU64 Z0, (AX)(SI*8) + VMOVDQU64 Z3, 64(AX)(SI*8) + VMOVDQU64 Z20, 128(AX)(SI*8) + VMOVDQU64 Z23, 192(AX)(SI*8) + ADDQ $32, SI + CMPQ SI, DI + JB loop32x64 + VZEROUPPER +loop: + CMPQ SI, CX + JE done + MOVQ $PRIME5+4, R8 + MOVLQZX (BX)(SI*4), R9 + + IMULQ prime1, R9 + XORQ R9, R8 + ROLQ $23, R8 + IMULQ prime2, R8 + ADDQ prime3, R8 + avalanche(R9, R8) + + MOVQ R8, (AX)(SI*8) + INCQ SI + JMP loop +done: + RET + +// func MultiSum64Uint64(h []uint64, v []uint64) int +TEXT ·MultiSum64Uint64(SB), NOSPLIT, $0-54 + MOVQ $PRIME1, prime1 + MOVQ $PRIME2, prime2 + MOVQ $PRIME3, prime3 + MOVQ $PRIME4, prime4 + + MOVQ h_base+0(FP), AX + MOVQ h_len+8(FP), CX + MOVQ v_base+24(FP), BX + MOVQ v_len+32(FP), DX + + CMPQ CX, DX + CMOVQGT DX, CX + MOVQ CX, ret+48(FP) + + XORQ SI, SI + CMPQ CX, $32 + JB loop + CMPB ·hasAVX512(SB), $0 + JE loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + + VMOVDQU64 prime1vec<>(SB), prime1ZMM + VMOVDQU64 prime2vec<>(SB), prime2ZMM + VMOVDQU64 prime3vec<>(SB), prime3ZMM + VMOVDQU64 prime4vec<>(SB), prime4ZMM + VMOVDQU64 prime5vec8<>(SB), Z6 +loop32x64: + VMOVDQA64 Z6, Z0 + VMOVDQA64 Z6, Z3 + VMOVDQA64 Z6, Z20 + VMOVDQA64 Z6, Z23 + VMOVDQU64 (BX)(SI*8), Z1 + VMOVDQU64 64(BX)(SI*8), Z4 + VMOVDQU64 128(BX)(SI*8), Z21 + VMOVDQU64 192(BX)(SI*8), Z24 + + 
VPXORQ Z2, Z2, Z2 + VPXORQ Z5, Z5, Z5 + VPXORQ Z22, Z22, Z22 + VPXORQ Z25, Z25, Z25 + round8x64(Z1, Z2) + round8x64(Z4, Z5) + round8x64(Z21, Z22) + round8x64(Z24, Z25) + + VPXORQ Z2, Z0, Z0 + VPXORQ Z5, Z3, Z3 + VPXORQ Z22, Z20, Z20 + VPXORQ Z25, Z23, Z23 + VPROLQ $27, Z0, Z0 + VPROLQ $27, Z3, Z3 + VPROLQ $27, Z20, Z20 + VPROLQ $27, Z23, Z23 + VPMULLQ prime1ZMM, Z0, Z0 + VPMULLQ prime1ZMM, Z3, Z3 + VPMULLQ prime1ZMM, Z20, Z20 + VPMULLQ prime1ZMM, Z23, Z23 + VPADDQ prime4ZMM, Z0, Z0 + VPADDQ prime4ZMM, Z3, Z3 + VPADDQ prime4ZMM, Z20, Z20 + VPADDQ prime4ZMM, Z23, Z23 + + avalanche8x64(Z1, Z0) + avalanche8x64(Z4, Z3) + avalanche8x64(Z21, Z20) + avalanche8x64(Z24, Z23) + + VMOVDQU64 Z0, (AX)(SI*8) + VMOVDQU64 Z3, 64(AX)(SI*8) + VMOVDQU64 Z20, 128(AX)(SI*8) + VMOVDQU64 Z23, 192(AX)(SI*8) + ADDQ $32, SI + CMPQ SI, DI + JB loop32x64 + VZEROUPPER +loop: + CMPQ SI, CX + JE done + MOVQ $PRIME5+8, R8 + MOVQ (BX)(SI*8), R9 + + XORQ R10, R10 + round(R9, R10) + XORQ R10, R8 + ROLQ $27, R8 + IMULQ prime1, R8 + ADDQ prime4, R8 + avalanche(R9, R8) + + MOVQ R8, (AX)(SI*8) + INCQ SI + JMP loop +done: + RET + +// func MultiSum64Uint128(h []uint64, v [][16]byte) int +TEXT ·MultiSum64Uint128(SB), NOSPLIT, $0-54 + MOVQ $PRIME1, prime1 + MOVQ $PRIME2, prime2 + MOVQ $PRIME3, prime3 + MOVQ $PRIME4, prime4 + + MOVQ h_base+0(FP), AX + MOVQ h_len+8(FP), CX + MOVQ v_base+24(FP), BX + MOVQ v_len+32(FP), DX + + CMPQ CX, DX + CMOVQGT DX, CX + MOVQ CX, ret+48(FP) + + XORQ SI, SI + CMPQ CX, $16 + JB loop + CMPB ·hasAVX512(SB), $0 + JE loop + + MOVQ CX, DI + SHRQ $4, DI + SHLQ $4, DI + + VMOVDQU64 prime1vec<>(SB), prime1ZMM + VMOVDQU64 prime2vec<>(SB), prime2ZMM + VMOVDQU64 prime3vec<>(SB), prime3ZMM + VMOVDQU64 prime4vec<>(SB), prime4ZMM + VMOVDQU64 prime5vec16<>(SB), Z6 + VMOVDQU64 vpermi2qeven<>(SB), Z7 + VMOVDQU64 vpermi2qodd<>(SB), Z8 +loop16x64: + // This algorithm is slightly different from the other ones, because it is + // the only case where the input values are larger than the output (128 
bits + // vs 64 bits). + // + // Computing the XXH64 of 128 bits values requires doing two passes over the + // lower and upper 64 bits. The lower and upper quad/ words are split in + // separate vectors, the first pass is applied on the vector holding the + // lower bits of 8 input values, then the second pass is applied with the + // vector holding the upper bits. + // + // Following the model used in the other functions, we unroll the work of + // two consecutive groups of 8 values per loop iteration in order to + // maximize utilization of CPU resources. + CMPQ SI, DI + JE loop + VMOVDQA64 Z6, Z0 + VMOVDQA64 Z6, Z20 + VMOVDQU64 (BX), Z1 + VMOVDQU64 64(BX), Z9 + VMOVDQU64 128(BX), Z21 + VMOVDQU64 192(BX), Z29 + + VMOVDQA64 Z7, Z2 + VMOVDQA64 Z8, Z3 + VMOVDQA64 Z7, Z22 + VMOVDQA64 Z8, Z23 + + VPERMI2Q Z9, Z1, Z2 + VPERMI2Q Z9, Z1, Z3 + VPERMI2Q Z29, Z21, Z22 + VPERMI2Q Z29, Z21, Z23 + + // Compute the rounds on inputs. + VPXORQ Z4, Z4, Z4 + VPXORQ Z5, Z5, Z5 + VPXORQ Z24, Z24, Z24 + VPXORQ Z25, Z25, Z25 + round8x64(Z2, Z4) + round8x64(Z3, Z5) + round8x64(Z22, Z24) + round8x64(Z23, Z25) + + // Lower 64 bits. + VPXORQ Z4, Z0, Z0 + VPXORQ Z24, Z20, Z20 + VPROLQ $27, Z0, Z0 + VPROLQ $27, Z20, Z20 + VPMULLQ prime1ZMM, Z0, Z0 + VPMULLQ prime1ZMM, Z20, Z20 + VPADDQ prime4ZMM, Z0, Z0 + VPADDQ prime4ZMM, Z20, Z20 + + // Upper 64 bits. 
+ VPXORQ Z5, Z0, Z0 + VPXORQ Z25, Z20, Z20 + VPROLQ $27, Z0, Z0 + VPROLQ $27, Z20, Z20 + VPMULLQ prime1ZMM, Z0, Z0 + VPMULLQ prime1ZMM, Z20, Z20 + VPADDQ prime4ZMM, Z0, Z0 + VPADDQ prime4ZMM, Z20, Z20 + + avalanche8x64(Z1, Z0) + avalanche8x64(Z21, Z20) + VMOVDQU64 Z0, (AX)(SI*8) + VMOVDQU64 Z20, 64(AX)(SI*8) + ADDQ $256, BX + ADDQ $16, SI + JMP loop16x64 + VZEROUPPER +loop: + CMPQ SI, CX + JE done + MOVQ $PRIME5+16, R8 + MOVQ (BX), DX + MOVQ 8(BX), DI + + XORQ R9, R9 + XORQ R10, R10 + round(DX, R9) + round(DI, R10) + + XORQ R9, R8 + ROLQ $27, R8 + IMULQ prime1, R8 + ADDQ prime4, R8 + + XORQ R10, R8 + ROLQ $27, R8 + IMULQ prime1, R8 + ADDQ prime4, R8 + + avalanche(R9, R8) + MOVQ R8, (AX)(SI*8) + ADDQ $16, BX + INCQ SI + JMP loop +done: + RET diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_purego.go b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_purego.go new file mode 100644 index 00000000000..4b420dc4f72 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/sum64uint_purego.go @@ -0,0 +1,53 @@ +//go:build purego || !amd64 + +package xxhash + +func MultiSum64Uint8(h []uint64, v []uint8) int { + n := min(len(h), len(v)) + h = h[:n] + v = v[:n] + for i := range v { + h[i] = Sum64Uint8(v[i]) + } + return n +} + +func MultiSum64Uint16(h []uint64, v []uint16) int { + n := min(len(h), len(v)) + h = h[:n] + v = v[:n] + for i := range v { + h[i] = Sum64Uint16(v[i]) + } + return n +} + +func MultiSum64Uint32(h []uint64, v []uint32) int { + n := min(len(h), len(v)) + h = h[:n] + v = v[:n] + for i := range v { + h[i] = Sum64Uint32(v[i]) + } + return n +} + +func MultiSum64Uint64(h []uint64, v []uint64) int { + n := min(len(h), len(v)) + h = h[:n] + v = v[:n] + for i := range v { + h[i] = Sum64Uint64(v[i]) + } + return n +} + +func MultiSum64Uint128(h []uint64, v [][16]byte) int { + n := min(len(h), len(v)) + h = h[:n] + v = v[:n] + for i := range v { + h[i] = Sum64Uint128(v[i]) + } + return n +} diff --git 
a/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash.go b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash.go new file mode 100644 index 00000000000..0d4b44b2dc8 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash.go @@ -0,0 +1,55 @@ +// Package xxhash is an extension of github.com/cespare/xxhash which adds +// routines optimized to hash arrays of fixed size elements. +package xxhash + +import ( + "encoding/binary" + "math/bits" +) + +const ( + prime1 uint64 = 0x9E3779B185EBCA87 + prime2 uint64 = 0xC2B2AE3D27D4EB4F + prime3 uint64 = 0x165667B19E3779F9 + prime4 uint64 = 0x85EBCA77C2B2AE63 + prime5 uint64 = 0x27D4EB2F165667C5 + // Pre-computed operations because the compiler otherwise complains that the + // results overflow 64 bit integers. + prime1plus2 uint64 = 0x60EA27EEADC0B5D6 // prime1 + prime2 + negprime1 uint64 = 0x61C8864E7A143579 // -prime1 +) + +func avalanche(h uint64) uint64 { + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + return h +} + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git 
a/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_amd64.go b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_amd64.go new file mode 100644 index 00000000000..4b32923def5 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_amd64.go @@ -0,0 +1,6 @@ +//go:build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_amd64.s b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_amd64.s new file mode 100644 index 00000000000..8f88dc1d6ab --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_amd64.s @@ -0,0 +1,180 @@ +//go:build !purego + +#include "textflag.h" + +#define PRIME1 0x9E3779B185EBCA87 +#define PRIME2 0xC2B2AE3D27D4EB4F +#define PRIME3 0x165667B19E3779F9 +#define PRIME4 0x85EBCA77C2B2AE63 +#define PRIME5 0x27D4EB2F165667C5 + +DATA prime3<>+0(SB)/8, $PRIME3 +GLOBL prime3<>(SB), RODATA|NOPTR, $8 + +DATA prime5<>+0(SB)/8, $PRIME5 +GLOBL prime5<>(SB), RODATA|NOPTR, $8 + +// Register allocation: +// AX h +// SI pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 PRIME1 +// R14 PRIME2 +// DI PRIME4 + +// round reads from and advances the buffer pointer in SI. +// It assumes that R13 has PRIME1 and R14 has PRIME2. +#define round(r) \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has PRIME1, R14 has PRIME2, and DI has PRIME4. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ DI, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ $PRIME1, R13 + MOVQ $PRIME2, R14 + MOVQ $PRIME4, DI + + // Load slice. 
+ MOVQ b_base+0(FP), SI + MOVQ b_len+8(FP), DX + LEAQ (SI)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until SI > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ $PRIME5, AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. + ADDQ $24, BX + + CMPQ SI, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (SI), R8 + ADDQ $8, SI + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ DI, AX + + CMPQ SI, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ SI, BX + JG singles + + MOVL (SI), R8 + ADDQ $4, SI + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ prime3<>(SB), AX + +singles: + ADDQ $4, BX + CMPQ SI, BX + JGE finalize + +singlesLoop: + MOVBQZX (SI), R12 + ADDQ $1, SI + IMULQ prime5<>(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ SI, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ prime3<>(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET diff --git a/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_purego.go b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_purego.go new file mode 100644 index 00000000000..896f817a5f1 --- /dev/null +++ 
b/vendor/github.com/parquet-go/parquet-go/bloom/xxhash/xxhash_purego.go @@ -0,0 +1,50 @@ +//go:build purego || !amd64 + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + var n = len(b) + var h uint64 + + if n >= 32 { + v1 := prime1plus2 + v2 := prime2 + v3 := uint64(0) + v4 := negprime1 + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + return avalanche(h) +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom_be.go b/vendor/github.com/parquet-go/parquet-go/bloom_be.go new file mode 100644 index 00000000000..f7800301a68 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom_be.go @@ -0,0 +1,19 @@ +//go:build s390x + +package parquet + +import ( + "encoding/binary" + + "github.com/parquet-go/parquet-go/deprecated" +) + +func unsafecastInt96ToBytes(src []deprecated.Int96) []byte { + out := make([]byte, len(src)*12) + for i := range src { + binary.LittleEndian.PutUint32(out[(i*12):4+(i*12)], uint32(src[i][0])) + binary.LittleEndian.PutUint32(out[4+(i*12):8+(i*12)], uint32(src[i][1])) + binary.LittleEndian.PutUint32(out[8+(i*12):12+(i*12)], uint32(src[i][2])) + } + return out +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom_le.go b/vendor/github.com/parquet-go/parquet-go/bloom_le.go new file mode 100644 index 
00000000000..6f8bbdcd846 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom_le.go @@ -0,0 +1,12 @@ +//go:build !s390x + +package parquet + +import ( + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" +) + +func unsafecastInt96ToBytes(src []deprecated.Int96) []byte { + return unsafecast.Slice[byte](src) +} diff --git a/vendor/github.com/parquet-go/parquet-go/buf.gen.yaml b/vendor/github.com/parquet-go/parquet-go/buf.gen.yaml new file mode 100644 index 00000000000..f2a2315a9b0 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/buf.gen.yaml @@ -0,0 +1,6 @@ +version: v2 +plugins: + - remote: buf.build/protocolbuffers/go + out: testdata/gen/go + opt: + - paths=source_relative diff --git a/vendor/github.com/parquet-go/parquet-go/buf.yaml b/vendor/github.com/parquet-go/parquet-go/buf.yaml new file mode 100644 index 00000000000..9cf6e83a4b5 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/buf.yaml @@ -0,0 +1,9 @@ +version: v2 +modules: + - path: testdata/proto +lint: + use: + - STANDARD +breaking: + use: + - FILE diff --git a/vendor/github.com/parquet-go/parquet-go/buffer.go b/vendor/github.com/parquet-go/parquet-go/buffer.go new file mode 100644 index 00000000000..2bb2dea7449 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/buffer.go @@ -0,0 +1,708 @@ +package parquet + +import ( + "log" + "reflect" + "runtime" + "slices" + "sort" + "sync/atomic" + + "github.com/parquet-go/parquet-go/internal/debug" + "github.com/parquet-go/parquet-go/internal/memory" +) + +// GenericBuffer is similar to a Buffer but uses a type parameter to define the +// Go type representing the schema of rows in the buffer. +// +// See GenericWriter for details about the benefits over the classic Buffer API. +type GenericBuffer[T any] struct { + base Buffer + write bufferFunc[T] +} + +// NewGenericBuffer is like NewBuffer but returns a GenericBuffer[T] suited to write +// rows of Go type T. 
+// +// The type parameter T should be a map, struct, or any. Any other types will +// cause a panic at runtime. Type checking is a lot more effective when the +// generic parameter is a struct type, using map and interface types is somewhat +// similar to using a Writer. If using an interface type for the type parameter, +// then providing a schema at instantiation is required. +// +// If the option list may explicitly declare a schema, it must be compatible +// with the schema generated from T. +func NewGenericBuffer[T any](options ...RowGroupOption) *GenericBuffer[T] { + config, err := NewRowGroupConfig(options...) + if err != nil { + panic(err) + } + + t := typeOf[T]() + if config.Schema == nil && t != nil { + config.Schema = schemaOf(dereference(t)) + } + + if config.Schema == nil { + panic("generic buffer must be instantiated with schema or concrete type.") + } + + buf := &GenericBuffer[T]{ + base: Buffer{config: config}, + } + buf.base.configure(config.Schema) + buf.write = bufferFuncOf[T](t, config.Schema) + return buf +} + +func typeOf[T any]() reflect.Type { + var v T + return reflect.TypeOf(v) +} + +type bufferFunc[T any] func(*GenericBuffer[T], []T) (int, error) + +func bufferFuncOf[T any](t reflect.Type, schema *Schema) bufferFunc[T] { + if t == nil { + return (*GenericBuffer[T]).writeRows + } + switch t.Kind() { + case reflect.Interface, reflect.Map: + return (*GenericBuffer[T]).writeRows + + case reflect.Struct: + return makeBufferFunc[T](t, schema) + + case reflect.Pointer: + if e := t.Elem(); e.Kind() == reflect.Struct { + return makeBufferFunc[T](t, schema) + } + } + panic("cannot create buffer for values of type " + t.String()) +} + +func makeBufferFunc[T any](t reflect.Type, schema *Schema) bufferFunc[T] { + writeRows := writeRowsFuncOf(t, schema, nil, nil) + return func(buf *GenericBuffer[T], rows []T) (n int, err error) { + writeRows(buf.base.columns, columnLevels{}, makeArrayFromSlice(rows)) + return len(rows), nil + } +} + +func (buf 
*GenericBuffer[T]) Size() int64 { + return buf.base.Size() +} + +func (buf *GenericBuffer[T]) NumRows() int64 { + return buf.base.NumRows() +} + +func (buf *GenericBuffer[T]) ColumnChunks() []ColumnChunk { + return buf.base.ColumnChunks() +} + +func (buf *GenericBuffer[T]) ColumnBuffers() []ColumnBuffer { + return buf.base.ColumnBuffers() +} + +func (buf *GenericBuffer[T]) SortingColumns() []SortingColumn { + return buf.base.SortingColumns() +} + +func (buf *GenericBuffer[T]) Len() int { + return buf.base.Len() +} + +func (buf *GenericBuffer[T]) Less(i, j int) bool { + return buf.base.Less(i, j) +} + +func (buf *GenericBuffer[T]) Swap(i, j int) { + buf.base.Swap(i, j) +} + +func (buf *GenericBuffer[T]) Reset() { + buf.base.Reset() +} + +func (buf *GenericBuffer[T]) Write(rows []T) (int, error) { + if len(rows) == 0 { + return 0, nil + } + return buf.write(buf, rows) +} + +func (buf *GenericBuffer[T]) WriteRows(rows []Row) (int, error) { + return buf.base.WriteRows(rows) +} + +func (buf *GenericBuffer[T]) WriteRowGroup(rowGroup RowGroup) (int64, error) { + return buf.base.WriteRowGroup(rowGroup) +} + +func (buf *GenericBuffer[T]) Rows() Rows { + return buf.base.Rows() +} + +func (buf *GenericBuffer[T]) Schema() *Schema { + return buf.base.Schema() +} + +func (buf *GenericBuffer[T]) writeRows(rows []T) (int, error) { + if cap(buf.base.rowbuf) < len(rows) { + buf.base.rowbuf = make([]Row, len(rows)) + } else { + buf.base.rowbuf = buf.base.rowbuf[:len(rows)] + } + defer clearRows(buf.base.rowbuf) + + schema := buf.base.Schema() + for i := range rows { + buf.base.rowbuf[i] = schema.Deconstruct(buf.base.rowbuf[i], &rows[i]) + } + + return buf.base.WriteRows(buf.base.rowbuf) +} + +var ( + _ RowGroup = (*GenericBuffer[any])(nil) + _ RowGroupWriter = (*GenericBuffer[any])(nil) + _ sort.Interface = (*GenericBuffer[any])(nil) + + _ RowGroup = (*GenericBuffer[struct{}])(nil) + _ RowGroupWriter = (*GenericBuffer[struct{}])(nil) + _ sort.Interface = 
(*GenericBuffer[struct{}])(nil) + + _ RowGroup = (*GenericBuffer[map[struct{}]struct{}])(nil) + _ RowGroupWriter = (*GenericBuffer[map[struct{}]struct{}])(nil) + _ sort.Interface = (*GenericBuffer[map[struct{}]struct{}])(nil) +) + +// Buffer represents an in-memory group of parquet rows. +// +// The main purpose of the Buffer type is to provide a way to sort rows before +// writing them to a parquet file. Buffer implements sort.Interface as a way +// to support reordering the rows that have been written to it. +type Buffer struct { + config *RowGroupConfig + schema *Schema + rowbuf []Row + colbuf [][]Value + chunks []ColumnChunk + columns []ColumnBuffer + sorted []ColumnBuffer +} + +// NewBuffer constructs a new buffer, using the given list of buffer options +// to configure the buffer returned by the function. +// +// The function panics if the buffer configuration is invalid. Programs that +// cannot guarantee the validity of the options passed to NewBuffer should +// construct the buffer configuration independently prior to calling this +// function: +// +// config, err := parquet.NewRowGroupConfig(options...) +// if err != nil { +// // handle the configuration error +// ... +// } else { +// // this call to create a buffer is guaranteed not to panic +// buffer := parquet.NewBuffer(config) +// ... +// } +func NewBuffer(options ...RowGroupOption) *Buffer { + config, err := NewRowGroupConfig(options...) + if err != nil { + panic(err) + } + buf := &Buffer{ + config: config, + } + if config.Schema != nil { + buf.configure(config.Schema) + } + return buf +} + +// configure sets up the buffer's columns based on the provided schema. +// It also prepares the internal sorting logic by using only the requested sorting columns +// (from buf.config.Sorting.SortingColumns) that are actually found within the schema, +// preserving the requested order but ignoring missing columns. 
+func (buf *Buffer) configure(schema *Schema) { + if schema == nil { + return + } + sortingColumns := buf.config.Sorting.SortingColumns + buf.sorted = make([]ColumnBuffer, len(sortingColumns)) + + forEachLeafColumnOf(schema, func(leaf leafColumn) { + nullOrdering := nullsGoLast + columnIndex := int(leaf.columnIndex) + columnType := leaf.node.Type() + bufferCap := buf.config.ColumnBufferCapacity + dictionary := (Dictionary)(nil) + encoding := encodingOf(leaf.node, nil) + + if isDictionaryEncoding(encoding) { + estimatedDictBufferSize := columnType.EstimateSize(bufferCap) + dictBuffer := columnType.NewValues( + make([]byte, 0, estimatedDictBufferSize), + nil, + ) + dictionary = columnType.NewDictionary(columnIndex, 0, dictBuffer) + columnType = dictionary.Type() + } + + sortingIndex := searchSortingColumn(sortingColumns, leaf.path) + if sortingIndex < len(sortingColumns) && sortingColumns[sortingIndex].NullsFirst() { + nullOrdering = nullsGoFirst + } + + column := columnType.NewColumnBuffer(columnIndex, bufferCap) + switch { + case leaf.maxRepetitionLevel > 0: + column = newRepeatedColumnBuffer(column, leaf.maxRepetitionLevel, leaf.maxDefinitionLevel, nullOrdering) + case leaf.maxDefinitionLevel > 0: + column = newOptionalColumnBuffer(column, leaf.maxDefinitionLevel, nullOrdering) + } + buf.columns = append(buf.columns, column) + + if sortingIndex < len(sortingColumns) { + if sortingColumns[sortingIndex].Descending() { + column = &reversedColumnBuffer{column} + } + buf.sorted[sortingIndex] = column + } + }) + + buf.sorted = slices.DeleteFunc(buf.sorted, func(cb ColumnBuffer) bool { return cb == nil }) + + buf.schema = schema + buf.rowbuf = make([]Row, 0, 1) + buf.colbuf = make([][]Value, len(buf.columns)) + buf.chunks = make([]ColumnChunk, len(buf.columns)) + + for i, column := range buf.columns { + buf.chunks[i] = column + } +} + +// Size returns the estimated size of the buffer in memory (in bytes). 
+func (buf *Buffer) Size() int64 { + size := int64(0) + for _, col := range buf.columns { + size += col.Size() + } + return size +} + +// NumRows returns the number of rows written to the buffer. +func (buf *Buffer) NumRows() int64 { return int64(buf.Len()) } + +// ColumnChunks returns the buffer columns. +func (buf *Buffer) ColumnChunks() []ColumnChunk { return buf.chunks } + +// ColumnBuffers returns the buffer columns. +// +// This method is similar to ColumnChunks, but returns a list of ColumnBuffer +// instead of a list of ColumnChunk (the latter being read-only); calling +// ColumnBuffers or ColumnChunks with the same index returns the same underlying +// objects, but with different types, which removes the need for making a type +// assertion if the program needed to write directly to the column buffers. +// The presence of the ColumnChunks method is still required to satisfy the +// RowGroup interface. +func (buf *Buffer) ColumnBuffers() []ColumnBuffer { return buf.columns } + +// Schema returns the schema of the buffer. +// +// The schema is either configured by passing a Schema in the option list when +// constructing the buffer, or lazily discovered when the first row is written. +func (buf *Buffer) Schema() *Schema { return buf.schema } + +// SortingColumns returns the list of columns by which the buffer will be +// sorted. +// +// The sorting order is configured by passing a SortingColumns option when +// constructing the buffer. +func (buf *Buffer) SortingColumns() []SortingColumn { return buf.config.Sorting.SortingColumns } + +// Len returns the number of rows written to the buffer. +func (buf *Buffer) Len() int { + if len(buf.columns) == 0 { + return 0 + } else { + // All columns have the same number of rows. + return buf.columns[0].Len() + } +} + +// Less returns true if row[i] < row[j] in the buffer. 
+func (buf *Buffer) Less(i, j int) bool { + for _, col := range buf.sorted { + switch { + case col.Less(i, j): + return true + case col.Less(j, i): + return false + } + } + return false +} + +// Swap exchanges the rows at indexes i and j. +func (buf *Buffer) Swap(i, j int) { + for _, col := range buf.columns { + col.Swap(i, j) + } +} + +// Reset clears the content of the buffer, allowing it to be reused. +func (buf *Buffer) Reset() { + for _, col := range buf.columns { + col.Reset() + } +} + +// Write writes a row held in a Go value to the buffer. +func (buf *Buffer) Write(row any) error { + if buf.schema == nil { + buf.configure(SchemaOf(row)) + } + + buf.rowbuf = buf.rowbuf[:1] + defer clearRows(buf.rowbuf) + + buf.rowbuf[0] = buf.schema.Deconstruct(buf.rowbuf[0], row) + _, err := buf.WriteRows(buf.rowbuf) + return err +} + +// WriteRows writes parquet rows to the buffer. +func (buf *Buffer) WriteRows(rows []Row) (int, error) { + defer func() { + for i, colbuf := range buf.colbuf { + clearValues(colbuf) + buf.colbuf[i] = colbuf[:0] + } + }() + + if buf.schema == nil { + return 0, ErrRowGroupSchemaMissing + } + + for _, row := range rows { + for _, value := range row { + columnIndex := value.Column() + buf.colbuf[columnIndex] = append(buf.colbuf[columnIndex], value) + } + } + + for columnIndex, values := range buf.colbuf { + if _, err := buf.columns[columnIndex].WriteValues(values); err != nil { + // TODO: an error at this stage will leave the buffer in an invalid + // state since the row was partially written. Applications are not + // expected to continue using the buffer after getting an error, + // maybe we can enforce it? + return 0, err + } + } + + return len(rows), nil +} + +// WriteRowGroup satisfies the RowGroupWriter interface. 
+func (buf *Buffer) WriteRowGroup(rowGroup RowGroup) (int64, error) { + rowGroupSchema := rowGroup.Schema() + switch { + case rowGroupSchema == nil: + return 0, ErrRowGroupSchemaMissing + case buf.schema == nil: + buf.configure(rowGroupSchema) + case !EqualNodes(buf.schema, rowGroupSchema): + return 0, ErrRowGroupSchemaMismatch + } + if !sortingColumnsHavePrefix(rowGroup.SortingColumns(), buf.SortingColumns()) { + return 0, ErrRowGroupSortingColumnsMismatch + } + n := buf.NumRows() + r := rowGroup.Rows() + defer r.Close() + _, err := CopyRows(bufferWriter{buf}, r) + return buf.NumRows() - n, err +} + +// Rows returns a reader exposing the current content of the buffer. +// +// The buffer and the returned reader share memory. Mutating the buffer +// concurrently to reading rows may result in non-deterministic behavior. +func (buf *Buffer) Rows() Rows { return NewRowGroupRowReader(buf) } + +// bufferWriter is an adapter for Buffer which implements both RowWriter and +// PageWriter to enable optimizations in CopyRows for types that support writing +// rows by copying whole pages instead of calling WriteRow repeatedly. 
+type bufferWriter struct{ buf *Buffer } + +func (w bufferWriter) WriteRows(rows []Row) (int, error) { + return w.buf.WriteRows(rows) +} + +func (w bufferWriter) WriteValues(values []Value) (int, error) { + return w.buf.columns[values[0].Column()].WriteValues(values) +} + +func (w bufferWriter) WritePage(page Page) (int64, error) { + return CopyValues(w.buf.columns[page.Column()], page.Values()) +} + +var ( + _ RowGroup = (*Buffer)(nil) + _ RowGroupWriter = (*Buffer)(nil) + _ sort.Interface = (*Buffer)(nil) + + _ RowWriter = (*bufferWriter)(nil) + _ PageWriter = (*bufferWriter)(nil) + _ ValueWriter = (*bufferWriter)(nil) +) + +type buffer[T memory.Datum] struct { + data memory.SliceBuffer[T] + refc atomic.Int32 + pool *bufferPool[T] + stack []byte + id uint64 +} + +func newBuffer[T memory.Datum](data []T) *buffer[T] { + b := &buffer[T]{data: memory.SliceBufferFrom(data)} + b.refc.Store(1) + return b +} + +func (b *buffer[T]) reset() { + b.data.Resize(0) +} + +func (b *buffer[T]) ref() { + if b.refc.Add(1) <= 1 { + panic("BUG: buffer reference count overflow") + } +} + +func (b *buffer[T]) unref() { + switch refc := b.refc.Add(-1); { + case refc < 0: + panic("BUG: buffer reference count underflow") + case refc == 0: + b.data.Reset() + if b.pool != nil { + b.pool.put(b) + } + } +} + +func monitorBufferRelease[T memory.Datum](b *buffer[T]) { + if rc := b.refc.Load(); rc != 0 { + log.Printf("PARQUETGODEBUG: buffer[%d] garbage collected with non-zero reference count (rc=%d)\n%s", b.id, rc, string(b.stack)) + } +} + +type bufferPool[T memory.Datum] struct { + pool memory.Pool[buffer[T]] +} + +var bufferIDCounter atomic.Uint64 + +func (p *bufferPool[T]) get(size int) *buffer[T] { + b := p.pool.Get( + func() *buffer[T] { + b := &buffer[T]{pool: p} + if debug.TRACEBUF > 0 { + b.stack = make([]byte, 4096) + runtime.SetFinalizer(b, monitorBufferRelease[T]) + } + return b + }, + func(*buffer[T]) {}, + ) + + if debug.TRACEBUF > 0 { + b.id = bufferIDCounter.Add(1) + b.stack = 
b.stack[:runtime.Stack(b.stack[:cap(b.stack)], false)] + } + + b.data.Resize(size) + b.refc.Store(1) + return b +} + +func (p *bufferPool[T]) put(b *buffer[T]) { + if b.pool != p { + panic("BUG: buffer returned to a different pool than the one it was allocated from") + } + if b.refc.Load() != 0 { + panic("BUG: buffer returned to pool with a non-zero reference count") + } + p.pool.Put(b) +} + +var ( + buffers bufferPool[byte] + indexes bufferPool[int32] + offsets bufferPool[uint32] +) + +type bufferedPage struct { + Page + offsets *buffer[uint32] + values *buffer[byte] + repetitionLevels *buffer[byte] + definitionLevels *buffer[byte] +} + +func newBufferedPage(page Page, offsets *buffer[uint32], values *buffer[byte], definitionLevels, repetitionLevels *buffer[byte]) *bufferedPage { + p := &bufferedPage{ + Page: page, + offsets: offsets, + values: values, + definitionLevels: definitionLevels, + repetitionLevels: repetitionLevels, + } + bufferRef(offsets) + bufferRef(values) + bufferRef(definitionLevels) + bufferRef(repetitionLevels) + return p +} + +func (p *bufferedPage) Slice(i, j int64) Page { + return newBufferedPage( + p.Page.Slice(i, j), + p.offsets, + p.values, + p.definitionLevels, + p.repetitionLevels, + ) +} + +func (p *bufferedPage) Retain() { + Retain(p.Page) + bufferRef(p.offsets) + bufferRef(p.values) + bufferRef(p.definitionLevels) + bufferRef(p.repetitionLevels) +} + +func (p *bufferedPage) Release() { + Release(p.Page) + bufferUnref(p.offsets) + bufferUnref(p.values) + bufferUnref(p.definitionLevels) + bufferUnref(p.repetitionLevels) +} + +// ReleaseAndDetachValues releases all underlying buffers except the one backing byte-array contents. This +// allows row and values read from the buffer to continue to be valid, instead relying +// on the garbage collector after it is no longer needed. +func (p *bufferedPage) ReleaseAndDetachValues() { + // We don't return the values buffer to the pool and allow + // standard GC to track it. 
Remove debug finalizer. + if debug.TRACEBUF > 0 { + runtime.SetFinalizer(p.values, nil) + } + + // Return everything else back to pools. + Release(p.Page) + bufferUnref(p.offsets) + bufferUnref(p.definitionLevels) + bufferUnref(p.repetitionLevels) +} + +func bufferRef[T memory.Datum](buf *buffer[T]) { + if buf != nil { + buf.ref() + } +} + +func bufferUnref[T memory.Datum](buf *buffer[T]) { + if buf != nil { + buf.unref() + } +} + +// Retain is a helper function to increment the reference counter of pages +// backed by memory which can be granularly managed by the application. +// +// Usage of this function is optional and with Release, is intended to allow +// finer grain memory management in the application. Most programs should be +// able to rely on automated memory management provided by the Go garbage +// collector instead. +// +// The function should be called when a page lifetime is about to be shared +// between multiple goroutines or layers of an application, and the program +// wants to express "sharing ownership" of the page. +// +// Calling this function on pages that do not embed a reference counter does +// nothing. +func Retain(page Page) { + if p, _ := page.(retainable); p != nil { + p.Retain() + } +} + +// Release is a helper function to decrement the reference counter of pages +// backed by memory which can be granularly managed by the application. +// +// Usage of this is optional and with Retain, is intended to allow finer grained +// memory management in the application, at the expense of potentially causing +// panics if the page is used after its reference count has reached zero. Most +// programs should be able to rely on automated memory management provided by +// the Go garbage collector instead. +// +// The function should be called to return a page to the internal buffer pool, +// when a goroutine "releases ownership" it acquired either by being the single +// owner (e.g. 
capturing the return value from a ReadPage call) or having gotten
+// shared ownership by calling Retain.
+//
+// Calling this function on pages that do not embed a reference counter does
+// nothing.
+func Release(page Page) {
+	if p, _ := page.(releasable); p != nil {
+		p.Release()
+	}
+}
+
+// releaseAndDetachValues is an optional granular memory management method like Release,
+// that releases ownership of the page and potentially allows its underlying buffers
+// to be reused for new pages acquired from ReadPage. However this method makes the
+// additional guarantee that string and byte array values read from the page will
+// continue to be valid past the page lifetime. Page-specific implementations do this
+// by reusing what buffers they can, while not invalidating the string and byte array values.
+// Those are relinquished to the garbage collector and cleaned up when no longer referenced
+// by the calling application.
+//
+// Usage of this is optional and follows the same guidelines as Release.
+//
+// Calling this function on pages that do not embed a reference counter does nothing.
+func releaseAndDetachValues(page Page) {
+	if p, _ := page.(detachable); p != nil {
+		p.ReleaseAndDetachValues()
+	}
+}
+
+type retainable interface {
+	Retain()
+}
+
+type releasable interface {
+	Release()
+}
+
+type detachable interface {
+	ReleaseAndDetachValues()
+}
+
+var (
+	_ retainable = (*bufferedPage)(nil)
+	_ releasable = (*bufferedPage)(nil)
+	_ detachable = (*bufferedPage)(nil)
+)
diff --git a/vendor/github.com/parquet-go/parquet-go/buffer_pool.go b/vendor/github.com/parquet-go/parquet-go/buffer_pool.go
new file mode 100644
index 00000000000..d2260a652d1
--- /dev/null
+++ b/vendor/github.com/parquet-go/parquet-go/buffer_pool.go
@@ -0,0 +1,146 @@
+package parquet
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/parquet-go/parquet-go/internal/memory"
+)
+
+// BufferPool is an interface abstracting the underlying implementation of
+// page buffer pools.
+// +// The parquet-go package provides two implementations of this interface, one +// backed by in-memory buffers (on the Go heap), and the other using temporary +// files on disk. +// +// Applications which need finer grain control over the allocation and retention +// of page buffers may choose to provide their own implementation and install it +// via the parquet.ColumnPageBuffers writer option. +// +// BufferPool implementations must be safe to use concurrently from multiple +// goroutines. +type BufferPool interface { + // GetBuffer is called when a parquet writer needs to acquire a new + // page buffer from the pool. + GetBuffer() io.ReadWriteSeeker + + // PutBuffer is called when a parquet writer releases a page buffer to + // the pool. + // + // The parquet.Writer type guarantees that the buffers it calls this method + // with were previously acquired by a call to GetBuffer on the same + // pool, and that it will not use them anymore after the call. + PutBuffer(io.ReadWriteSeeker) +} + +const defaultChunkSize = 32 * 1024 // 32 KiB + +// NewBufferPool creates a new in-memory page buffer pool. +// +// The implementation is backed by sync.Pool and allocates memory buffers on the +// Go heap. +func NewBufferPool() BufferPool { return NewChunkBufferPool(defaultChunkSize) } + +// NewChunkBufferPool creates a new in-memory page buffer pool. +// +// The implementation is backed by sync.Pool and allocates memory buffers on the +// Go heap in fixed-size chunks. 
+func NewChunkBufferPool(chunkSize int) BufferPool { + return &memoryBufferPool{size: chunkSize} +} + +type memoryBufferPool struct { + pool memory.Pool[memory.Buffer] + size int +} + +func (m *memoryBufferPool) GetBuffer() io.ReadWriteSeeker { + return m.pool.Get( + func() *memory.Buffer { return memory.NewBuffer(m.size) }, + (*memory.Buffer).Reset, + ) +} + +func (m *memoryBufferPool) PutBuffer(buf io.ReadWriteSeeker) { + if b, _ := buf.(*memory.Buffer); b != nil { + b.Reset() // release internal buffers + m.pool.Put(b) + } +} + +type fileBufferPool struct { + err error + tempdir string + pattern string +} + +// NewFileBufferPool creates a new on-disk page buffer pool. +func NewFileBufferPool(tempdir, pattern string) BufferPool { + pool := &fileBufferPool{ + tempdir: tempdir, + pattern: pattern, + } + pool.tempdir, pool.err = filepath.Abs(pool.tempdir) + return pool +} + +func (pool *fileBufferPool) GetBuffer() io.ReadWriteSeeker { + if pool.err != nil { + return &errorBuffer{err: pool.err} + } + f, err := os.CreateTemp(pool.tempdir, pool.pattern) + if err != nil { + return &errorBuffer{err: err} + } + return f +} + +func (pool *fileBufferPool) PutBuffer(buf io.ReadWriteSeeker) { + if f, _ := buf.(*os.File); f != nil { + _ = f.Close() + _ = os.Remove(f.Name()) + } +} + +type errorBuffer struct{ err error } + +func (buf *errorBuffer) Read([]byte) (int, error) { return 0, buf.err } +func (buf *errorBuffer) Write([]byte) (int, error) { return 0, buf.err } +func (buf *errorBuffer) ReadFrom(io.Reader) (int64, error) { return 0, buf.err } +func (buf *errorBuffer) WriteTo(io.Writer) (int64, error) { return 0, buf.err } +func (buf *errorBuffer) Seek(int64, int) (int64, error) { return 0, buf.err } + +var ( + defaultColumnBufferPool = memoryBufferPool{size: defaultChunkSize} + defaultSortingBufferPool = memoryBufferPool{size: defaultChunkSize} + + _ io.ReaderFrom = (*errorBuffer)(nil) + _ io.WriterTo = (*errorBuffer)(nil) +) + +type readerAt struct { + reader 
io.ReadSeeker + offset int64 +} + +func (r *readerAt) ReadAt(b []byte, off int64) (int, error) { + if r.offset < 0 || off != r.offset { + off, err := r.reader.Seek(off, io.SeekStart) + if err != nil { + return 0, err + } + r.offset = off + } + n, err := r.reader.Read(b) + r.offset += int64(n) + return n, err +} + +func newReaderAt(r io.ReadSeeker) io.ReaderAt { + if rr, ok := r.(io.ReaderAt); ok { + return rr + } + return &readerAt{reader: r, offset: -1} +} diff --git a/vendor/github.com/parquet-go/parquet-go/column.go b/vendor/github.com/parquet-go/parquet-go/column.go new file mode 100644 index 00000000000..68d8807112f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column.go @@ -0,0 +1,849 @@ +package parquet + +import ( + "encoding/binary" + "fmt" + "io" + "reflect" + + "github.com/parquet-go/parquet-go/compress" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" + "github.com/parquet-go/parquet-go/internal/memory" +) + +// Column represents a column in a parquet file. +// +// Methods of Column values are safe to call concurrently from multiple +// goroutines. +// +// Column instances satisfy the Node interface. +type Column struct { + typ Type + file *File + schema *format.SchemaElement + order *format.ColumnOrder + path columnPath + fields []Field + columns []*Column + chunks []*format.ColumnChunk + columnIndex []*format.ColumnIndex + offsetIndex []*format.OffsetIndex + encoding encoding.Encoding + compression compress.Codec + + depth int8 + maxRepetitionLevel byte + maxDefinitionLevel byte + index int16 +} + +// Type returns the type of the column. +// +// The returned value is unspecified if c is not a leaf column. +func (c *Column) Type() Type { return c.typ } + +// Optional returns true if the column is optional. 
+func (c *Column) Optional() bool { return schemaRepetitionTypeOf(c.schema) == format.Optional } + +// Repeated returns true if the column may repeat. +func (c *Column) Repeated() bool { return schemaRepetitionTypeOf(c.schema) == format.Repeated } + +// Required returns true if the column is required. +func (c *Column) Required() bool { return schemaRepetitionTypeOf(c.schema) == format.Required } + +// Leaf returns true if c is a leaf column. +func (c *Column) Leaf() bool { return isLeafSchemaElement(c.schema) } + +// Fields returns the list of fields on the column. +func (c *Column) Fields() []Field { return c.fields } + +// Encoding returns the encodings used by this column. +func (c *Column) Encoding() encoding.Encoding { return c.encoding } + +// Compression returns the compression codecs used by this column. +func (c *Column) Compression() compress.Codec { return c.compression } + +// Path of the column in the parquet schema. +func (c *Column) Path() []string { return c.path[1:] } + +// Name returns the column name. +func (c *Column) Name() string { return c.schema.Name } + +// ID returns column field id +func (c *Column) ID() int { return int(c.schema.FieldID) } + +// Columns returns the list of child columns. +// +// The method returns the same slice across multiple calls, the program must +// treat it as a read-only value. +func (c *Column) Columns() []*Column { return c.columns } + +// Column returns the child column matching the given name. +func (c *Column) Column(name string) *Column { + for _, child := range c.columns { + if child.Name() == name { + return child + } + } + return nil +} + +// Pages returns a reader exposing all pages in this column, across row groups. 
+func (c *Column) Pages() Pages { + if c.file == nil { + return emptyPages{} + } + return c.PagesFrom(c.file.reader) +} + +func (c *Column) PagesFrom(reader io.ReaderAt) Pages { + if c.index < 0 || c.file == nil { + return emptyPages{} + } + r := &columnPages{ + pages: make([]FilePages, len(c.file.rowGroups)), + } + for i := range r.pages { + r.pages[i].init(c.file.rowGroups[i].(*FileRowGroup).columns[c.index].(*FileColumnChunk), reader) + } + return r +} + +type columnPages struct { + pages []FilePages + index int +} + +func (c *columnPages) ReadPage() (Page, error) { + for { + if c.index >= len(c.pages) { + return nil, io.EOF + } + p, err := c.pages[c.index].ReadPage() + if err == nil || err != io.EOF { + return p, err + } + c.index++ + } +} + +func (c *columnPages) SeekToRow(rowIndex int64) error { + c.index = 0 + + for c.index < len(c.pages) && c.pages[c.index].chunk.rowGroup.NumRows < rowIndex { + rowIndex -= c.pages[c.index].chunk.rowGroup.NumRows + c.index++ + } + + if c.index < len(c.pages) { + if err := c.pages[c.index].SeekToRow(rowIndex); err != nil { + return err + } + for i := c.index + 1; i < len(c.pages); i++ { + p := &c.pages[i] + if err := p.SeekToRow(0); err != nil { + return err + } + } + } + return nil +} + +func (c *columnPages) Close() error { + var lastErr error + + for i := range c.pages { + if err := c.pages[i].Close(); err != nil { + lastErr = err + } + } + + c.pages = nil + c.index = 0 + return lastErr +} + +// Depth returns the position of the column relative to the root. +func (c *Column) Depth() int { return int(c.depth) } + +// MaxRepetitionLevel returns the maximum value of repetition levels on this +// column. +func (c *Column) MaxRepetitionLevel() int { return int(c.maxRepetitionLevel) } + +// MaxDefinitionLevel returns the maximum value of definition levels on this +// column. +func (c *Column) MaxDefinitionLevel() int { return int(c.maxDefinitionLevel) } + +// Index returns the position of the column in a row. 
Only leaf columns have a +// column index, the method returns -1 when called on non-leaf columns. +func (c *Column) Index() int { return int(c.index) } + +// GoType returns the Go type that best represents the parquet column. +func (c *Column) GoType() reflect.Type { return goTypeOf(c) } + +// Value returns the sub-value in base for the child column at the given +// index. +func (c *Column) Value(base reflect.Value) reflect.Value { + return base.MapIndex(reflect.ValueOf(&c.schema.Name).Elem()) +} + +// String returns a human-readable string representation of the column. +func (c *Column) String() string { return c.path.String() + ": " + sprint(c.Name(), c) } + +func (c *Column) forEachLeaf(do func(*Column)) { + if isLeafSchemaElement(c.schema) { + do(c) + } else { + for _, child := range c.columns { + child.forEachLeaf(do) + } + } +} + +func openColumns(file *File, metadata *format.FileMetaData, columnIndexes []format.ColumnIndex, offsetIndexes []format.OffsetIndex) (*Column, error) { + cl := columnLoader{} + + c, err := cl.open(file, metadata, columnIndexes, offsetIndexes, nil) + if err != nil { + return nil, err + } + + // Validate that there aren't extra entries in the row group columns, + // which would otherwise indicate that there are dangling data pages + // in the file. 
+ for index, rowGroup := range metadata.RowGroups { + if cl.rowGroupColumnIndex != len(rowGroup.Columns) { + return nil, fmt.Errorf("row group at index %d contains %d columns but %d were referenced by the column schemas", + index, len(rowGroup.Columns), cl.rowGroupColumnIndex) + } + } + + _, err = c.setLevels(0, 0, 0, 0) + return c, err +} + +func (c *Column) setLevels(depth, repetition, definition, index int) (int, error) { + if depth > MaxColumnDepth { + return -1, fmt.Errorf("cannot represent parquet columns with more than %d nested levels: %s", MaxColumnDepth, c.path) + } + if index > MaxColumnIndex { + return -1, fmt.Errorf("cannot represent parquet rows with more than %d columns: %s", MaxColumnIndex, c.path) + } + if repetition > MaxRepetitionLevel { + return -1, fmt.Errorf("cannot represent parquet columns with more than %d repetition levels: %s", MaxRepetitionLevel, c.path) + } + if definition > MaxDefinitionLevel { + return -1, fmt.Errorf("cannot represent parquet columns with more than %d definition levels: %s", MaxDefinitionLevel, c.path) + } + + switch schemaRepetitionTypeOf(c.schema) { + case format.Optional: + definition++ + case format.Repeated: + repetition++ + definition++ + } + + c.depth = int8(depth) + c.maxRepetitionLevel = byte(repetition) + c.maxDefinitionLevel = byte(definition) + depth++ + + // Only leaf columns get a column index. 
+ if isLeafSchemaElement(c.schema) { + c.index = int16(index) + index++ + } else { + // Groups (including empty groups) don't get a column index + c.index = -1 + } + + var err error + for _, child := range c.columns { + if index, err = child.setLevels(depth, repetition, definition, index); err != nil { + return -1, err + } + } + return index, nil +} + +type columnLoader struct { + schemaIndex int + columnOrderIndex int + rowGroupColumnIndex int +} + +func (cl *columnLoader) open(file *File, metadata *format.FileMetaData, columnIndexes []format.ColumnIndex, offsetIndexes []format.OffsetIndex, path []string) (*Column, error) { + c := &Column{ + file: file, + schema: &metadata.Schema[cl.schemaIndex], + } + c.path = columnPath(path).append(c.schema.Name) + + cl.schemaIndex++ + numChildren := 0 + if c.schema.NumChildren != nil { + numChildren = int(*c.schema.NumChildren) + } + + if isLeafSchemaElement(c.schema) { + c.typ = schemaElementTypeOf(c.schema) + + if cl.columnOrderIndex < len(metadata.ColumnOrders) { + c.order = &metadata.ColumnOrders[cl.columnOrderIndex] + cl.columnOrderIndex++ + } + + rowGroups := metadata.RowGroups + rowGroupColumnIndex := cl.rowGroupColumnIndex + cl.rowGroupColumnIndex++ + + c.chunks = make([]*format.ColumnChunk, 0, len(rowGroups)) + c.columnIndex = make([]*format.ColumnIndex, 0, len(rowGroups)) + c.offsetIndex = make([]*format.OffsetIndex, 0, len(rowGroups)) + + for i, rowGroup := range rowGroups { + if rowGroupColumnIndex >= len(rowGroup.Columns) { + return nil, fmt.Errorf("row group at index %d does not have enough columns", i) + } + c.chunks = append(c.chunks, &rowGroup.Columns[rowGroupColumnIndex]) + } + + if len(columnIndexes) > 0 { + for i := range rowGroups { + if rowGroupColumnIndex >= len(columnIndexes) { + return nil, fmt.Errorf("row group at index %d does not have enough column index pages", i) + } + c.columnIndex = append(c.columnIndex, &columnIndexes[rowGroupColumnIndex]) + } + } + + if len(offsetIndexes) > 0 { + for i := 
range rowGroups { + if rowGroupColumnIndex >= len(offsetIndexes) { + return nil, fmt.Errorf("row group at index %d does not have enough offset index pages", i) + } + c.offsetIndex = append(c.offsetIndex, &offsetIndexes[rowGroupColumnIndex]) + } + } + + if len(c.chunks) > 0 { + // Pick the encoding and compression codec of the first chunk. + // + // Technically each column chunk may use a different compression + // codec, and each page of the column chunk might have a different + // encoding. Exposing these details does not provide a lot of value + // to the end user. + // + // Programs that wish to determine the encoding and compression of + // each page of the column should iterate through the pages and read + // the page headers to determine which compression and encodings are + // applied. + for _, encoding := range c.chunks[0].MetaData.Encoding { + if c.encoding == nil { + c.encoding = LookupEncoding(encoding) + } + if encoding != format.Plain && encoding != format.RLE { + c.encoding = LookupEncoding(encoding) + break + } + } + c.compression = LookupCompressionCodec(c.chunks[0].MetaData.Codec) + } + + return c, nil + } + + c.typ = &groupType{} + if lt := c.schema.LogicalType; lt != nil && lt.Map != nil { + c.typ = &mapType{} + } else if lt != nil && lt.List != nil { + c.typ = &listType{} + } else if lt != nil && lt.Variant != nil { + c.typ = &variantType{} + } + c.columns = make([]*Column, numChildren) + + for i := range c.columns { + if cl.schemaIndex >= len(metadata.Schema) { + return nil, fmt.Errorf("column %q has more children than there are schemas in the file: %d > %d", + c.schema.Name, cl.schemaIndex+1, len(metadata.Schema)) + } + + var err error + c.columns[i], err = cl.open(file, metadata, columnIndexes, offsetIndexes, c.path) + if err != nil { + return nil, fmt.Errorf("%s: %w", c.schema.Name, err) + } + } + + c.fields = make([]Field, len(c.columns)) + for i, column := range c.columns { + c.fields[i] = column + } + return c, nil +} + +// 
isLeafSchemaElement returns true if the schema element represents a leaf node +// (a column with actual data). According to the Parquet specification, the Type +// field is set for leaf nodes and not set (nil) for group nodes. +// +// This is the authoritative way to distinguish between: +// - Leaf nodes: Type != nil (has column data) +// - Group nodes: Type == nil (including empty groups with NumChildren == 0) +func isLeafSchemaElement(element *format.SchemaElement) bool { + return element.Type != nil +} + +func schemaElementTypeOf(s *format.SchemaElement) Type { + if lt := s.LogicalType; lt != nil { + // A logical type exists, the Type interface implementations in this + // package are all based on the logical parquet types declared in the + // format sub-package so we can return them directly via a pointer type + // conversion. + switch { + case lt.UTF8 != nil: + return (*stringType)(lt.UTF8) + case lt.Map != nil: + return (*mapType)(lt.Map) + case lt.List != nil: + return (*listType)(lt.List) + case lt.Enum != nil: + return (*enumType)(lt.Enum) + case lt.Decimal != nil: + // A parquet decimal can be one of several different physical types. 
+ if t := s.Type; t != nil { + var typ Type + switch kind := Kind(*s.Type); kind { + case Int32: + typ = Int32Type + case Int64: + typ = Int64Type + case ByteArray: + typ = ByteArrayType + case FixedLenByteArray: + if s.TypeLength == nil { + panic("DECIMAL using FIXED_LEN_BYTE_ARRAY must specify a length") + } + typ = FixedLenByteArrayType(int(*s.TypeLength)) + default: + panic("DECIMAL must be of type INT32, INT64, BYTE_ARRAY or FIXED_LEN_BYTE_ARRAY but got " + kind.String()) + } + return &decimalType{ + decimal: *lt.Decimal, + Type: typ, + } + } + case lt.Date != nil: + return (*dateType)(lt.Date) + case lt.Time != nil: + return (*timeType)(lt.Time) + case lt.Timestamp != nil: + return (*timestampType)(lt.Timestamp) + case lt.Integer != nil: + return (*intType)(lt.Integer) + case lt.Unknown != nil: + return (*nullType)(lt.Unknown) + case lt.Json != nil: + return (*jsonType)(lt.Json) + case lt.Bson != nil: + return (*bsonType)(lt.Bson) + case lt.UUID != nil: + return (*uuidType)(lt.UUID) + } + } + + if ct := s.ConvertedType; ct != nil { + // This column contains no logical type but has a converted type, it + // was likely created by an older parquet writer. Convert the legacy + // type representation to the equivalent logical parquet type. + switch *ct { + case deprecated.UTF8: + return &stringType{} + case deprecated.Map: + return &mapType{} + case deprecated.MapKeyValue: + return &groupType{} + case deprecated.List: + return &listType{} + case deprecated.Enum: + return &enumType{} + case deprecated.Decimal: + if s.Scale != nil && s.Precision != nil { + // A parquet decimal can be one of several different physical types. 
+ if t := s.Type; t != nil { + var typ Type + switch kind := Kind(*s.Type); kind { + case Int32: + typ = Int32Type + case Int64: + typ = Int64Type + case FixedLenByteArray: + if s.TypeLength == nil { + panic("DECIMAL using FIXED_LEN_BYTE_ARRAY must specify a length") + } + typ = FixedLenByteArrayType(int(*s.TypeLength)) + case ByteArray: + typ = ByteArrayType + default: + panic("DECIMAL must be of type INT32, INT64, BYTE_ARRAY or FIXED_LEN_BYTE_ARRAY but got " + kind.String()) + } + return &decimalType{ + decimal: format.DecimalType{ + Scale: *s.Scale, + Precision: *s.Precision, + }, + Type: typ, + } + } + } + case deprecated.Date: + return &dateType{} + case deprecated.TimeMillis: + return &timeType{IsAdjustedToUTC: true, Unit: Millisecond.TimeUnit()} + case deprecated.TimeMicros: + return &timeType{IsAdjustedToUTC: true, Unit: Microsecond.TimeUnit()} + case deprecated.TimestampMillis: + return ×tampType{IsAdjustedToUTC: true, Unit: Millisecond.TimeUnit()} + case deprecated.TimestampMicros: + return ×tampType{IsAdjustedToUTC: true, Unit: Microsecond.TimeUnit()} + case deprecated.Uint8: + return &unsignedIntTypes[0] + case deprecated.Uint16: + return &unsignedIntTypes[1] + case deprecated.Uint32: + return &unsignedIntTypes[2] + case deprecated.Uint64: + return &unsignedIntTypes[3] + case deprecated.Int8: + return &signedIntTypes[0] + case deprecated.Int16: + return &signedIntTypes[1] + case deprecated.Int32: + return &signedIntTypes[2] + case deprecated.Int64: + return &signedIntTypes[3] + case deprecated.Json: + return &jsonType{} + case deprecated.Bson: + return &bsonType{} + case deprecated.Interval: + // TODO + } + } + + if t := s.Type; t != nil { + // The column only has a physical type, convert it to one of the + // primitive types supported by this package. 
+ switch kind := Kind(*t); kind { + case Boolean: + return BooleanType + case Int32: + return Int32Type + case Int64: + return Int64Type + case Int96: + return Int96Type + case Float: + return FloatType + case Double: + return DoubleType + case ByteArray: + return ByteArrayType + case FixedLenByteArray: + if s.TypeLength != nil { + return FixedLenByteArrayType(int(*s.TypeLength)) + } + } + } + + // If we reach this point, we are likely reading a parquet column that was + // written with a non-standard type or is in a newer version of the format + // than this package supports. + return &nullType{} +} + +func schemaRepetitionTypeOf(s *format.SchemaElement) format.FieldRepetitionType { + if s.RepetitionType != nil { + return *s.RepetitionType + } + return format.Required +} + +func (c *Column) decompress(compressedPageData []byte, uncompressedPageSize int32) (page *buffer[byte], err error) { + page = buffers.get(int(uncompressedPageSize)) + decoded, err := c.compression.Decode(page.data.Slice(), compressedPageData) + switch { + case err != nil: + page.unref() + page = nil + case len(decoded) < int(uncompressedPageSize): + page.data.Resize(len(decoded)) + case len(decoded) > int(uncompressedPageSize): + page.data = memory.SliceBufferFrom(decoded) + } + return page, err +} + +// DecodeDataPageV1 decodes a data page from the header, compressed data, and +// optional dictionary passed as arguments. 
+func (c *Column) DecodeDataPageV1(header DataPageHeaderV1, page []byte, dict Dictionary) (Page, error) { + return c.decodeDataPageV1(header, newBuffer(page), dict, -1) +} + +func (c *Column) decodeDataPageV1(header DataPageHeaderV1, page *buffer[byte], dict Dictionary, size int32) (Page, error) { + var ( + pageData = page.data.Slice() + err error + ) + + if isCompressed(c.compression) { + if page, err = c.decompress(pageData, size); err != nil { + return nil, fmt.Errorf("decompressing data page v1: %w", err) + } + defer page.unref() + pageData = page.data.Slice() + } + + var ( + numValues = int(header.NumValues()) + repetitionLevels *buffer[byte] + definitionLevels *buffer[byte] + ) + + if c.maxRepetitionLevel > 0 { + encoding := lookupLevelEncoding(header.RepetitionLevelEncoding(), c.maxRepetitionLevel) + repetitionLevels, pageData, err = decodeLevelsV1(encoding, numValues, pageData) + if err != nil { + return nil, fmt.Errorf("decoding repetition levels of data page v1: %w", err) + } + defer repetitionLevels.unref() + } + + if c.maxDefinitionLevel > 0 { + encoding := lookupLevelEncoding(header.DefinitionLevelEncoding(), c.maxDefinitionLevel) + definitionLevels, pageData, err = decodeLevelsV1(encoding, numValues, pageData) + if err != nil { + return nil, fmt.Errorf("decoding definition levels of data page v1: %w", err) + } + defer definitionLevels.unref() + + // Data pages v1 did not embed the number of null values, + // so we have to compute it from the definition levels. + numValues -= countLevelsNotEqual(definitionLevels.data.Slice(), c.maxDefinitionLevel) + } + + return c.decodeDataPage(header, numValues, repetitionLevels, definitionLevels, page, pageData, dict) +} + +// DecodeDataPageV2 decodes a data page from the header, compressed data, and +// optional dictionary passed as arguments. 
+func (c *Column) DecodeDataPageV2(header DataPageHeaderV2, page []byte, dict Dictionary) (Page, error) {
+	return c.decodeDataPageV2(header, newBuffer(page), dict, -1)
+}
+
+func (c *Column) decodeDataPageV2(header DataPageHeaderV2, page *buffer[byte], dict Dictionary, size int32) (Page, error) {
+	numValues := int(header.NumValues())
+	pageData := page.data.Slice()
+	var err error
+
+	var repetitionLevels *buffer[byte]
+	var definitionLevels *buffer[byte]
+
+	if length := header.RepetitionLevelsByteLength(); length > 0 {
+		if c.maxRepetitionLevel == 0 {
+			// In some cases we've observed files which have a non-zero
+			// repetition level despite the column not being repeated
+			// (nor nested within a repeated column).
+			//
+			// See https://github.com/apache/parquet-testing/pull/24
+			pageData, err = skipLevelsV2(pageData, length)
+		} else {
+			encoding := lookupLevelEncoding(header.RepetitionLevelEncoding(), c.maxRepetitionLevel)
+			repetitionLevels, pageData, err = decodeLevelsV2(encoding, numValues, pageData, length)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("decoding repetition levels of data page v2: %w", err)
+		}
+		if repetitionLevels != nil {
+			defer repetitionLevels.unref()
+
+			repLevels := repetitionLevels.data.Slice()
+			if len(repLevels) != 0 && repLevels[0] != 0 {
+				return nil, fmt.Errorf("%w: first repetition level for column %d (%s) is %d instead of zero, indicating that the page contains trailing values from the previous page (this is forbidden for data pages v2)",
+					ErrMalformedRepetitionLevel, c.Index(), c.Name(), repLevels[0])
+			}
+		}
+	}
+
+	if length := header.DefinitionLevelsByteLength(); length > 0 {
+		if c.maxDefinitionLevel == 0 {
+			pageData, err = skipLevelsV2(pageData, length)
+		} else {
+			encoding := lookupLevelEncoding(header.DefinitionLevelEncoding(), c.maxDefinitionLevel)
+			definitionLevels, pageData, err = decodeLevelsV2(encoding, numValues, pageData, length)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("decoding definition levels of data page v2: %w", err)
+		}
+		if definitionLevels != nil {
+			defer definitionLevels.unref()
+		}
+	}
+
+	if isCompressed(c.compression) && header.IsCompressed() {
+		if page, err = c.decompress(pageData, size); err != nil {
+			return nil, fmt.Errorf("decompressing data page v2: %w", err)
+		}
+		defer page.unref()
+		pageData = page.data.Slice()
+	}
+
+	numValues -= int(header.NumNulls())
+	return c.decodeDataPage(header, numValues, repetitionLevels, definitionLevels, page, pageData, dict)
+}
+
+func (c *Column) decodeDataPage(header DataPageHeader, numValues int, repetitionLevels, definitionLevels, page *buffer[byte], data []byte, dict Dictionary) (Page, error) {
+	pageEncoding := LookupEncoding(header.Encoding())
+	pageType := c.Type()
+	pageKind := pageType.Kind()
+
+	if isDictionaryEncoding(pageEncoding) {
+		// In some legacy configurations, the PLAIN_DICTIONARY encoding is used
+		// on data page headers to indicate that the page contains indexes into
+		// the dictionary page, but the page is still encoded using the RLE
+		// encoding in this case, so we convert it to RLE_DICTIONARY.
+ pageEncoding = &RLEDictionary + pageType = indexedPageType{newIndexedType(pageType, dict)} + } + + var obuf *buffer[uint32] + var vbuf *buffer[byte] + var pageOffsets []uint32 + var pageValues []byte + switch { + case pageEncoding.CanDecodeInPlace(): + vbuf = page + pageValues = data + default: + vbuf = buffers.get(pageType.EstimateDecodeSize(numValues, data, pageEncoding)) + defer vbuf.unref() + pageValues = vbuf.data.Slice() + } + + // Page offsets not needed when dictionary-encoded + if pageKind == ByteArray && !isDictionaryEncoding(pageEncoding) { + obuf = offsets.get(numValues + 1) + defer obuf.unref() + pageOffsets = obuf.data.Slice() + } + + values := pageType.NewValues(pageValues, pageOffsets) + values, err := pageType.Decode(values, data, pageEncoding) + if err != nil { + return nil, err + } + + newPage := pageType.NewPage(c.Index(), numValues, values) + switch { + case c.maxRepetitionLevel > 0: + newPage = newRepeatedPage( + newPage, + c.maxRepetitionLevel, + c.maxDefinitionLevel, + repetitionLevels.data.Slice(), + definitionLevels.data.Slice(), + ) + case c.maxDefinitionLevel > 0: + newPage = newOptionalPage( + newPage, + c.maxDefinitionLevel, + definitionLevels.data.Slice(), + ) + } + + return newBufferedPage(newPage, obuf, vbuf, definitionLevels, repetitionLevels), nil +} + +func decodeLevelsV1(enc encoding.Encoding, numValues int, data []byte) (*buffer[byte], []byte, error) { + if len(data) < 4 { + return nil, data, io.ErrUnexpectedEOF + } + i := 4 + j := 4 + int(binary.LittleEndian.Uint32(data)) + if j > len(data) { + return nil, data, io.ErrUnexpectedEOF + } + levels, err := decodeLevels(enc, numValues, data[i:j]) + return levels, data[j:], err +} + +func decodeLevelsV2(enc encoding.Encoding, numValues int, data []byte, length int64) (*buffer[byte], []byte, error) { + levels, err := decodeLevels(enc, numValues, data[:length]) + return levels, data[length:], err +} + +func decodeLevels(enc encoding.Encoding, numValues int, data []byte) (levels 
*buffer[byte], err error) { + levels = buffers.get(numValues) + decoded, err := enc.DecodeLevels(levels.data.Slice(), data) + if err != nil { + levels.unref() + levels = nil + } else { + levels.data.Resize(0) + levels.data.Append(decoded...) + switch { + case levels.data.Len() < numValues: + err = fmt.Errorf("decoding level expected %d values but got only %d", numValues, levels.data.Len()) + case levels.data.Len() > numValues: + levels.data.Resize(numValues) + } + } + return levels, err +} + +func skipLevelsV2(data []byte, length int64) ([]byte, error) { + if length >= int64(len(data)) { + return data, io.ErrUnexpectedEOF + } + return data[length:], nil +} + +// DecodeDictionary decodes a data page from the header and compressed data +// passed as arguments. +func (c *Column) DecodeDictionary(header DictionaryPageHeader, page []byte) (Dictionary, error) { + return c.decodeDictionary(header, newBuffer(page), -1) +} + +func (c *Column) decodeDictionary(header DictionaryPageHeader, page *buffer[byte], size int32) (Dictionary, error) { + pageData := page.data.Slice() + + if isCompressed(c.compression) { + var err error + if page, err = c.decompress(pageData, size); err != nil { + return nil, fmt.Errorf("decompressing dictionary page: %w", err) + } + defer page.unref() + pageData = page.data.Slice() + } + + pageType := c.Type() + pageEncoding := header.Encoding() + if pageEncoding == format.PlainDictionary { + pageEncoding = format.Plain + } + + // Dictionaries always have PLAIN encoding, so we need to allocate offsets for the decoded page. 
+ numValues := int(header.NumValues()) + dictBufferSize := pageType.EstimateDecodeSize(numValues, pageData, LookupEncoding(pageEncoding)) + values := pageType.NewValues(make([]byte, 0, dictBufferSize), make([]uint32, 0, numValues)) + values, err := pageType.Decode(values, pageData, LookupEncoding(pageEncoding)) + if err != nil { + return nil, err + } + return pageType.NewDictionary(int(c.index), numValues, values), nil +} + +var _ Node = (*Column)(nil) diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer.go b/vendor/github.com/parquet-go/parquet-go/column_buffer.go new file mode 100644 index 00000000000..9fd7b433eaa --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer.go @@ -0,0 +1,150 @@ +package parquet + +import ( + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/sparse" +) + +// ColumnBuffer is an interface representing columns of a row group. +// +// ColumnBuffer implements sort.Interface as a way to support reordering the +// rows that have been written to it. +// +// The current implementation has a limitation which prevents applications from +// providing custom versions of this interface because it contains unexported +// methods. The only way to create ColumnBuffer values is to call the +// NewColumnBuffer of Type instances. This limitation may be lifted in future +// releases. +type ColumnBuffer interface { + // Exposes a read-only view of the column buffer. + ColumnChunk + + // The column implements ValueReaderAt as a mechanism to read values at + // specific locations within the buffer. + ValueReaderAt + + // The column implements ValueWriter as a mechanism to optimize the copy + // of values into the buffer in contexts where the row information is + // provided by the values because the repetition and definition levels + // are set. + ValueWriter + + // For indexed columns, returns the underlying dictionary holding the column + // values. 
If the column is not indexed, nil is returned. + Dictionary() Dictionary + + // Returns a copy of the column. The returned copy shares no memory with + // the original, mutations of either column will not modify the other. + Clone() ColumnBuffer + + // Returns the column as a Page. + Page() Page + + // Clears all rows written to the column. + Reset() + + // Returns the current capacity of the column (rows). + Cap() int + + // Returns the number of rows currently written to the column. + Len() int + + // Compares rows at index i and j and reports whether i < j. + Less(i, j int) bool + + // Swaps rows at index i and j. + Swap(i, j int) + + // Returns the size of the column buffer in bytes. + Size() int64 + + // This method is employed to write rows from arrays of Go values into the + // column buffer. The method is currently unexported because it uses unsafe + // APIs which would be difficult for applications to leverage, increasing + // the risk of introducing bugs in the code. As a consequence, applications + // cannot use custom implementations of the ColumnBuffer interface since + // they cannot declare an unexported method that would match this signature. + // It means that in order to create a ColumnBuffer value, programs need to + // go through a call to NewColumnBuffer on a Type instance. We make this + // trade off for now as it is preferrable to optimize for safety over + // extensibility in the public APIs, we might revisit in the future if we + // learn about valid use cases for custom column buffer types. + writeValues(levels columnLevels, rows sparse.Array) + + // Parquet primitive type write methods. Each column buffer implementation + // supports only the Parquet types it can handle and panics for others. + // These methods are unexported for the same reasons as writeValues above. 
+ writeBoolean(levels columnLevels, value bool) + writeInt32(levels columnLevels, value int32) + writeInt64(levels columnLevels, value int64) + writeInt96(levels columnLevels, value deprecated.Int96) + writeFloat(levels columnLevels, value float32) + writeDouble(levels columnLevels, value float64) + writeByteArray(levels columnLevels, value []byte) + writeNull(levels columnLevels) +} + +func columnIndexOfNullable(base ColumnBuffer, maxDefinitionLevel byte, definitionLevels []byte) (ColumnIndex, error) { + index, err := base.ColumnIndex() + if err != nil { + return nil, err + } + return &nullableColumnIndex{ + ColumnIndex: index, + maxDefinitionLevel: maxDefinitionLevel, + definitionLevels: definitionLevels, + }, nil +} + +type nullableColumnIndex struct { + ColumnIndex + maxDefinitionLevel byte + definitionLevels []byte +} + +func (index *nullableColumnIndex) NullPage(i int) bool { + return index.NullCount(i) == int64(len(index.definitionLevels)) +} + +func (index *nullableColumnIndex) NullCount(i int) int64 { + return int64(countLevelsNotEqual(index.definitionLevels, index.maxDefinitionLevel)) +} + +type nullOrdering func(column ColumnBuffer, i, j int, maxDefinitionLevel, definitionLevel1, definitionLevel2 byte) bool + +func nullsGoFirst(column ColumnBuffer, i, j int, maxDefinitionLevel, definitionLevel1, definitionLevel2 byte) bool { + if definitionLevel1 != maxDefinitionLevel { + return definitionLevel2 == maxDefinitionLevel + } else { + return definitionLevel2 == maxDefinitionLevel && column.Less(i, j) + } +} + +func nullsGoLast(column ColumnBuffer, i, j int, maxDefinitionLevel, definitionLevel1, definitionLevel2 byte) bool { + return definitionLevel1 == maxDefinitionLevel && (definitionLevel2 != maxDefinitionLevel || column.Less(i, j)) +} + +// reversedColumnBuffer is an adapter of ColumnBuffer which inverses the order +// in which rows are ordered when the column gets sorted. 
+// +// This type is used when buffers are constructed with sorting columns ordering +// values in descending order. +type reversedColumnBuffer struct{ ColumnBuffer } + +func (col *reversedColumnBuffer) Less(i, j int) bool { return col.ColumnBuffer.Less(j, i) } + +var ( + _ ColumnBuffer = (*optionalColumnBuffer)(nil) + _ ColumnBuffer = (*repeatedColumnBuffer)(nil) + _ ColumnBuffer = (*booleanColumnBuffer)(nil) + _ ColumnBuffer = (*int32ColumnBuffer)(nil) + _ ColumnBuffer = (*int64ColumnBuffer)(nil) + _ ColumnBuffer = (*int96ColumnBuffer)(nil) + _ ColumnBuffer = (*floatColumnBuffer)(nil) + _ ColumnBuffer = (*doubleColumnBuffer)(nil) + _ ColumnBuffer = (*byteArrayColumnBuffer)(nil) + _ ColumnBuffer = (*fixedLenByteArrayColumnBuffer)(nil) + _ ColumnBuffer = (*uint32ColumnBuffer)(nil) + _ ColumnBuffer = (*uint64ColumnBuffer)(nil) + _ ColumnBuffer = (*be128ColumnBuffer)(nil) +) diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_amd64.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_amd64.go new file mode 100644 index 00000000000..8f7e91f654e --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_amd64.go @@ -0,0 +1,30 @@ +//go:build !purego + +package parquet + +import ( + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/internal/bytealg" + "github.com/parquet-go/parquet-go/sparse" + "golang.org/x/sys/cpu" +) + +func broadcastValueInt32(dst []int32, src int8) { + bytealg.Broadcast(unsafecast.Slice[byte](dst), byte(src)) +} + +//go:noescape +func broadcastRangeInt32AVX2(dst []int32, base int32) + +func broadcastRangeInt32(dst []int32, base int32) { + if len(dst) >= 8 && cpu.X86.HasAVX2 { + broadcastRangeInt32AVX2(dst, base) + } else { + for i := range dst { + dst[i] = base + int32(i) + } + } +} + +//go:noescape +func writePointersBE128(values [][16]byte, rows sparse.Array) diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_amd64.s 
b/vendor/github.com/parquet-go/parquet-go/column_buffer_amd64.s new file mode 100644 index 00000000000..1eef03d1d22 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_amd64.s @@ -0,0 +1,67 @@ +//go:build !purego + +#include "textflag.h" + +// func broadcastRangeInt32AVX2(dst []int32, base int32) +TEXT ·broadcastRangeInt32AVX2(SB), NOSPLIT, $0-28 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), BX + MOVL base+24(FP), CX + XORQ SI, SI + + CMPQ BX, $8 + JB test1x4 + + VMOVDQU ·range0n8(SB), Y0 // [0,1,2,3,4,5,6,7] + VPBROADCASTD ·range0n8+32(SB), Y1 // [8,8,8,8,8,8,8,8] + VPBROADCASTD base+24(FP), Y2 // [base...] + VPADDD Y2, Y0, Y0 // [base,base+1,...] + + MOVQ BX, DI + SHRQ $3, DI + SHLQ $3, DI + JMP test8x4 +loop8x4: + VMOVDQU Y0, (AX)(SI*4) + VPADDD Y1, Y0, Y0 + ADDQ $8, SI +test8x4: + CMPQ SI, DI + JNE loop8x4 + VZEROUPPER + JMP test1x4 + +loop1x4: + INCQ SI + MOVL CX, DX + IMULL SI, DX + MOVL DX, -4(AX)(SI*4) +test1x4: + CMPQ SI, BX + JNE loop1x4 + RET + +// func writePointersBE128(values [][16]byte, rows sparse.Array) +TEXT ·writePointersBE128(SB), NOSPLIT, $0-48 + MOVQ values_base+0(FP), AX + MOVQ rows_array_ptr+24(FP), BX + MOVQ rows_array_len+32(FP), CX + MOVQ rows_array_off+40(FP), DX + + XORQ SI, SI + JMP test +loop: + PXOR X0, X0 + MOVQ (BX), DI // *[16]byte + CMPQ DI, $0 + JE next + MOVOU (DI), X0 +next: + MOVOU X0, (AX) + ADDQ $16, AX + ADDQ DX, BX + INCQ SI +test: + CMPQ SI, CX + JNE loop + RET diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_be128.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_be128.go new file mode 100644 index 00000000000..da7ee864dc4 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_be128.go @@ -0,0 +1,157 @@ +package parquet + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "slices" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/sparse" +) + +type be128ColumnBuffer struct{ be128Page } + +func 
newBE128ColumnBuffer(typ Type, columnIndex int16, numValues int32) *be128ColumnBuffer { + return &be128ColumnBuffer{ + be128Page: be128Page{ + typ: typ, + values: make([][16]byte, 0, numValues), + columnIndex: ^columnIndex, + }, + } +} + +func (col *be128ColumnBuffer) Clone() ColumnBuffer { + return &be128ColumnBuffer{ + be128Page: be128Page{ + typ: col.typ, + values: slices.Clone(col.values), + columnIndex: col.columnIndex, + }, + } +} + +func (col *be128ColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return be128ColumnIndex{&col.be128Page}, nil +} + +func (col *be128ColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return be128OffsetIndex{&col.be128Page}, nil +} + +func (col *be128ColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *be128ColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *be128ColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *be128ColumnBuffer) Page() Page { return &col.be128Page } + +func (col *be128ColumnBuffer) Reset() { col.values = col.values[:0] } + +func (col *be128ColumnBuffer) Cap() int { return cap(col.values) } + +func (col *be128ColumnBuffer) Len() int { return len(col.values) } + +func (col *be128ColumnBuffer) Less(i, j int) bool { + return lessBE128(&col.values[i], &col.values[j]) +} + +func (col *be128ColumnBuffer) Swap(i, j int) { + col.values[i], col.values[j] = col.values[j], col.values[i] +} + +func (col *be128ColumnBuffer) WriteValues(values []Value) (int, error) { + if n := len(col.values) + len(values); n > cap(col.values) { + col.values = append(make([][16]byte, 0, max(n, 2*cap(col.values))), col.values...) 
+ } + n := len(col.values) + col.values = col.values[:n+len(values)] + newValues := col.values[n:] + for i, v := range values { + copy(newValues[i][:], v.byteArray()) + } + return len(values), nil +} + +func (col *be128ColumnBuffer) writeValues(_ columnLevels, rows sparse.Array) { + if n := len(col.values) + rows.Len(); n > cap(col.values) { + col.values = append(make([][16]byte, 0, max(n, 2*cap(col.values))), col.values...) + } + n := len(col.values) + col.values = col.values[:n+rows.Len()] + sparse.GatherUint128(col.values[n:], rows.Uint128Array()) +} + +func (col *be128ColumnBuffer) writeBoolean(levels columnLevels, value bool) { + var be128Value [16]byte + if value { + be128Value[15] = 1 + } + col.values = append(col.values, be128Value) +} + +func (col *be128ColumnBuffer) writeInt32(levels columnLevels, value int32) { + var be128Value [16]byte + binary.BigEndian.PutUint32(be128Value[12:16], uint32(value)) + col.values = append(col.values, be128Value) +} + +func (col *be128ColumnBuffer) writeInt64(levels columnLevels, value int64) { + var be128Value [16]byte + binary.BigEndian.PutUint64(be128Value[8:16], uint64(value)) + col.values = append(col.values, be128Value) +} + +func (col *be128ColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + var be128Value [16]byte + binary.BigEndian.PutUint32(be128Value[4:8], value[2]) + binary.BigEndian.PutUint32(be128Value[8:12], value[1]) + binary.BigEndian.PutUint32(be128Value[12:16], value[0]) + col.values = append(col.values, be128Value) +} + +func (col *be128ColumnBuffer) writeFloat(levels columnLevels, value float32) { + var be128Value [16]byte + binary.BigEndian.PutUint32(be128Value[12:16], math.Float32bits(value)) + col.values = append(col.values, be128Value) +} + +func (col *be128ColumnBuffer) writeDouble(levels columnLevels, value float64) { + var be128Value [16]byte + binary.BigEndian.PutUint64(be128Value[8:16], math.Float64bits(value)) + col.values = append(col.values, be128Value) +} + +func (col 
*be128ColumnBuffer) writeByteArray(_ columnLevels, value []byte) { + if len(value) != 16 { + panic(fmt.Sprintf("cannot write %d bytes to [16]byte column", len(value))) + } + col.values = append(col.values, [16]byte(value)) +} + +func (col *be128ColumnBuffer) writeNull(levels columnLevels) { + col.values = append(col.values, [16]byte{}) +} + +func (col *be128ColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(col.values))) + case i >= len(col.values): + return 0, io.EOF + default: + for n < len(values) && i < len(col.values) { + values[n] = col.makeValue(&col.values[i]) + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_boolean.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_boolean.go new file mode 100644 index 00000000000..33e06c53e09 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_boolean.go @@ -0,0 +1,221 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type booleanColumnBuffer struct{ booleanPage } + +func newBooleanColumnBuffer(typ Type, columnIndex int16, numValues int32) *booleanColumnBuffer { + // Boolean values are bit-packed, we can fit up to 8 values per byte. 
+ bufferSize := (numValues + 7) / 8 + return &booleanColumnBuffer{ + booleanPage: booleanPage{ + typ: typ, + bits: memory.SliceBufferFor[byte](int(bufferSize)), + columnIndex: ^columnIndex, + }, + } +} + +func (col *booleanColumnBuffer) Clone() ColumnBuffer { + return &booleanColumnBuffer{ + booleanPage: booleanPage{ + typ: col.typ, + bits: col.bits.Clone(), + offset: col.offset, + numValues: col.numValues, + columnIndex: col.columnIndex, + }, + } +} + +func (col *booleanColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return booleanColumnIndex{&col.booleanPage}, nil +} + +func (col *booleanColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return booleanOffsetIndex{&col.booleanPage}, nil +} + +func (col *booleanColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *booleanColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *booleanColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *booleanColumnBuffer) Page() Page { return &col.booleanPage } + +func (col *booleanColumnBuffer) Reset() { + col.bits.Reset() + col.offset = 0 + col.numValues = 0 +} + +func (col *booleanColumnBuffer) Cap() int { return 8 * col.bits.Cap() } + +func (col *booleanColumnBuffer) Len() int { return int(col.numValues) } + +func (col *booleanColumnBuffer) Less(i, j int) bool { + a := col.valueAt(i) + b := col.valueAt(j) + return a != b && !a +} + +func (col *booleanColumnBuffer) valueAt(i int) bool { + bits := col.bits.Slice() + j := uint32(i) / 8 + k := uint32(i) % 8 + return ((bits[j] >> k) & 1) != 0 +} + +func (col *booleanColumnBuffer) setValueAt(i int, v bool) { + // `offset` is always zero in the page of a column buffer + bits := col.bits.Slice() + j := uint32(i) / 8 + k := uint32(i) % 8 + x := byte(0) + if v { + x = 1 + } + bits[j] = (bits[j] & ^(1 << k)) | (x << k) +} + +func (col *booleanColumnBuffer) Swap(i, j int) { + a := col.valueAt(i) + b := col.valueAt(j) + col.setValueAt(i, b) + col.setValueAt(j, a) +} + +func (col 
*booleanColumnBuffer) WriteBooleans(values []bool) (int, error) { + col.writeValues(columnLevels{}, sparse.MakeBoolArray(values).UnsafeArray()) + return len(values), nil +} + +func (col *booleanColumnBuffer) WriteValues(values []Value) (int, error) { + col.writeValues(columnLevels{}, makeArrayValue(values, offsetOfBool)) + return len(values), nil +} + +func (col *booleanColumnBuffer) writeValues(_ columnLevels, rows sparse.Array) { + numBytes := bitpack.ByteCount(uint(col.numValues) + uint(rows.Len())) + if col.bits.Cap() < numBytes { + col.bits.Grow(numBytes - col.bits.Len()) + } + col.bits.Resize(numBytes) + bits := col.bits.Slice() + i := 0 + r := 8 - (int(col.numValues) % 8) + bytes := rows.Uint8Array() + + if r <= bytes.Len() { + // First we attempt to write enough bits to align the number of values + // in the column buffer on 8 bytes. After this step the next bit should + // be written at the zero'th index of a byte of the buffer. + if r < 8 { + var b byte + for i < r { + v := bytes.Index(i) + b |= (v & 1) << uint(i) + i++ + } + x := uint(col.numValues) / 8 + y := uint(col.numValues) % 8 + bits[x] = (b << y) | (bits[x] & ^(0xFF << y)) + col.numValues += int32(i) + } + + if n := ((bytes.Len() - i) / 8) * 8; n > 0 { + // At this stage, we know that that we have at least 8 bits to write + // and the bits will be aligned on the address of a byte in the + // output buffer. We can work on 8 values per loop iteration, + // packing them into a single byte and writing it to the output + // buffer. This effectively reduces by 87.5% the number of memory + // stores that the program needs to perform to generate the values. 
+ i += sparse.GatherBits(bits[col.numValues/8:], bytes.Slice(i, i+n)) + col.numValues += int32(n) + } + } + + for i < bytes.Len() { + x := uint(col.numValues) / 8 + y := uint(col.numValues) % 8 + b := bytes.Index(i) + bits[x] = ((b & 1) << y) | (bits[x] & ^(1 << y)) + col.numValues++ + i++ + } + + col.bits.Resize(bitpack.ByteCount(uint(col.numValues))) +} + +func (col *booleanColumnBuffer) writeBoolean(levels columnLevels, value bool) { + numBytes := bitpack.ByteCount(uint(col.numValues) + 1) + if col.bits.Cap() < numBytes { + col.bits.Grow(numBytes - col.bits.Len()) + } + col.bits.Resize(numBytes) + bits := col.bits.Slice() + x := uint(col.numValues) / 8 + y := uint(col.numValues) % 8 + bit := byte(0) + if value { + bit = 1 + } + bits[x] = (bit << y) | (bits[x] & ^(1 << y)) + col.numValues++ +} + +func (col *booleanColumnBuffer) writeInt32(levels columnLevels, value int32) { + col.writeBoolean(levels, value != 0) +} + +func (col *booleanColumnBuffer) writeInt64(levels columnLevels, value int64) { + col.writeBoolean(levels, value != 0) +} + +func (col *booleanColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + col.writeBoolean(levels, !value.IsZero()) +} + +func (col *booleanColumnBuffer) writeFloat(levels columnLevels, value float32) { + col.writeBoolean(levels, value != 0) +} + +func (col *booleanColumnBuffer) writeDouble(levels columnLevels, value float64) { + col.writeBoolean(levels, value != 0) +} + +func (col *booleanColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + col.writeBoolean(levels, len(value) != 0) +} + +func (col *booleanColumnBuffer) writeNull(levels columnLevels) { + col.writeBoolean(levels, false) +} + +func (col *booleanColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(col.numValues)) + case i >= int(col.numValues): + return 0, io.EOF + default: + for n < len(values) && i < 
int(col.numValues) { + values[n] = col.makeValue(col.valueAt(i)) + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_byte_array.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_byte_array.go new file mode 100644 index 00000000000..64b9e16907c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_byte_array.go @@ -0,0 +1,271 @@ +package parquet + +import ( + "bytes" + "io" + "strconv" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding/plain" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type byteArrayColumnBuffer struct { + byteArrayPage + lengths memory.SliceBuffer[uint32] + scratch memory.SliceBuffer[byte] +} + +func newByteArrayColumnBuffer(typ Type, columnIndex int16, numValues int32) *byteArrayColumnBuffer { + return &byteArrayColumnBuffer{ + byteArrayPage: byteArrayPage{ + typ: typ, + offsets: memory.SliceBufferFor[uint32](int(numValues)), + columnIndex: ^columnIndex, + }, + lengths: memory.SliceBufferFor[uint32](int(numValues)), + } +} + +func (col *byteArrayColumnBuffer) Clone() ColumnBuffer { + return &byteArrayColumnBuffer{ + byteArrayPage: byteArrayPage{ + typ: col.typ, + values: col.cloneValues(), + offsets: col.cloneOffsets(), + columnIndex: col.columnIndex, + }, + lengths: col.cloneLengths(), + } +} + +func (col *byteArrayColumnBuffer) cloneLengths() memory.SliceBuffer[uint32] { + return col.lengths.Clone() +} + +func (col *byteArrayColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return byteArrayColumnIndex{col.page()}, nil +} + +func (col *byteArrayColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return byteArrayOffsetIndex{col.page()}, nil +} + +func (col *byteArrayColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *byteArrayColumnBuffer) Dictionary() Dictionary { return nil } + +func (col 
*byteArrayColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *byteArrayColumnBuffer) page() *byteArrayPage { + lengths := col.lengths.Slice() + offsets := col.offsets.Slice() + + if len(lengths) > 0 && orderOfUint32(offsets) < 1 { // unordered? + if col.scratch.Cap() < col.values.Len() { + col.scratch.Grow(col.values.Len()) + } + col.scratch.Resize(0) + + for i := range lengths { + n := col.scratch.Len() + col.scratch.Append(col.index(i)...) + offsets[i] = uint32(n) + } + + col.values, col.scratch = col.scratch, col.values + } + col.offsets.Resize(len(lengths)) + col.offsets.AppendValue(uint32(col.values.Len())) + return &col.byteArrayPage +} + +func (col *byteArrayColumnBuffer) Page() Page { + return col.page() +} + +func (col *byteArrayColumnBuffer) Reset() { + col.values.Reset() + col.offsets.Reset() + col.lengths.Reset() +} + +func (col *byteArrayColumnBuffer) NumRows() int64 { return int64(col.Len()) } + +func (col *byteArrayColumnBuffer) NumValues() int64 { return int64(col.Len()) } + +func (col *byteArrayColumnBuffer) Cap() int { return col.lengths.Cap() } + +func (col *byteArrayColumnBuffer) Len() int { return col.lengths.Len() } + +func (col *byteArrayColumnBuffer) Less(i, j int) bool { + return bytes.Compare(col.index(i), col.index(j)) < 0 +} + +func (col *byteArrayColumnBuffer) Swap(i, j int) { + col.offsets.Swap(i, j) + col.lengths.Swap(i, j) +} + +func (col *byteArrayColumnBuffer) Write(b []byte) (int, error) { + _, n, err := col.writeByteArrays(b) + return n, err +} + +func (col *byteArrayColumnBuffer) WriteByteArrays(values []byte) (int, error) { + n, _, err := col.writeByteArrays(values) + return n, err +} + +func (col *byteArrayColumnBuffer) writeByteArrays(values []byte) (count, bytes int, err error) { + baseCount := col.lengths.Len() + baseBytes := col.values.Len() + (plain.ByteArrayLengthSize * col.lengths.Len()) + + err = plain.RangeByteArray(values, func(value []byte) error { + 
col.offsets.AppendValue(uint32(col.values.Len())) + col.lengths.AppendValue(uint32(len(value))) + col.values.Append(value...) + return nil + }) + + count = col.lengths.Len() - baseCount + bytes = (col.values.Len() - baseBytes) + (plain.ByteArrayLengthSize * count) + return count, bytes, err +} + +func (col *byteArrayColumnBuffer) WriteValues(values []Value) (int, error) { + col.writeValues(columnLevels{}, makeArrayValue(values, offsetOfPtr)) + return len(values), nil +} + +func (col *byteArrayColumnBuffer) writeValues(levels columnLevels, rows sparse.Array) { + n := rows.Len() + if n == 0 { + return + } + + stringArray := rows.StringArray() + totalBytes := 0 + for i := range n { + totalBytes += len(stringArray.Index(i)) + } + + offsetsStart := col.offsets.Len() + lengthsStart := col.lengths.Len() + valuesStart := col.values.Len() + + col.offsets.Resize(offsetsStart + n) + col.lengths.Resize(lengthsStart + n) + col.values.Resize(valuesStart + totalBytes) + + offsets := col.offsets.Slice()[:offsetsStart+n] + lengths := col.lengths.Slice()[:lengthsStart+n] + values := col.values.Slice()[:valuesStart+totalBytes] + + valueOffset := valuesStart + for i := range n { + s := stringArray.Index(i) + offsets[offsetsStart+i] = uint32(valueOffset) + lengths[lengthsStart+i] = uint32(len(s)) + copy(values[valueOffset:], s) + valueOffset += len(s) + } +} + +func (col *byteArrayColumnBuffer) writeBoolean(levels columnLevels, value bool) { + offset := col.values.Len() + col.values.AppendFunc(func(b []byte) []byte { + return strconv.AppendBool(b, value) + }) + col.offsets.AppendValue(uint32(offset)) + col.lengths.AppendValue(uint32(col.values.Len() - offset)) +} + +func (col *byteArrayColumnBuffer) writeInt32(levels columnLevels, value int32) { + offset := col.values.Len() + col.values.AppendFunc(func(b []byte) []byte { + return strconv.AppendInt(b, int64(value), 10) + }) + col.offsets.AppendValue(uint32(offset)) + col.lengths.AppendValue(uint32(col.values.Len() - offset)) +} + +func 
(col *byteArrayColumnBuffer) writeInt64(levels columnLevels, value int64) { + offset := col.values.Len() + col.values.AppendFunc(func(b []byte) []byte { + return strconv.AppendInt(b, value, 10) + }) + col.offsets.AppendValue(uint32(offset)) + col.lengths.AppendValue(uint32(col.values.Len() - offset)) +} + +func (col *byteArrayColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + offset := col.values.Len() + col.values.AppendFunc(func(b []byte) []byte { + result, _ := value.Int().AppendText(b) + return result + }) + col.offsets.AppendValue(uint32(offset)) + col.lengths.AppendValue(uint32(col.values.Len() - offset)) +} + +func (col *byteArrayColumnBuffer) writeFloat(levels columnLevels, value float32) { + offset := col.values.Len() + col.values.AppendFunc(func(b []byte) []byte { + return strconv.AppendFloat(b, float64(value), 'g', -1, 32) + }) + col.offsets.AppendValue(uint32(offset)) + col.lengths.AppendValue(uint32(col.values.Len() - offset)) +} + +func (col *byteArrayColumnBuffer) writeDouble(levels columnLevels, value float64) { + offset := col.values.Len() + col.values.AppendFunc(func(b []byte) []byte { + return strconv.AppendFloat(b, value, 'g', -1, 64) + }) + col.offsets.AppendValue(uint32(offset)) + col.lengths.AppendValue(uint32(col.values.Len() - offset)) +} + +func (col *byteArrayColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + col.offsets.AppendValue(uint32(col.values.Len())) + col.lengths.AppendValue(uint32(len(value))) + col.values.Append(value...) 
+} + +func (col *byteArrayColumnBuffer) writeNull(levels columnLevels) { + col.offsets.AppendValue(uint32(col.values.Len())) + col.lengths.AppendValue(0) +} + +func (col *byteArrayColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + numLengths := col.lengths.Len() + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(numLengths)) + case i >= numLengths: + return 0, io.EOF + default: + for n < len(values) && i < numLengths { + values[n] = col.makeValueBytes(col.index(i)) + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} + +func (col *byteArrayColumnBuffer) index(i int) []byte { + offsets := col.offsets.Slice() + lengths := col.lengths.Slice() + values := col.values.Slice() + offset := offsets[i] + length := lengths[i] + end := offset + length + return values[offset:end:end] +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_double.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_double.go new file mode 100644 index 00000000000..a11c0adf59a --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_double.go @@ -0,0 +1,145 @@ +package parquet + +import ( + "fmt" + "io" + "strconv" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type doubleColumnBuffer struct{ doublePage } + +func newDoubleColumnBuffer(typ Type, columnIndex int16, numValues int32) *doubleColumnBuffer { + return &doubleColumnBuffer{ + doublePage: doublePage{ + typ: typ, + values: memory.SliceBufferFor[float64](int(numValues)), + columnIndex: ^columnIndex, + }, + } +} + +func (col *doubleColumnBuffer) Clone() ColumnBuffer { + return &doubleColumnBuffer{ + doublePage: doublePage{ + typ: col.typ, + values: col.values.Clone(), + columnIndex: col.columnIndex, + }, + } +} + +func (col *doubleColumnBuffer) ColumnIndex() 
(ColumnIndex, error) { + return doubleColumnIndex{&col.doublePage}, nil +} + +func (col *doubleColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return doubleOffsetIndex{&col.doublePage}, nil +} + +func (col *doubleColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *doubleColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *doubleColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *doubleColumnBuffer) Page() Page { return &col.doublePage } + +func (col *doubleColumnBuffer) Reset() { col.values.Reset() } + +func (col *doubleColumnBuffer) Cap() int { return col.values.Cap() } + +func (col *doubleColumnBuffer) Len() int { return col.values.Len() } + +func (col *doubleColumnBuffer) Less(i, j int) bool { return col.values.Less(i, j) } + +func (col *doubleColumnBuffer) Swap(i, j int) { col.values.Swap(i, j) } + +func (col *doubleColumnBuffer) Write(b []byte) (int, error) { + if (len(b) % 8) != 0 { + return 0, fmt.Errorf("cannot write DOUBLE values from input of size %d", len(b)) + } + col.values.Append(unsafecast.Slice[float64](b)...) + return len(b), nil +} + +func (col *doubleColumnBuffer) WriteDoubles(values []float64) (int, error) { + col.values.Append(values...) 
+ return len(values), nil +} + +func (col *doubleColumnBuffer) WriteValues(values []Value) (int, error) { + col.writeValues(columnLevels{}, makeArrayValue(values, offsetOfU64)) + return len(values), nil +} + +func (col *doubleColumnBuffer) writeValues(levels columnLevels, rows sparse.Array) { + offset := col.values.Len() + col.values.Resize(offset + rows.Len()) + sparse.GatherFloat64(col.values.Slice()[offset:], rows.Float64Array()) +} + +func (col *doubleColumnBuffer) writeBoolean(levels columnLevels, value bool) { + var uintValue float64 + if value { + uintValue = 1 + } + col.values.AppendValue(uintValue) +} + +func (col *doubleColumnBuffer) writeInt32(levels columnLevels, value int32) { + col.values.AppendValue(float64(value)) +} + +func (col *doubleColumnBuffer) writeInt64(levels columnLevels, value int64) { + col.values.AppendValue(float64(value)) +} + +func (col *doubleColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + col.values.AppendValue(float64(value.Int32())) +} + +func (col *doubleColumnBuffer) writeFloat(levels columnLevels, value float32) { + col.values.AppendValue(float64(value)) +} + +func (col *doubleColumnBuffer) writeDouble(levels columnLevels, value float64) { + col.values.AppendValue(float64(value)) +} + +func (col *doubleColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + floatValue, err := strconv.ParseFloat(unsafecast.String(value), 64) + if err != nil { + panic("cannot write byte array to double column: " + err.Error()) + } + col.values.AppendValue(floatValue) +} + +func (col *doubleColumnBuffer) writeNull(levels columnLevels) { + col.values.AppendValue(0) +} + +func (col *doubleColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + colValues := col.values.Slice() + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(colValues))) + case i >= len(colValues): + return 0, io.EOF + default: + for n < len(values) && i < len(colValues) { + 
values[n] = col.makeValue(colValues[i]) + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_fixed_len_byte_array.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_fixed_len_byte_array.go new file mode 100644 index 00000000000..ea6bb406a0c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_fixed_len_byte_array.go @@ -0,0 +1,218 @@ +package parquet + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + "unsafe" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type fixedLenByteArrayColumnBuffer struct { + fixedLenByteArrayPage + tmp []byte + buf [32]byte +} + +func newFixedLenByteArrayColumnBuffer(typ Type, columnIndex int16, numValues int32) *fixedLenByteArrayColumnBuffer { + size := typ.Length() + col := &fixedLenByteArrayColumnBuffer{ + fixedLenByteArrayPage: fixedLenByteArrayPage{ + typ: typ, + size: size, + data: memory.SliceBufferFor[byte](int(numValues) * size), + columnIndex: ^columnIndex, + }, + } + if size <= len(col.buf) { + col.tmp = col.buf[:size] + } else { + col.tmp = make([]byte, size) + } + return col +} + +func (col *fixedLenByteArrayColumnBuffer) Clone() ColumnBuffer { + return &fixedLenByteArrayColumnBuffer{ + fixedLenByteArrayPage: fixedLenByteArrayPage{ + typ: col.typ, + size: col.size, + data: col.data.Clone(), + columnIndex: col.columnIndex, + }, + tmp: make([]byte, col.size), + } +} + +func (col *fixedLenByteArrayColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return fixedLenByteArrayColumnIndex{&col.fixedLenByteArrayPage}, nil +} + +func (col *fixedLenByteArrayColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return fixedLenByteArrayOffsetIndex{&col.fixedLenByteArrayPage}, nil +} + +func (col *fixedLenByteArrayColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col 
*fixedLenByteArrayColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *fixedLenByteArrayColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *fixedLenByteArrayColumnBuffer) Page() Page { return &col.fixedLenByteArrayPage } + +func (col *fixedLenByteArrayColumnBuffer) Reset() { col.data.Reset() } + +func (col *fixedLenByteArrayColumnBuffer) Cap() int { return col.data.Cap() / col.size } + +func (col *fixedLenByteArrayColumnBuffer) Len() int { return col.data.Len() / col.size } + +func (col *fixedLenByteArrayColumnBuffer) Less(i, j int) bool { + return bytes.Compare(col.index(i), col.index(j)) < 0 +} + +func (col *fixedLenByteArrayColumnBuffer) Swap(i, j int) { + t, u, v := col.tmp[:col.size], col.index(i), col.index(j) + copy(t, u) + copy(u, v) + copy(v, t) +} + +func (col *fixedLenByteArrayColumnBuffer) index(i int) []byte { + data := col.data.Slice() + j := (i + 0) * col.size + k := (i + 1) * col.size + return data[j:k:k] +} + +func (col *fixedLenByteArrayColumnBuffer) Write(b []byte) (int, error) { + n, err := col.WriteFixedLenByteArrays(b) + return n * col.size, err +} + +func (col *fixedLenByteArrayColumnBuffer) WriteFixedLenByteArrays(values []byte) (int, error) { + if len(values) == 0 { + return 0, nil + } + d, m := len(values)/col.size, len(values)%col.size + if d == 0 || m != 0 { + return 0, fmt.Errorf("cannot write FIXED_LEN_BYTE_ARRAY values of size %d from input of size %d", col.size, len(values)) + } + col.data.Append(values...) + return d, nil +} + +func (col *fixedLenByteArrayColumnBuffer) WriteValues(values []Value) (int, error) { + for i, v := range values { + if n := len(v.byteArray()); n != col.size { + return i, fmt.Errorf("cannot write FIXED_LEN_BYTE_ARRAY values of size %d from input of size %d", col.size, n) + } + col.data.Append(v.byteArray()...) 
+ } + return len(values), nil +} + +func (col *fixedLenByteArrayColumnBuffer) writeValues(_ columnLevels, rows sparse.Array) { + n := col.size * rows.Len() + i := col.data.Len() + j := col.data.Len() + n + + if col.data.Cap() < j { + col.data.Grow(j - col.data.Len()) + } + + col.data.Resize(j) + data := col.data.Slice() + newData := data[i:] + + for i := range rows.Len() { + p := rows.Index(i) + copy(newData[i*col.size:], unsafe.Slice((*byte)(p), col.size)) + } +} + +func (col *fixedLenByteArrayColumnBuffer) writeBoolean(levels columnLevels, value bool) { + var fixedLenByteArrayValue [1]byte + if value { + fixedLenByteArrayValue[0] = 1 + } + col.writeBigEndian(fixedLenByteArrayValue[:]) +} + +func (col *fixedLenByteArrayColumnBuffer) writeInt32(levels columnLevels, value int32) { + var fixedLenByteArrayValue [4]byte + binary.BigEndian.PutUint32(fixedLenByteArrayValue[:], uint32(value)) + col.writeBigEndian(fixedLenByteArrayValue[:]) +} + +func (col *fixedLenByteArrayColumnBuffer) writeInt64(levels columnLevels, value int64) { + var fixedLenByteArrayValue [8]byte + binary.BigEndian.PutUint64(fixedLenByteArrayValue[:], uint64(value)) + col.writeBigEndian(fixedLenByteArrayValue[:]) +} + +func (col *fixedLenByteArrayColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + var fixedLenByteArrayValue [12]byte + binary.BigEndian.PutUint32(fixedLenByteArrayValue[0:4], value[2]) + binary.BigEndian.PutUint32(fixedLenByteArrayValue[4:8], value[1]) + binary.BigEndian.PutUint32(fixedLenByteArrayValue[8:12], value[0]) + col.writeBigEndian(fixedLenByteArrayValue[:]) +} + +func (col *fixedLenByteArrayColumnBuffer) writeFloat(levels columnLevels, value float32) { + var fixedLenByteArrayValue [4]byte + binary.BigEndian.PutUint32(fixedLenByteArrayValue[:], math.Float32bits(value)) + col.writeBigEndian(fixedLenByteArrayValue[:]) +} + +func (col *fixedLenByteArrayColumnBuffer) writeDouble(levels columnLevels, value float64) { + var fixedLenByteArrayValue [8]byte + 
binary.BigEndian.PutUint64(fixedLenByteArrayValue[:], math.Float64bits(value)) + col.writeBigEndian(fixedLenByteArrayValue[:]) +} + +func (col *fixedLenByteArrayColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + if col.size != len(value) { + panic(fmt.Sprintf("cannot write byte array of length %d to fixed length byte array column of size %d", len(value), col.size)) + } + col.data.Append(value...) +} + +func (col *fixedLenByteArrayColumnBuffer) writeNull(levels columnLevels) { + clear(col.tmp) + col.data.Append(col.tmp...) +} + +func (col *fixedLenByteArrayColumnBuffer) writeBigEndian(value []byte) { + if col.size < len(value) { + panic(fmt.Sprintf("cannot write byte array of length %d to fixed length byte array column of size %d", len(value), col.size)) + } + clear(col.tmp) + copy(col.tmp[col.size-len(value):], value) + col.data.Append(col.tmp...) +} + +func (col *fixedLenByteArrayColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + data := col.data.Slice() + i := int(offset) * col.size + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(data)/col.size)) + case i >= len(data): + return 0, io.EOF + default: + for n < len(values) && i < len(data) { + values[n] = col.makeValueBytes(data[i : i+col.size]) + n++ + i += col.size + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_float.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_float.go new file mode 100644 index 00000000000..4baa9fc45a6 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_float.go @@ -0,0 +1,145 @@ +package parquet + +import ( + "fmt" + "io" + "strconv" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type floatColumnBuffer struct{ floatPage } + +func 
newFloatColumnBuffer(typ Type, columnIndex int16, numValues int32) *floatColumnBuffer { + return &floatColumnBuffer{ + floatPage: floatPage{ + typ: typ, + values: memory.SliceBufferFor[float32](int(numValues)), + columnIndex: ^columnIndex, + }, + } +} + +func (col *floatColumnBuffer) Clone() ColumnBuffer { + return &floatColumnBuffer{ + floatPage: floatPage{ + typ: col.typ, + values: col.values.Clone(), + columnIndex: col.columnIndex, + }, + } +} + +func (col *floatColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return floatColumnIndex{&col.floatPage}, nil +} + +func (col *floatColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return floatOffsetIndex{&col.floatPage}, nil +} + +func (col *floatColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *floatColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *floatColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *floatColumnBuffer) Page() Page { return &col.floatPage } + +func (col *floatColumnBuffer) Reset() { col.values.Reset() } + +func (col *floatColumnBuffer) Cap() int { return col.values.Cap() } + +func (col *floatColumnBuffer) Len() int { return col.values.Len() } + +func (col *floatColumnBuffer) Less(i, j int) bool { return col.values.Less(i, j) } + +func (col *floatColumnBuffer) Swap(i, j int) { col.values.Swap(i, j) } + +func (col *floatColumnBuffer) Write(b []byte) (int, error) { + if (len(b) % 4) != 0 { + return 0, fmt.Errorf("cannot write FLOAT values from input of size %d", len(b)) + } + col.values.Append(unsafecast.Slice[float32](b)...) + return len(b), nil +} + +func (col *floatColumnBuffer) WriteFloats(values []float32) (int, error) { + col.values.Append(values...) 
+ return len(values), nil +} + +func (col *floatColumnBuffer) WriteValues(values []Value) (int, error) { + col.writeValues(columnLevels{}, makeArrayValue(values, offsetOfU32)) + return len(values), nil +} + +func (col *floatColumnBuffer) writeValues(levels columnLevels, rows sparse.Array) { + offset := col.values.Len() + col.values.Resize(offset + rows.Len()) + sparse.GatherFloat32(col.values.Slice()[offset:], rows.Float32Array()) +} + +func (col *floatColumnBuffer) writeBoolean(levels columnLevels, value bool) { + var uintValue float32 + if value { + uintValue = 1 + } + col.values.AppendValue(uintValue) +} + +func (col *floatColumnBuffer) writeInt32(levels columnLevels, value int32) { + col.values.AppendValue(float32(value)) +} + +func (col *floatColumnBuffer) writeInt64(levels columnLevels, value int64) { + col.values.AppendValue(float32(value)) +} + +func (col *floatColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + col.values.AppendValue(float32(value.Int32())) +} + +func (col *floatColumnBuffer) writeFloat(levels columnLevels, value float32) { + col.values.AppendValue(float32(value)) +} + +func (col *floatColumnBuffer) writeDouble(levels columnLevels, value float64) { + col.values.AppendValue(float32(value)) +} + +func (col *floatColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + floatValue, err := strconv.ParseFloat(unsafecast.String(value), 32) + if err != nil { + panic("cannot write byte array to float column: " + err.Error()) + } + col.values.AppendValue(float32(floatValue)) +} + +func (col *floatColumnBuffer) writeNull(levels columnLevels) { + col.values.AppendValue(0) +} + +func (col *floatColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + colValues := col.values.Slice() + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(colValues))) + case i >= len(colValues): + return 0, io.EOF + default: + for n < len(values) && i < len(colValues) { + 
values[n] = col.makeValue(colValues[i]) + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_int32.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_int32.go new file mode 100644 index 00000000000..3fc7ff9e8dc --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_int32.go @@ -0,0 +1,145 @@ +package parquet + +import ( + "fmt" + "io" + "strconv" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type int32ColumnBuffer struct{ int32Page } + +func newInt32ColumnBuffer(typ Type, columnIndex int16, numValues int32) *int32ColumnBuffer { + return &int32ColumnBuffer{ + int32Page: int32Page{ + typ: typ, + values: memory.SliceBufferFor[int32](int(numValues)), + columnIndex: ^columnIndex, + }, + } +} + +func (col *int32ColumnBuffer) Clone() ColumnBuffer { + return &int32ColumnBuffer{ + int32Page: int32Page{ + typ: col.typ, + values: col.values.Clone(), + columnIndex: col.columnIndex, + }, + } +} + +func (col *int32ColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return int32ColumnIndex{&col.int32Page}, nil +} + +func (col *int32ColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return int32OffsetIndex{&col.int32Page}, nil +} + +func (col *int32ColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *int32ColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *int32ColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *int32ColumnBuffer) Page() Page { return &col.int32Page } + +func (col *int32ColumnBuffer) Reset() { col.values.Reset() } + +func (col *int32ColumnBuffer) Cap() int { return col.values.Cap() } + +func (col *int32ColumnBuffer) Len() int { return col.values.Len() } + +func (col *int32ColumnBuffer) Less(i, j int) bool { return col.values.Less(i, j) } + 
+func (col *int32ColumnBuffer) Swap(i, j int) { col.values.Swap(i, j) } + +func (col *int32ColumnBuffer) Write(b []byte) (int, error) { + if (len(b) % 4) != 0 { + return 0, fmt.Errorf("cannot write INT32 values from input of size %d", len(b)) + } + col.values.Append(unsafecast.Slice[int32](b)...) + return len(b), nil +} + +func (col *int32ColumnBuffer) WriteInt32s(values []int32) (int, error) { + col.values.Append(values...) + return len(values), nil +} + +func (col *int32ColumnBuffer) WriteValues(values []Value) (int, error) { + col.writeValues(columnLevels{}, makeArrayValue(values, offsetOfU32)) + return len(values), nil +} + +func (col *int32ColumnBuffer) writeValues(levels columnLevels, rows sparse.Array) { + offset := col.values.Len() + col.values.Resize(offset + rows.Len()) + sparse.GatherInt32(col.values.Slice()[offset:], rows.Int32Array()) +} + +func (col *int32ColumnBuffer) writeBoolean(levels columnLevels, value bool) { + var intValue int32 + if value { + intValue = 1 + } + col.values.AppendValue(intValue) +} + +func (col *int32ColumnBuffer) writeInt32(levels columnLevels, value int32) { + col.values.AppendValue(value) +} + +func (col *int32ColumnBuffer) writeInt64(levels columnLevels, value int64) { + col.values.AppendValue(int32(value)) +} + +func (col *int32ColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + col.values.AppendValue(value.Int32()) +} + +func (col *int32ColumnBuffer) writeFloat(levels columnLevels, value float32) { + col.values.AppendValue(int32(value)) +} + +func (col *int32ColumnBuffer) writeDouble(levels columnLevels, value float64) { + col.values.AppendValue(int32(value)) +} + +func (col *int32ColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + intValue, err := strconv.ParseInt(unsafecast.String(value), 10, 32) + if err != nil { + panic("cannot write byte array to int32 column: " + err.Error()) + } + col.values.AppendValue(int32(intValue)) +} + +func (col *int32ColumnBuffer) writeNull(levels 
columnLevels) { + col.values.AppendValue(0) +} + +func (col *int32ColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + colValues := col.values.Slice() + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(colValues))) + case i >= len(colValues): + return 0, io.EOF + default: + for n < len(values) && i < len(colValues) { + values[n] = col.makeValue(colValues[i]) + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_int64.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_int64.go new file mode 100644 index 00000000000..73c75afeac6 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_int64.go @@ -0,0 +1,146 @@ +package parquet + +import ( + "fmt" + "io" + "strconv" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type int64ColumnBuffer struct{ int64Page } + +func newInt64ColumnBuffer(typ Type, columnIndex int16, numValues int32) *int64ColumnBuffer { + return &int64ColumnBuffer{ + int64Page: int64Page{ + typ: typ, + values: memory.SliceBufferFor[int64](int(numValues)), + columnIndex: ^columnIndex, + }, + } +} + +func (col *int64ColumnBuffer) Clone() ColumnBuffer { + cloned := &int64ColumnBuffer{ + int64Page: int64Page{ + typ: col.typ, + columnIndex: col.columnIndex, + }, + } + cloned.values.Append(col.values.Slice()...) 
+ return cloned +} + +func (col *int64ColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return int64ColumnIndex{&col.int64Page}, nil +} + +func (col *int64ColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return int64OffsetIndex{&col.int64Page}, nil +} + +func (col *int64ColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *int64ColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *int64ColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *int64ColumnBuffer) Page() Page { return &col.int64Page } + +func (col *int64ColumnBuffer) Reset() { col.values.Reset() } + +func (col *int64ColumnBuffer) Cap() int { return col.values.Cap() } + +func (col *int64ColumnBuffer) Len() int { return col.values.Len() } + +func (col *int64ColumnBuffer) Less(i, j int) bool { return col.values.Less(i, j) } + +func (col *int64ColumnBuffer) Swap(i, j int) { col.values.Swap(i, j) } + +func (col *int64ColumnBuffer) Write(b []byte) (int, error) { + if (len(b) % 8) != 0 { + return 0, fmt.Errorf("cannot write INT64 values from input of size %d", len(b)) + } + col.values.Append(unsafecast.Slice[int64](b)...) + return len(b), nil +} + +func (col *int64ColumnBuffer) WriteInt64s(values []int64) (int, error) { + col.values.Append(values...) 
+ return len(values), nil +} + +func (col *int64ColumnBuffer) WriteValues(values []Value) (int, error) { + col.writeValues(columnLevels{}, makeArrayValue(values, offsetOfU64)) + return len(values), nil +} + +func (col *int64ColumnBuffer) writeValues(levels columnLevels, rows sparse.Array) { + offset := col.values.Len() + col.values.Resize(offset + rows.Len()) + sparse.GatherInt64(col.values.Slice()[offset:], rows.Int64Array()) +} + +func (col *int64ColumnBuffer) writeBoolean(levels columnLevels, value bool) { + var intValue int64 + if value { + intValue = 1 + } + col.values.AppendValue(intValue) +} + +func (col *int64ColumnBuffer) writeInt32(levels columnLevels, value int32) { + col.values.AppendValue(int64(value)) +} + +func (col *int64ColumnBuffer) writeInt64(levels columnLevels, value int64) { + col.values.AppendValue(value) +} + +func (col *int64ColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + col.values.AppendValue(value.Int64()) +} + +func (col *int64ColumnBuffer) writeFloat(levels columnLevels, value float32) { + col.values.AppendValue(int64(value)) +} + +func (col *int64ColumnBuffer) writeDouble(levels columnLevels, value float64) { + col.values.AppendValue(int64(value)) +} + +func (col *int64ColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + intValue, err := strconv.ParseInt(unsafecast.String(value), 10, 64) + if err != nil { + panic("cannot write byte array to int64 column: " + err.Error()) + } + col.values.AppendValue(intValue) +} + +func (col *int64ColumnBuffer) writeNull(levels columnLevels) { + col.values.AppendValue(0) +} + +func (col *int64ColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + colValues := col.values.Slice() + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(colValues))) + case i >= len(colValues): + return 0, io.EOF + default: + for n < len(values) && i < len(colValues) { + values[n] = col.makeValue(colValues[i]) + n++ + 
i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_int96.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_int96.go new file mode 100644 index 00000000000..f539294d7e3 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_int96.go @@ -0,0 +1,149 @@ +package parquet + +import ( + "fmt" + "io" + "math/big" + "slices" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/sparse" +) + +type int96ColumnBuffer struct{ int96Page } + +func newInt96ColumnBuffer(typ Type, columnIndex int16, numValues int32) *int96ColumnBuffer { + return &int96ColumnBuffer{ + int96Page: int96Page{ + typ: typ, + values: make([]deprecated.Int96, 0, numValues), + columnIndex: ^columnIndex, + }, + } +} + +func (col *int96ColumnBuffer) Clone() ColumnBuffer { + return &int96ColumnBuffer{ + int96Page: int96Page{ + typ: col.typ, + values: slices.Clone(col.values), + columnIndex: col.columnIndex, + }, + } +} + +func (col *int96ColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return int96ColumnIndex{&col.int96Page}, nil +} + +func (col *int96ColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return int96OffsetIndex{&col.int96Page}, nil +} + +func (col *int96ColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *int96ColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *int96ColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *int96ColumnBuffer) Page() Page { return &col.int96Page } + +func (col *int96ColumnBuffer) Reset() { col.values = col.values[:0] } + +func (col *int96ColumnBuffer) Cap() int { return cap(col.values) } + +func (col *int96ColumnBuffer) Len() int { return len(col.values) } + +func (col *int96ColumnBuffer) Less(i, j int) bool { return col.values[i].Less(col.values[j]) } + +func (col *int96ColumnBuffer) Swap(i, j int) { + col.values[i], 
col.values[j] = col.values[j], col.values[i] +} + +func (col *int96ColumnBuffer) Write(b []byte) (int, error) { + if (len(b) % 12) != 0 { + return 0, fmt.Errorf("cannot write INT96 values from input of size %d", len(b)) + } + col.values = append(col.values, unsafecast.Slice[deprecated.Int96](b)...) + return len(b), nil +} + +func (col *int96ColumnBuffer) WriteInt96s(values []deprecated.Int96) (int, error) { + col.values = append(col.values, values...) + return len(values), nil +} + +func (col *int96ColumnBuffer) WriteValues(values []Value) (int, error) { + for _, v := range values { + col.values = append(col.values, v.Int96()) + } + return len(values), nil +} + +func (col *int96ColumnBuffer) writeValues(_ columnLevels, rows sparse.Array) { + for i := range rows.Len() { + p := rows.Index(i) + col.values = append(col.values, *(*deprecated.Int96)(p)) + } +} + +func (col *int96ColumnBuffer) writeBoolean(levels columnLevels, value bool) { + if value { + col.writeInt96(levels, deprecated.Int96{1, 0, 0}) + } else { + col.writeInt96(levels, deprecated.Int96{0, 0, 0}) + } +} + +func (col *int96ColumnBuffer) writeInt32(levels columnLevels, value int32) { + col.writeInt96(levels, deprecated.Int32ToInt96(value)) +} + +func (col *int96ColumnBuffer) writeInt64(levels columnLevels, value int64) { + col.writeInt96(levels, deprecated.Int64ToInt96(value)) +} + +func (col *int96ColumnBuffer) writeInt96(_ columnLevels, value deprecated.Int96) { + col.values = append(col.values, value) +} + +func (col *int96ColumnBuffer) writeFloat(levels columnLevels, value float32) { + col.writeInt96(levels, deprecated.Int64ToInt96(int64(value))) +} + +func (col *int96ColumnBuffer) writeDouble(levels columnLevels, value float64) { + col.writeInt96(levels, deprecated.Int64ToInt96(int64(value))) +} + +func (col *int96ColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + v, ok := new(big.Int).SetString(string(value), 10) + if !ok || v == nil { + panic("invalid byte array for int96: cannot 
parse") + } + col.writeInt96(levels, deprecated.Int64ToInt96(v.Int64())) +} + +func (col *int96ColumnBuffer) writeNull(_ columnLevels) { + panic("cannot write null to int96 column") +} + +func (col *int96ColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(col.values))) + case i >= len(col.values): + return 0, io.EOF + default: + for n < len(values) && i < len(col.values) { + values[n] = col.makeValue(col.values[i]) + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_json.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_json.go new file mode 100644 index 00000000000..efea249d312 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_json.go @@ -0,0 +1,226 @@ +package parquet + +import ( + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "time" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/jsonlite" +) + +var jsonNull jsonlite.Value + +func init() { + v, _ := jsonlite.Parse("null") + jsonNull = *v +} + +func jsonParse(data []byte) (*jsonlite.Value, error) { + if len(data) == 0 { + return &jsonNull, nil + } + return jsonlite.Parse(unsafecast.String(data)) +} + +func writeJSONToLeaf(col ColumnBuffer, levels columnLevels, val *jsonlite.Value, node Node) { + typ := node.Type() + if typ.Kind() == ByteArray { + if logicalType := typ.LogicalType(); logicalType != nil && logicalType.Json != nil { + writeJSONToByteArray(col, levels, val, node) + return + } + } + switch val.Kind() { + case jsonlite.Null: + col.writeNull(levels) + case jsonlite.True, jsonlite.False: + col.writeBoolean(levels, val.Kind() == jsonlite.True) + case jsonlite.Number: + writeJSONNumber(col, levels, val.Number(), node) + case jsonlite.String: + writeJSONString(col, levels, val.String(), node) + default: + 
writeJSONToByteArray(col, levels, val, node) + } +} + +func writeJSONToByteArray(col ColumnBuffer, levels columnLevels, val *jsonlite.Value, node Node) { + if val.Kind() == jsonlite.Null && node.Optional() { + col.writeNull(levels) + } else { + col.writeByteArray(levels, unsafeByteArrayFromString(val.JSON())) + } +} + +func writeJSONToGroup(columns []ColumnBuffer, levels columnLevels, val *jsonlite.Value, node Node, writers []fieldWriter) { + if val.Kind() != jsonlite.Object { + for i := range writers { + w := &writers[i] + w.writeValue(columns, levels, reflect.Value{}) + } + return + } + + for i := range writers { + w := &writers[i] + f := val.Lookup(w.fieldName) + if f == nil { + w.writeValue(columns, levels, reflect.Value{}) + } else { + w.writeValue(columns, levels, reflect.ValueOf(f)) + } + } +} + +func writeJSONToRepeated(columns []ColumnBuffer, levels columnLevels, val *jsonlite.Value, elementWriter writeValueFunc) { + if val.Kind() == jsonlite.Array { + if val.Len() == 0 { + elementWriter(columns, levels, reflect.Value{}) + return + } + + levels.repetitionDepth++ + levels.definitionLevel++ + + for elem := range val.Array { + elementWriter(columns, levels, reflect.ValueOf(elem)) + levels.repetitionLevel = levels.repetitionDepth + } + return + } + + // Auto-wrap scalar to single-element array + levels.repetitionDepth++ + levels.definitionLevel++ + elementWriter(columns, levels, reflect.ValueOf(val)) +} + +func writeJSONString(col ColumnBuffer, levels columnLevels, str string, node Node) { + typ := node.Type() + + if logicalType := typ.LogicalType(); logicalType != nil { + switch { + case logicalType.Timestamp != nil: + t, err := time.Parse(time.RFC3339, str) + if err != nil { + panic(fmt.Errorf("cannot parse JSON string %q as timestamp: %w", str, err)) + } + writeTime(col, levels, t, node) + return + + case logicalType.Date != nil: + t, err := time.Parse("2006-01-02", str) + if err != nil { + panic(fmt.Errorf("cannot parse JSON string %q as date: %w", str, 
err)) + } + writeTime(col, levels, t, node) + return + + case logicalType.Time != nil: + t, err := time.Parse("15:04:05.000000000", str) + if err != nil { + panic(fmt.Errorf("cannot parse JSON string %q as time: %w", str, err)) + } + d := time.Duration(t.Hour())*time.Hour + + time.Duration(t.Minute())*time.Minute + + time.Duration(t.Second())*time.Second + + time.Duration(t.Nanosecond())*time.Nanosecond + writeDuration(col, levels, d, node) + return + + case logicalType.UUID != nil: + // Only parse UUID strings when writing to binary UUID columns + // (FIXED_LEN_BYTE_ARRAY with 16 bytes). If writing to a STRING + // column with UUID logical type, write the string as-is. + writeUUID(col, levels, str, typ) + return + } + } + + col.writeByteArray(levels, unsafeByteArrayFromString(str)) +} + +func writeJSONNumber(col ColumnBuffer, levels columnLevels, num json.Number, node Node) { + typ := node.Type() + str := num.String() + + if logicalType := typ.LogicalType(); logicalType != nil { + switch { + case logicalType.Timestamp != nil: + // Interpret number as seconds since Unix epoch (with sub-second precision) + f, err := num.Float64() + if err != nil { + panic(fmt.Errorf("cannot convert json.Number %q to float64 for timestamp: %w", num, err)) + } + sec, frac := math.Modf(f) + t := time.Unix(int64(sec), int64(frac*1e9)).UTC() + writeTime(col, levels, t, node) + return + + case logicalType.Date != nil: + // Interpret number as seconds since Unix epoch + i, err := num.Int64() + if err != nil { + panic(fmt.Errorf("cannot convert json.Number %q to int64 for date: %w", num, err)) + } + t := time.Unix(i, 0).UTC() + writeTime(col, levels, t, node) + return + + case logicalType.Time != nil: + // Interpret number as seconds since midnight + f, err := num.Float64() + if err != nil { + panic(fmt.Errorf("cannot convert json.Number %q to float64 for time: %w", num, err)) + } + d := time.Duration(f * float64(time.Second)) + writeDuration(col, levels, d, node) + return + } + } + + 
switch kind := typ.Kind(); kind { + case Boolean: + f, err := num.Float64() + if err != nil { + panic(fmt.Errorf("cannot convert json.Number %q to float64 for boolean: %w", num, err)) + } + col.writeBoolean(levels, f != 0) + + case Int32, Int64: + switch jsonlite.NumberTypeOf(str) { + case jsonlite.Int: + i, err := num.Int64() + if err != nil { + panic(fmt.Errorf("cannot convert json.Number %q to int: %w", num, err)) + } + col.writeInt64(levels, i) + case jsonlite.Uint: + u, err := strconv.ParseUint(str, 10, 64) + if err != nil { + panic(fmt.Errorf("cannot convert json.Number %q to int: %w", num, err)) + } + col.writeInt64(levels, int64(u)) + case jsonlite.Float: + f, err := num.Float64() + if err != nil { + panic(fmt.Errorf("cannot convert json.Number %q to float: %w", num, err)) + } + col.writeInt64(levels, int64(f)) + } + + case Float, Double: + f, err := num.Float64() + if err != nil { + panic(fmt.Errorf("cannot convert json.Number %q to float64: %w", num, err)) + } + col.writeDouble(levels, f) + + default: + col.writeByteArray(levels, unsafeByteArrayFromString(str)) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_optional.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_optional.go new file mode 100644 index 00000000000..0fbaed79e13 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_optional.go @@ -0,0 +1,367 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +// optionalColumnBuffer is an implementation of the ColumnBuffer interface used +// as a wrapper to an underlying ColumnBuffer to manage the creation of +// definition levels. +// +// Null values are not written to the underlying column; instead, the buffer +// tracks offsets of row values in the column, null row values are represented +// by the value -1 and a definition level less than the max. 
+// +// This column buffer type is used for all leaf columns that have a non-zero +// max definition level and a zero repetition level, which may be because the +// column or one of its parent(s) are marked optional. +type optionalColumnBuffer struct { + base ColumnBuffer + reordered bool + maxDefinitionLevel byte + rows memory.SliceBuffer[int32] + sortIndex memory.SliceBuffer[int32] + definitionLevels memory.SliceBuffer[byte] + nullOrdering nullOrdering +} + +func newOptionalColumnBuffer(base ColumnBuffer, maxDefinitionLevel byte, nullOrdering nullOrdering) *optionalColumnBuffer { + return &optionalColumnBuffer{ + base: base, + maxDefinitionLevel: maxDefinitionLevel, + nullOrdering: nullOrdering, + } +} + +func (col *optionalColumnBuffer) Clone() ColumnBuffer { + return &optionalColumnBuffer{ + base: col.base.Clone(), + reordered: col.reordered, + maxDefinitionLevel: col.maxDefinitionLevel, + rows: col.rows.Clone(), + definitionLevels: col.definitionLevels.Clone(), + nullOrdering: col.nullOrdering, + } +} + +func (col *optionalColumnBuffer) Type() Type { + return col.base.Type() +} + +func (col *optionalColumnBuffer) NumValues() int64 { + return int64(col.definitionLevels.Len()) +} + +func (col *optionalColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return columnIndexOfNullable(col.base, col.maxDefinitionLevel, col.definitionLevels.Slice()) +} + +func (col *optionalColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return col.base.OffsetIndex() +} + +func (col *optionalColumnBuffer) BloomFilter() BloomFilter { + return col.base.BloomFilter() +} + +func (col *optionalColumnBuffer) Dictionary() Dictionary { + return col.base.Dictionary() +} + +func (col *optionalColumnBuffer) Column() int { + return col.base.Column() +} + +func (col *optionalColumnBuffer) Pages() Pages { + return onePage(col.Page()) +} + +func (col *optionalColumnBuffer) Page() Page { + // No need for any cyclic sorting if the rows have not been reordered. 
+ // This case is also important because the cyclic sorting modifies the + // buffer which makes it unsafe to read the buffer concurrently. + if col.reordered { + numNulls := countLevelsNotEqual(col.definitionLevels.Slice(), col.maxDefinitionLevel) + numValues := col.rows.Len() - numNulls + + if numValues > 0 { + if col.sortIndex.Cap() < numValues { + col.sortIndex = memory.SliceBufferFor[int32](numValues) + } + col.sortIndex.Resize(numValues) + sortIndex := col.sortIndex.Slice() + rows := col.rows.Slice() + i := 0 + for _, j := range rows { + if j >= 0 { + sortIndex[j] = int32(i) + i++ + } + } + + // Cyclic sort: O(N) + for i := range sortIndex { + for j := int(sortIndex[i]); i != j; j = int(sortIndex[i]) { + col.base.Swap(i, j) + sortIndex[i], sortIndex[j] = sortIndex[j], sortIndex[i] + } + } + } + + rows := col.rows.Slice() + i := 0 + for _, r := range rows { + if r >= 0 { + rows[i] = int32(i) + i++ + } + } + + col.reordered = false + } + + return newOptionalPage(col.base.Page(), col.maxDefinitionLevel, col.definitionLevels.Slice()) +} + +func (col *optionalColumnBuffer) Reset() { + col.base.Reset() + col.rows.Resize(0) + col.definitionLevels.Resize(0) +} + +func (col *optionalColumnBuffer) Size() int64 { + return int64(4*col.rows.Len()+4*col.sortIndex.Len()+col.definitionLevels.Len()) + col.base.Size() +} + +func (col *optionalColumnBuffer) Cap() int { return col.rows.Cap() } + +func (col *optionalColumnBuffer) Len() int { return col.rows.Len() } + +func (col *optionalColumnBuffer) Less(i, j int) bool { + rows := col.rows.Slice() + definitionLevels := col.definitionLevels.Slice() + return col.nullOrdering( + col.base, + int(rows[i]), + int(rows[j]), + col.maxDefinitionLevel, + definitionLevels[i], + definitionLevels[j], + ) +} + +func (col *optionalColumnBuffer) Swap(i, j int) { + // Because the underlying column does not contain null values, we cannot + // swap its values at indexes i and j. 
We swap the row indexes only, then + // reorder the underlying buffer using a cyclic sort when the buffer is + // materialized into a page view. + col.reordered = true + rows := col.rows.Slice() + definitionLevels := col.definitionLevels.Slice() + rows[i], rows[j] = rows[j], rows[i] + definitionLevels[i], definitionLevels[j] = definitionLevels[j], definitionLevels[i] +} + +func (col *optionalColumnBuffer) WriteValues(values []Value) (n int, err error) { + rowIndex := int32(col.base.Len()) + + for n < len(values) { + // Collect index range of contiguous null values, from i to n. If this + // for loop exhausts the values, all remaining if statements and for + // loops will be no-ops and the loop will terminate. + i := n + for n < len(values) && values[n].definitionLevel != col.maxDefinitionLevel { + n++ + } + + // Write the contiguous null values up until the first non-null value + // obtained in the for loop above. + for _, v := range values[i:n] { + col.rows.AppendValue(-1) + col.definitionLevels.AppendValue(v.definitionLevel) + } + + // Collect index range of contiguous non-null values, from i to n. + i = n + for n < len(values) && values[n].definitionLevel == col.maxDefinitionLevel { + n++ + } + + // As long as i < n we have non-null values still to write. It is + // possible that we just exhausted the input values in which case i == n + // and the outer for loop will terminate. + if i < n { + count, err := col.base.WriteValues(values[i:n]) + + for range count { + col.definitionLevels.AppendValue(col.maxDefinitionLevel) + } + + for count > 0 { + col.rows.AppendValue(rowIndex) + rowIndex++ + count-- + } + + if err != nil { + return n, err + } + } + } + return n, nil +} + +func (col *optionalColumnBuffer) writeValues(levels columnLevels, rows sparse.Array) { + // The row count is zero when writing an null optional value, in which case + // we still need to output a row to the buffer to record the definition + // level. 
+ if rows.Len() == 0 { + col.definitionLevels.AppendValue(levels.definitionLevel) + col.rows.AppendValue(-1) + return + } + + baseLen := col.base.Len() + for range rows.Len() { + col.definitionLevels.AppendValue(levels.definitionLevel) + } + + i := col.rows.Len() + j := col.rows.Len() + rows.Len() + + if j <= col.rows.Cap() { + col.rows.Resize(j) + } else { + col.rows.Grow(j - col.rows.Len()) + col.rows.Resize(j) + } + + rowsSlice := col.rows.Slice() + if levels.definitionLevel != col.maxDefinitionLevel { + broadcastValueInt32(rowsSlice[i:], -1) + } else { + broadcastRangeInt32(rowsSlice[i:], int32(baseLen)) + col.base.writeValues(levels, rows) + } +} + +func (col *optionalColumnBuffer) writeBoolean(levels columnLevels, value bool) { + if levels.definitionLevel != col.maxDefinitionLevel { + col.writeNull(levels) + } else { + col.base.writeBoolean(levels, value) + col.writeLevel() + } +} + +func (col *optionalColumnBuffer) writeInt32(levels columnLevels, value int32) { + if levels.definitionLevel != col.maxDefinitionLevel { + col.writeNull(levels) + } else { + col.base.writeInt32(levels, value) + col.writeLevel() + } +} + +func (col *optionalColumnBuffer) writeInt64(levels columnLevels, value int64) { + if levels.definitionLevel != col.maxDefinitionLevel { + col.writeNull(levels) + } else { + col.base.writeInt64(levels, value) + col.writeLevel() + } +} + +func (col *optionalColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + if levels.definitionLevel != col.maxDefinitionLevel { + col.writeNull(levels) + } else { + col.base.writeInt96(levels, value) + col.writeLevel() + } +} + +func (col *optionalColumnBuffer) writeFloat(levels columnLevels, value float32) { + if levels.definitionLevel != col.maxDefinitionLevel { + col.writeNull(levels) + } else { + col.base.writeFloat(levels, value) + col.writeLevel() + } +} + +func (col *optionalColumnBuffer) writeDouble(levels columnLevels, value float64) { + if levels.definitionLevel != 
col.maxDefinitionLevel { + col.writeNull(levels) + } else { + col.base.writeDouble(levels, value) + col.writeLevel() + } +} + +func (col *optionalColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + if levels.definitionLevel != col.maxDefinitionLevel { + col.writeNull(levels) + } else { + col.base.writeByteArray(levels, value) + col.writeLevel() + } +} + +func (col *optionalColumnBuffer) writeNull(levels columnLevels) { + col.definitionLevels.AppendValue(levels.definitionLevel) + col.rows.AppendValue(-1) +} + +func (col *optionalColumnBuffer) writeLevel() { + col.definitionLevels.AppendValue(col.maxDefinitionLevel) + col.rows.AppendValue(int32(col.base.Len() - 1)) +} + +func (col *optionalColumnBuffer) ReadValuesAt(values []Value, offset int64) (int, error) { + definitionLevels := col.definitionLevels.Slice() + length := int64(len(definitionLevels)) + if offset < 0 { + return 0, errRowIndexOutOfBounds(offset, length) + } + if offset >= length { + return 0, io.EOF + } + if length -= offset; length < int64(len(values)) { + values = values[:length] + } + + numNulls1 := int64(countLevelsNotEqual(definitionLevels[:offset], col.maxDefinitionLevel)) + numNulls2 := int64(countLevelsNotEqual(definitionLevels[offset:offset+length], col.maxDefinitionLevel)) + + if numNulls2 < length { + n, err := col.base.ReadValuesAt(values[:length-numNulls2], offset-numNulls1) + if err != nil { + return n, err + } + } + + if numNulls2 > 0 { + columnIndex := ^int16(col.Column()) + i := numNulls2 - 1 + j := length - 1 + definitionLevelsSlice := definitionLevels[offset : offset+length] + maxDefinitionLevel := col.maxDefinitionLevel + + for n := len(definitionLevelsSlice) - 1; n >= 0 && j > i; n-- { + if definitionLevelsSlice[n] != maxDefinitionLevel { + values[j] = Value{definitionLevel: definitionLevelsSlice[n], columnIndex: columnIndex} + } else { + values[j] = values[i] + i-- + } + j-- + } + } + + return int(length), nil +} diff --git 
a/vendor/github.com/parquet-go/parquet-go/column_buffer_proto.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_proto.go new file mode 100644 index 00000000000..31bb7f1147c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_proto.go @@ -0,0 +1,206 @@ +package parquet + +import ( + "fmt" + "slices" + "strconv" + "strings" + "time" + + "github.com/parquet-go/jsonlite" + "github.com/parquet-go/parquet-go/format" + "github.com/parquet-go/parquet-go/internal/memory" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func writeProtoTimestamp(col ColumnBuffer, levels columnLevels, ts *timestamppb.Timestamp, node Node) { + if ts == nil { + col.writeNull(levels) + return + } + var typ = node.Type() + var unit format.TimeUnit + if lt := typ.LogicalType(); lt != nil && lt.Timestamp != nil { + unit = lt.Timestamp.Unit + } else { + unit = Nanosecond.TimeUnit() + } + var t = ts.AsTime() + var value int64 + switch { + case unit.Millis != nil: + value = t.UnixMilli() + case unit.Micros != nil: + value = t.UnixMicro() + default: + value = t.UnixNano() + } + switch kind := typ.Kind(); kind { + case Int32, Int64: + col.writeInt64(levels, value) + case Float, Double: + col.writeDouble(levels, t.Sub(time.Unix(0, 0)).Seconds()) + case ByteArray: + col.writeByteArray(levels, t.AppendFormat(nil, time.RFC3339Nano)) + default: + panic(fmt.Sprintf("unsupported physical type for timestamp: %v", kind)) + } +} + +func writeProtoDuration(col ColumnBuffer, levels columnLevels, dur *durationpb.Duration, node Node) { + if dur == nil { + col.writeNull(levels) + return + } + d := dur.AsDuration() + switch kind := node.Type().Kind(); kind { + case Int32, Int64: + col.writeInt64(levels, d.Nanoseconds()) + case Float, Double: + col.writeDouble(levels, d.Seconds()) + case 
ByteArray: + col.writeByteArray(levels, unsafeByteArrayFromString(d.String())) + default: + panic(fmt.Sprintf("unsupported physical type for duration: %v", kind)) + } +} + +func writeProtoStruct(col ColumnBuffer, levels columnLevels, s *structpb.Struct, node Node) { + b := memory.SliceBuffer[byte]{} + b.Grow(2 * proto.Size(s)) + writeProtoStructJSON(&b, s) + col.writeByteArray(levels, b.Slice()) + b.Reset() +} + +func writeProtoList(col ColumnBuffer, levels columnLevels, l *structpb.ListValue, node Node) { + b := memory.SliceBuffer[byte]{} + b.Grow(2 * proto.Size(l)) + writeProtoListValueJSON(&b, l) + col.writeByteArray(levels, b.Slice()) + b.Reset() +} + +func writeProtoStructJSON(b *memory.SliceBuffer[byte], s *structpb.Struct) { + if s == nil { + b.Append('n', 'u', 'l', 'l') + return + } + + fields := s.GetFields() + if len(fields) == 0 { + b.Append('{', '}') + return + } + + keys := make([]string, 0, 20) + for key := range fields { + keys = append(keys, key) + } + slices.Sort(keys) + + b.AppendValue('{') + for i, key := range keys { + if i > 0 { + b.AppendValue(',') + } + b.AppendFunc(func(buf []byte) []byte { + return jsonlite.AppendQuote(buf, key) + }) + b.AppendValue(':') + writeProtoValueJSON(b, fields[key]) + } + b.AppendValue('}') +} + +func writeProtoValueJSON(b *memory.SliceBuffer[byte], v *structpb.Value) { + switch k := v.GetKind().(type) { + case *structpb.Value_StringValue: + b.AppendFunc(func(buf []byte) []byte { + return jsonlite.AppendQuote(buf, k.StringValue) + }) + case *structpb.Value_BoolValue: + b.AppendFunc(func(buf []byte) []byte { + return strconv.AppendBool(buf, k.BoolValue) + }) + case *structpb.Value_NumberValue: + b.AppendFunc(func(buf []byte) []byte { + return strconv.AppendFloat(buf, k.NumberValue, 'g', -1, 64) + }) + case *structpb.Value_StructValue: + writeProtoStructJSON(b, k.StructValue) + case *structpb.Value_ListValue: + writeProtoListValueJSON(b, k.ListValue) + default: + b.Append('n', 'u', 'l', 'l') + } +} + +func 
writeProtoListValueJSON(b *memory.SliceBuffer[byte], l *structpb.ListValue) { + if l == nil { + b.Append('n', 'u', 'l', 'l') + return + } + values := l.GetValues() + b.AppendValue('[') + for i, v := range values { + if i > 0 { + b.AppendValue(',') + } + writeProtoValueJSON(b, v) + } + b.AppendValue(']') +} + +func writeProtoAny(col ColumnBuffer, levels columnLevels, a *anypb.Any, node Node) { + if a == nil { + col.writeNull(levels) + return + } + data, err := proto.Marshal(a) + if err != nil { + panic(fmt.Sprintf("failed to marshal anypb.Any: %v", err)) + } + col.writeByteArray(levels, data) +} + +// makeNestedMap creates a nested map structure from a dot-separated path. +// For example, "testproto.ProtoPayload" with value v creates: +// map["testproto"] = map["ProtoPayload"] = v +func makeNestedMap(path string, value any) any { + components := make([]string, 0, 8) + for component := range strings.SplitSeq(path, ".") { + components = append(components, component) + } + + result := value + for i := len(components) - 1; i >= 0; i-- { + result = map[string]any{ + components[i]: result, + } + } + return result +} + +// navigateToNestedGroup walks through a nested group structure following the given path. +// The path is expected to be a dot-separated string (e.g., "testproto.ProtoPayload"). +// Returns the node at the end of the path, or panics if the path doesn't match the schema. 
+func navigateToNestedGroup(node Node, path string) Node { + for component := range strings.SplitSeq(path, ".") { + var found bool + for _, field := range node.Fields() { + if field.Name() == component { + node, found = field, true + break + } + } + if !found { + panic(fmt.Sprintf("field %q not found in schema while navigating path %q", component, path)) + } + } + return node +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_proto_any.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_proto_any.go new file mode 100644 index 00000000000..0c166448826 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_proto_any.go @@ -0,0 +1,60 @@ +//go:build !purego + +package parquet + +import ( + "fmt" + "reflect" + "strings" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/known/anypb" +) + +func writeProtoAnyToGroup(msg *anypb.Any, columns []ColumnBuffer, levels columnLevels, writers []fieldWriter, node Node, value *reflect.Value) bool { + if msg == nil { + for i := range writers { + w := &writers[i] + w.writeValue(columns, levels, reflect.Value{}) + } + return true + } + + typeURL := msg.GetTypeUrl() + const prefix = "type.googleapis.com/" + if !strings.HasPrefix(typeURL, prefix) { + panic(fmt.Sprintf("invalid type_url %q: expected %q prefix", typeURL, prefix)) + } + path := typeURL[len(prefix):] + _ = navigateToNestedGroup(node, path) + + unmarshaled, err := msg.UnmarshalNew() + if err != nil { + panic(fmt.Sprintf("failed to unmarshal Any: %v", err)) + } + + *value = reflect.ValueOf(makeNestedMap(path, unmarshaled)) + return false +} + +func writeProtoMessageToGroup(msg proto.Message, columns []ColumnBuffer, levels columnLevels, writers []fieldWriter) { + protoMsg := msg.ProtoReflect() + descriptor := protoMsg.Descriptor() + for i := range writers { + w := &writers[i] + protoField := descriptor.Fields().ByName(protoreflect.Name(w.fieldName)) + if 
protoField == nil || !protoMsg.Has(protoField) { + w.writeValue(columns, levels, reflect.Value{}) + continue + } + protoValue := protoMsg.Get(protoField) + var fieldValue reflect.Value + if protoField.Kind() == protoreflect.MessageKind { + fieldValue = reflect.ValueOf(protoValue.Message().Interface()) + } else { + fieldValue = reflect.ValueOf(protoValue.Interface()) + } + w.writeValue(columns, levels, fieldValue) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_proto_purego.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_proto_purego.go new file mode 100644 index 00000000000..58e35ab64cc --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_proto_purego.go @@ -0,0 +1,84 @@ +//go:build purego + +package parquet + +import ( + "fmt" + "iter" + "reflect" + "strings" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +func writeProtoAnyToGroup(msg *anypb.Any, columns []ColumnBuffer, levels columnLevels, writers []fieldWriter, node Node, value *reflect.Value) bool { + if msg == nil { + for i := range writers { + w := &writers[i] + w.writeValue(columns, levels, reflect.Value{}) + } + return true + } + + typeURL := msg.GetTypeUrl() + const prefix = "type.googleapis.com/" + if !strings.HasPrefix(typeURL, prefix) { + panic(fmt.Sprintf("invalid type_url %q: expected %q prefix", typeURL, prefix)) + } + path := typeURL[len(prefix):] + _ = navigateToNestedGroup(node, path) + + unmarshaled, err := msg.UnmarshalNew() + if err != nil { + panic(fmt.Sprintf("failed to unmarshal Any: %v", err)) + } + + *value = reflect.ValueOf(makeNestedMap(path, unmarshaled)) + return false +} + +func writeProtoMessageToGroup(msg proto.Message, columns []ColumnBuffer, levels columnLevels, writers []fieldWriter) { + msgValue := reflect.ValueOf(msg) + if msgValue.Kind() == reflect.Ptr { + msgValue = msgValue.Elem() + } + for i := range writers { + w := &writers[i] + fieldValue := 
findFieldByProtoName(msgValue, w.fieldName) + w.writeValue(columns, levels, fieldValue) + } +} + +func findFieldByProtoName(structValue reflect.Value, protoName string) reflect.Value { + structType := structValue.Type() + for i := range structType.NumField() { + f := structType.Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" && tag != "-" { + if name := parseProtoNameFromTag(tag); name == protoName { + return structValue.Field(i) + } + } + } + return reflect.Value{} +} + +func parseProtoNameFromTag(tag string) string { + for name, value := range parseProtoStructTag(tag) { + if name == "name" { + return value + } + } + return "" +} + +func parseProtoStructTag(tag string) iter.Seq2[string, string] { + return func(yield func(string, string) bool) { + for part := range strings.SplitSeq(tag, ",") { + name, value, _ := strings.Cut(part, "=") + if !yield(name, value) { + return + } + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_purego.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_purego.go new file mode 100644 index 00000000000..6f9996f1cb3 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_purego.go @@ -0,0 +1,30 @@ +//go:build !amd64 || purego + +package parquet + +import "github.com/parquet-go/parquet-go/sparse" + +func broadcastValueInt32(dst []int32, src int8) { + value := 0x01010101 * int32(src) + for i := range dst { + dst[i] = value + } +} + +func broadcastRangeInt32(dst []int32, base int32) { + for i := range dst { + dst[i] = base + int32(i) + } +} + +func writePointersBE128(values [][16]byte, rows sparse.Array) { + for i := range values { + p := *(**[16]byte)(rows.Index(i)) + + if p != nil { + values[i] = *p + } else { + values[i] = [16]byte{} + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_reflect.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_reflect.go new file mode 100644 index 00000000000..02a3614ad12 --- /dev/null +++ 
b/vendor/github.com/parquet-go/parquet-go/column_buffer_reflect.go @@ -0,0 +1,897 @@ +package parquet + +import ( + "cmp" + "encoding/json" + "fmt" + "maps" + "math/bits" + "reflect" + "sort" + "strings" + "sync/atomic" + "time" + "unsafe" + + "github.com/google/uuid" + "github.com/parquet-go/jsonlite" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// isNullValue determines if a reflect.Value represents a null value for parquet encoding. +// This handles various types that can represent null including: +// - Invalid reflect values +// - Nil pointers/interfaces/slices/maps +// - json.RawMessage containing "null" +// - *jsonlite.Value with Kind == jsonlite.Null +// - nil *structpb.Struct, *structpb.ListValue, *structpb.Value +// - Zero values for value types +func isNullValue(value reflect.Value) bool { + if !value.IsValid() { + return true + } + + switch value.Kind() { + case reflect.Pointer, reflect.Interface: + if value.IsNil() { + return true + } + switch v := value.Interface().(type) { + case *jsonlite.Value: + return v.Kind() == jsonlite.Null + case *structpb.Value: + _, isNull := v.GetKind().(*structpb.Value_NullValue) + return isNull + } + return false + + case reflect.Slice: + if value.IsNil() { + return true + } + if value.Type() == reflect.TypeFor[json.RawMessage]() { + return string(value.Bytes()) == "null" + } + return false + + case reflect.Map: + return value.IsNil() + + default: + return value.IsZero() + } +} + +type anymap interface { + entries() (keys, values sparse.Array) +} + +type fieldWriter 
struct { + fieldName string + writeValue writeValueFunc +} + +type gomap[K cmp.Ordered] struct { + keys []K + vals reflect.Value // slice + swap func(int, int) + size uintptr +} + +func (m *gomap[K]) Len() int { return len(m.keys) } + +func (m *gomap[K]) Less(i, j int) bool { return cmp.Compare(m.keys[i], m.keys[j]) < 0 } + +func (m *gomap[K]) Swap(i, j int) { + m.keys[i], m.keys[j] = m.keys[j], m.keys[i] + m.swap(i, j) +} + +func (m *gomap[K]) entries() (keys, values sparse.Array) { + return makeArrayFromSlice(m.keys), makeArray(m.vals.UnsafePointer(), m.Len(), m.size) +} + +type reflectMap struct { + keys reflect.Value // slice + vals reflect.Value // slice + numKeys int + keySize uintptr + valSize uintptr +} + +func (m *reflectMap) entries() (keys, values sparse.Array) { + return makeArray(m.keys.UnsafePointer(), m.numKeys, m.keySize), makeArray(m.vals.UnsafePointer(), m.numKeys, m.valSize) +} + +func makeMapFuncOf(mapType reflect.Type) func(reflect.Value) anymap { + switch mapType.Key().Kind() { + case reflect.Int: + return makeMapFunc[int](mapType) + case reflect.Int8: + return makeMapFunc[int8](mapType) + case reflect.Int16: + return makeMapFunc[int16](mapType) + case reflect.Int32: + return makeMapFunc[int32](mapType) + case reflect.Int64: + return makeMapFunc[int64](mapType) + case reflect.Uint: + return makeMapFunc[uint](mapType) + case reflect.Uint8: + return makeMapFunc[uint8](mapType) + case reflect.Uint16: + return makeMapFunc[uint16](mapType) + case reflect.Uint32: + return makeMapFunc[uint32](mapType) + case reflect.Uint64: + return makeMapFunc[uint64](mapType) + case reflect.Uintptr: + return makeMapFunc[uintptr](mapType) + case reflect.Float32: + return makeMapFunc[float32](mapType) + case reflect.Float64: + return makeMapFunc[float64](mapType) + case reflect.String: + return makeMapFunc[string](mapType) + } + + keyType := mapType.Key() + valType := mapType.Elem() + + mapBuffer := &reflectMap{ + keySize: keyType.Size(), + valSize: valType.Size(), + 
} + + keySliceType := reflect.SliceOf(keyType) + valSliceType := reflect.SliceOf(valType) + return func(mapValue reflect.Value) anymap { + length := mapValue.Len() + + if !mapBuffer.keys.IsValid() || mapBuffer.keys.Len() < length { + capacity := 1 << bits.Len(uint(length)) + mapBuffer.keys = reflect.MakeSlice(keySliceType, capacity, capacity) + mapBuffer.vals = reflect.MakeSlice(valSliceType, capacity, capacity) + } + + mapBuffer.numKeys = length + for i, mapIter := 0, mapValue.MapRange(); mapIter.Next(); i++ { + mapBuffer.keys.Index(i).SetIterKey(mapIter) + mapBuffer.vals.Index(i).SetIterValue(mapIter) + } + + return mapBuffer + } +} + +func makeMapFunc[K cmp.Ordered](mapType reflect.Type) func(reflect.Value) anymap { + keyType := mapType.Key() + valType := mapType.Elem() + valSliceType := reflect.SliceOf(valType) + mapBuffer := &gomap[K]{size: valType.Size()} + return func(mapValue reflect.Value) anymap { + length := mapValue.Len() + + if cap(mapBuffer.keys) < length { + capacity := 1 << bits.Len(uint(length)) + mapBuffer.keys = make([]K, capacity) + mapBuffer.vals = reflect.MakeSlice(valSliceType, capacity, capacity) + mapBuffer.swap = reflect.Swapper(mapBuffer.vals.Interface()) + } + + mapBuffer.keys = mapBuffer.keys[:length] + for i, mapIter := 0, mapValue.MapRange(); mapIter.Next(); i++ { + reflect.NewAt(keyType, unsafe.Pointer(&mapBuffer.keys[i])).Elem().SetIterKey(mapIter) + mapBuffer.vals.Index(i).SetIterValue(mapIter) + } + + sort.Sort(mapBuffer) + return mapBuffer + } +} + +// writeValueFunc is a function that writes a single reflect.Value to a set of +// column buffers. +// Panics if the value cannot be written (similar to reflect package behavior). +type writeValueFunc func([]ColumnBuffer, columnLevels, reflect.Value) + +// timeOfDayNanos returns the nanoseconds since midnight for the given time. 
+func timeOfDayNanos(t time.Time) int64 { + m := nearestMidnightLessThan(t) + return t.Sub(m).Nanoseconds() +} + +func writeTime(col ColumnBuffer, levels columnLevels, t time.Time, node Node) { + typ := node.Type() + + if logicalType := typ.LogicalType(); logicalType != nil { + switch { + case logicalType.Timestamp != nil: + // TIMESTAMP logical type -> write to int64 + unit := logicalType.Timestamp.Unit + var val int64 + switch { + case unit.Millis != nil: + val = t.UnixMilli() + case unit.Micros != nil: + val = t.UnixMicro() + default: + val = t.UnixNano() + } + col.writeInt64(levels, val) + return + + case logicalType.Date != nil: + // DATE logical type -> write to int32 + col.writeInt32(levels, int32(daysSinceUnixEpoch(t))) + return + + case logicalType.Time != nil: + // TIME logical type -> write time of day + unit := logicalType.Time.Unit + nanos := timeOfDayNanos(t) + switch { + case unit.Millis != nil: + col.writeInt32(levels, int32(nanos/1e6)) + case unit.Micros != nil: + col.writeInt64(levels, nanos/1e3) + default: + col.writeInt64(levels, nanos) + } + return + } + } + + // No time logical type - use physical type + switch typ.Kind() { + case Int32: + // int32 without logical type -> days since epoch + col.writeInt32(levels, int32(daysSinceUnixEpoch(t))) + case Int64: + // int64 without logical type -> nanoseconds since epoch + col.writeInt64(levels, t.UnixNano()) + case Float: + // float -> fractional seconds since epoch + col.writeFloat(levels, float32(float64(t.UnixNano())/1e9)) + case Double: + // double -> fractional seconds since epoch + col.writeDouble(levels, float64(t.UnixNano())/1e9) + case ByteArray: + // byte array -> RFC3339Nano + s := t.Format(time.RFC3339Nano) + col.writeByteArray(levels, unsafe.Slice(unsafe.StringData(s), len(s))) + default: + panic(fmt.Sprintf("cannot write time.Time to column with physical type %v", typ)) + } +} + +func writeDuration(col ColumnBuffer, levels columnLevels, d time.Duration, node Node) { + typ := 
node.Type() + + if logicalType := typ.LogicalType(); logicalType != nil && logicalType.Time != nil { + // TIME logical type + unit := logicalType.Time.Unit + switch { + case unit.Millis != nil: + col.writeInt32(levels, int32(d.Milliseconds())) + case unit.Micros != nil: + col.writeInt64(levels, d.Microseconds()) + default: + col.writeInt64(levels, d.Nanoseconds()) + } + return + } + + // No TIME logical type - use physical type + switch typ.Kind() { + case Int32: + panic("cannot write time.Duration to int32 column without TIME logical type") + case Int64: + // int64 -> nanoseconds + col.writeInt64(levels, d.Nanoseconds()) + case Float: + // float -> seconds + col.writeFloat(levels, float32(d.Seconds())) + case Double: + // double -> seconds + col.writeDouble(levels, d.Seconds()) + case ByteArray: + // byte array -> String() + s := d.String() + col.writeByteArray(levels, unsafe.Slice(unsafe.StringData(s), len(s))) + default: + panic(fmt.Sprintf("cannot write time.Duration to column with physical type %v", typ)) + } +} + +// writeValueFuncOf constructs a function that writes reflect.Values to column buffers. +// It follows the deconstructFuncOf pattern, recursively building functions for the schema tree. +// Returns (nextColumnIndex, writeFunc). 
+func writeValueFuncOf(columnIndex int16, node Node) (int16, writeValueFunc) { + switch { + case node.Optional(): + return writeValueFuncOfOptional(columnIndex, node) + case node.Repeated(): + return writeValueFuncOfRepeated(columnIndex, node) + case isList(node): + return writeValueFuncOfList(columnIndex, node) + case isMap(node): + return writeValueFuncOfMap(columnIndex, node) + default: + return writeValueFuncOfRequired(columnIndex, node) + } +} + +func writeValueFuncOfOptional(columnIndex int16, node Node) (int16, writeValueFunc) { + nextColumnIndex, writeValue := writeValueFuncOf(columnIndex, Required(node)) + return nextColumnIndex, func(columns []ColumnBuffer, levels columnLevels, value reflect.Value) { + if isNullValue(value) { + writeValue(columns, levels, value) + } else { + levels.definitionLevel++ + writeValue(columns, levels, value) + } + } +} + +func writeValueFuncOfRepeated(columnIndex int16, node Node) (int16, writeValueFunc) { + nextColumnIndex, writeValue := writeValueFuncOf(columnIndex, Required(node)) + return nextColumnIndex, func(columns []ColumnBuffer, levels columnLevels, value reflect.Value) { + writeRepatedValue: + if !value.IsValid() { + writeValue(columns, levels, reflect.Value{}) + return + } + + switch msg := value.Interface().(type) { + case *jsonlite.Value: + writeJSONToRepeated(columns, levels, msg, writeValue) + return + + case json.RawMessage: + val, err := jsonParse(msg) + if err != nil { + panic(fmt.Errorf("failed to parse JSON: %w", err)) + } + writeJSONToRepeated(columns, levels, val, writeValue) + return + + case *structpb.Struct: + if msg == nil { + writeValue(columns, levels, reflect.Value{}) + return + } + levels.repetitionDepth++ + levels.definitionLevel++ + writeValue(columns, levels, value) + return + + case *structpb.ListValue: + n := len(msg.GetValues()) + if n == 0 { + writeValue(columns, levels, reflect.Value{}) + return + } + + levels.repetitionDepth++ + levels.definitionLevel++ + + for _, v := range 
msg.GetValues() { + writeValue(columns, levels, structpbValueToReflectValue(v)) + levels.repetitionLevel = levels.repetitionDepth + } + return + + case protoreflect.List: + n := msg.Len() + if n == 0 { + writeValue(columns, levels, reflect.Value{}) + return + } + + levels.repetitionDepth++ + levels.definitionLevel++ + + for i := range n { + var e = msg.Get(i) + var v reflect.Value + if e.IsValid() { + v = reflect.ValueOf(e.Interface()) + } + writeValue(columns, levels, v) + levels.repetitionLevel = levels.repetitionDepth + } + return + } + + switch value.Kind() { + case reflect.Interface, reflect.Pointer: + if value.IsNil() { + writeValue(columns, levels, reflect.Value{}) + return + } + value = value.Elem() + goto writeRepatedValue + + case reflect.Slice, reflect.Array: + n := value.Len() + if n == 0 { + writeValue(columns, levels, reflect.Value{}) + return + } + + levels.repetitionDepth++ + levels.definitionLevel++ + + for i := range n { + writeValue(columns, levels, value.Index(i)) + levels.repetitionLevel = levels.repetitionDepth + } + + default: + levels.repetitionDepth++ + levels.definitionLevel++ + + // If this is a repeated group with a single field, and the value is a scalar, + // wrap the scalar into a struct with that field name. 
+ if !node.Leaf() && value.IsValid() && value.Kind() != reflect.Struct && value.Kind() != reflect.Map { + fields := Required(node).Fields() + if len(fields) == 1 { + field := fields[0] + fieldType := field.GoType() + fieldName := field.Name() + + if value.Type().AssignableTo(fieldType) || value.Type().ConvertibleTo(fieldType) { + structType := reflect.StructOf([]reflect.StructField{ + {Name: fieldName, Type: fieldType}, + }) + wrappedValue := reflect.New(structType).Elem() + + if value.Type().AssignableTo(fieldType) { + wrappedValue.Field(0).Set(value) + } else { + wrappedValue.Field(0).Set(value.Convert(fieldType)) + } + + value = wrappedValue + } + } + } + + writeValue(columns, levels, value) + } + } +} + +func writeValueFuncOfRequired(columnIndex int16, node Node) (int16, writeValueFunc) { + switch { + case node.Leaf(): + return writeValueFuncOfLeaf(columnIndex, node) + default: + return writeValueFuncOfGroup(columnIndex, node) + } +} + +func writeValueFuncOfList(columnIndex int16, node Node) (int16, writeValueFunc) { + return writeValueFuncOf(columnIndex, Repeated(listElementOf(node))) +} + +func writeValueFuncOfMap(columnIndex int16, node Node) (int16, writeValueFunc) { + keyValue := mapKeyValueOf(node) + keyValueType := keyValue.GoType() + keyValueElem := keyValueType.Elem() + keyType := keyValueElem.Field(0).Type + valueType := keyValueElem.Field(1).Type + nextColumnIndex, writeValue := writeValueFuncOf(columnIndex, schemaOf(keyValueElem)) + zeroKeyValue := reflect.Zero(keyValueElem) + + return nextColumnIndex, func(columns []ColumnBuffer, levels columnLevels, mapValue reflect.Value) { + // Check for invalid or nil map first to avoid panic on Interface() or Len() + if !mapValue.IsValid() || mapValue.IsNil() { + writeValue(columns, levels, zeroKeyValue) + return + } + + switch m := mapValue.Interface().(type) { + case protoreflect.Map: + n := m.Len() + if n == 0 { + writeValue(columns, levels, zeroKeyValue) + return + } + + levels.repetitionDepth++ + 
levels.definitionLevel++ + + elem := reflect.New(keyValueElem).Elem() + k := elem.Field(0) + v := elem.Field(1) + + for mapKey, mapVal := range m.Range { + k.Set(reflect.ValueOf(mapKey.Interface()).Convert(keyType)) + v.Set(reflect.ValueOf(mapVal.Interface()).Convert(valueType)) + writeValue(columns, levels, elem) + levels.repetitionLevel = levels.repetitionDepth + } + return + } + + if mapValue.Len() == 0 { + writeValue(columns, levels, zeroKeyValue) + return + } + + levels.repetitionDepth++ + levels.definitionLevel++ + + mapType := mapValue.Type() + mapKey := reflect.New(mapType.Key()).Elem() + mapElem := reflect.New(mapType.Elem()).Elem() + + elem := reflect.New(keyValueElem).Elem() + k := elem.Field(0) + v := elem.Field(1) + + for it := mapValue.MapRange(); it.Next(); { + mapKey.SetIterKey(it) + mapElem.SetIterValue(it) + k.Set(mapKey.Convert(keyType)) + v.Set(mapElem.Convert(valueType)) + writeValue(columns, levels, elem) + levels.repetitionLevel = levels.repetitionDepth + } + } +} + +var structFieldsCache atomic.Value // map[reflect.Type]map[string][]int + +func writeValueFuncOfGroup(columnIndex int16, node Node) (int16, writeValueFunc) { + fields := node.Fields() + writers := make([]fieldWriter, len(fields)) + for i, field := range fields { + writers[i].fieldName = field.Name() + columnIndex, writers[i].writeValue = writeValueFuncOf(columnIndex, field) + } + + return columnIndex, func(columns []ColumnBuffer, levels columnLevels, value reflect.Value) { + writeGroupValue: + if !value.IsValid() { + for i := range writers { + w := &writers[i] + w.writeValue(columns, levels, reflect.Value{}) + } + return + } + + switch t := value.Type(); t.Kind() { + case reflect.Map: + switch { + case t.ConvertibleTo(reflect.TypeFor[map[string]string]()): + m := value.Convert(reflect.TypeFor[map[string]string]()).Interface().(map[string]string) + v := new(string) + for i := range writers { + w := &writers[i] + *v = m[w.fieldName] + w.writeValue(columns, levels, 
reflect.ValueOf(v).Elem()) + } + + case t.ConvertibleTo(reflect.TypeFor[map[string]any]()): + m := value.Convert(reflect.TypeFor[map[string]any]()).Interface().(map[string]any) + for i := range writers { + w := &writers[i] + v := m[w.fieldName] + w.writeValue(columns, levels, reflect.ValueOf(v)) + } + + default: + for i := range writers { + w := &writers[i] + fieldName := reflect.ValueOf(&w.fieldName).Elem() + fieldValue := value.MapIndex(fieldName) + w.writeValue(columns, levels, fieldValue) + } + } + + case reflect.Struct: + cachedFields, _ := structFieldsCache.Load().(map[reflect.Type]map[string][]int) + structFields, ok := cachedFields[t] + if !ok { + visibleStructFields := reflect.VisibleFields(t) + cachedFieldsBefore := cachedFields + structFields = make(map[string][]int, len(visibleStructFields)) + cachedFields = make(map[reflect.Type]map[string][]int, len(cachedFieldsBefore)+1) + cachedFields[t] = structFields + maps.Copy(cachedFields, cachedFieldsBefore) + + for _, visibleStructField := range visibleStructFields { + name := visibleStructField.Name + if tag, ok := visibleStructField.Tag.Lookup("parquet"); ok { + if tagName, _, _ := strings.Cut(tag, ","); tagName != "" { + name = tagName + } + } + structFields[name] = visibleStructField.Index + } + + structFieldsCache.Store(cachedFields) + } + + for i := range writers { + w := &writers[i] + fieldValue := reflect.Value{} + fieldIndex, ok := structFields[w.fieldName] + if ok { + fieldValue = value.FieldByIndex(fieldIndex) + } + w.writeValue(columns, levels, fieldValue) + } + + case reflect.Pointer, reflect.Interface: + if value.IsNil() { + value = reflect.Value{} + goto writeGroupValue + } + + switch msg := value.Interface().(type) { + case *jsonlite.Value: + writeJSONToGroup(columns, levels, msg, node, writers) + case *structpb.Struct: + var fields map[string]*structpb.Value + if msg != nil { + fields = msg.Fields + } + for i := range writers { + w := &writers[i] + v := 
structpbValueToReflectValue(fields[w.fieldName]) + w.writeValue(columns, levels, v) + } + case *anypb.Any: + if writeProtoAnyToGroup(msg, columns, levels, writers, node, &value) { + return + } + goto writeGroupValue + case proto.Message: + writeProtoMessageToGroup(msg, columns, levels, writers) + default: + value = value.Elem() + goto writeGroupValue + } + + case reflect.Slice: + if t == reflect.TypeFor[json.RawMessage]() { + val, err := jsonParse(value.Bytes()) + if err != nil { + panic(fmt.Errorf("failed to parse JSON: %w", err)) + } + writeJSONToGroup(columns, levels, val, node, writers) + } else { + value = reflect.Value{} + goto writeGroupValue + } + + default: + value = reflect.Value{} + goto writeGroupValue + } + } +} + +func writeValueFuncOfLeaf(columnIndex int16, node Node) (int16, writeValueFunc) { + if columnIndex < 0 { + panic("writeValueFuncOfLeaf called with invalid columnIndex -1 (empty group)") + } + if columnIndex > MaxColumnIndex { + panic("row cannot be written because it has more than 127 columns") + } + return columnIndex + 1, func(columns []ColumnBuffer, levels columnLevels, value reflect.Value) { + col := columns[columnIndex] + writeValue: + if !value.IsValid() { + col.writeNull(levels) + return + } + + switch value.Kind() { + case reflect.Pointer, reflect.Interface: + if value.IsNil() { + col.writeNull(levels) + return + } + switch msg := value.Interface().(type) { + case *jsonlite.Value: + writeJSONToLeaf(col, levels, msg, node) + case *json.Number: + writeJSONNumber(col, levels, *msg, node) + case *time.Time: + writeTime(col, levels, *msg, node) + case *time.Duration: + writeDuration(col, levels, *msg, node) + case *timestamppb.Timestamp: + writeProtoTimestamp(col, levels, msg, node) + case *durationpb.Duration: + writeProtoDuration(col, levels, msg, node) + case *wrapperspb.BoolValue: + col.writeBoolean(levels, msg.GetValue()) + case *wrapperspb.Int32Value: + col.writeInt32(levels, msg.GetValue()) + case *wrapperspb.Int64Value: + 
col.writeInt64(levels, msg.GetValue()) + case *wrapperspb.UInt32Value: + col.writeInt32(levels, int32(msg.GetValue())) + case *wrapperspb.UInt64Value: + col.writeInt64(levels, int64(msg.GetValue())) + case *wrapperspb.FloatValue: + col.writeFloat(levels, msg.GetValue()) + case *wrapperspb.DoubleValue: + col.writeDouble(levels, msg.GetValue()) + case *wrapperspb.StringValue: + col.writeByteArray(levels, unsafeByteArrayFromString(msg.GetValue())) + case *wrapperspb.BytesValue: + col.writeByteArray(levels, msg.GetValue()) + case *structpb.Struct: + writeProtoStruct(col, levels, msg, node) + case *structpb.ListValue: + writeProtoList(col, levels, msg, node) + case *anypb.Any: + writeProtoAny(col, levels, msg, node) + default: + value = value.Elem() + goto writeValue + } + return + + case reflect.Bool: + col.writeBoolean(levels, value.Bool()) + return + + case reflect.Int8, reflect.Int16, reflect.Int32: + col.writeInt32(levels, int32(value.Int())) + return + + case reflect.Int: + col.writeInt64(levels, value.Int()) + return + + case reflect.Int64: + if value.Type() == reflect.TypeFor[time.Duration]() { + writeDuration(col, levels, time.Duration(value.Int()), node) + } else { + col.writeInt64(levels, value.Int()) + } + return + + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + col.writeInt32(levels, int32(value.Uint())) + return + + case reflect.Uint, reflect.Uint64: + col.writeInt64(levels, int64(value.Uint())) + return + + case reflect.Float32: + col.writeFloat(levels, float32(value.Float())) + return + + case reflect.Float64: + col.writeDouble(levels, value.Float()) + return + + case reflect.String: + v := value.String() + switch value.Type() { + case reflect.TypeFor[json.Number](): + writeJSONNumber(col, levels, json.Number(v), node) + default: + typ := node.Type() + logicalType := typ.LogicalType() + if logicalType != nil && logicalType.UUID != nil { + writeUUID(col, levels, v, typ) + return + } + col.writeByteArray(levels, unsafeByteArrayFromString(v)) + } + 
return + + case reflect.Slice: + if t := value.Type(); t.Elem().Kind() == reflect.Uint8 { + switch t { + case reflect.TypeFor[json.RawMessage](): + val, err := jsonParse(value.Bytes()) + if err != nil { + panic(fmt.Errorf("failed to parse JSON: %w", err)) + } + writeJSONToLeaf(col, levels, val, node) + default: + col.writeByteArray(levels, value.Bytes()) + } + return + } + + case reflect.Array: + col.writeByteArray(levels, value.Bytes()) + return + + case reflect.Struct: + switch v := value.Interface().(type) { + case time.Time: + writeTime(col, levels, v, node) + return + case deprecated.Int96: + col.writeInt96(levels, v) + return + } + } + + if node.Type().Kind() != ByteArray { + panic(fmt.Sprintf("cannot write value of type %s to leaf column", value.Type())) + } + + if node.Optional() && isNullValue(value) { + col.writeNull(levels) + return + } + + b := memory.SliceBuffer[byte]{} + w := memory.SliceWriter{Buffer: &b} + defer b.Reset() + + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + + if err := enc.Encode(value.Interface()); err != nil { + panic(err) + } + + data := b.Slice() + col.writeByteArray(levels, data[:len(data)-1]) + } +} + +func structpbValueToReflectValue(v *structpb.Value) reflect.Value { + switch kind := v.GetKind().(type) { + case nil: + return reflect.Value{} + case *structpb.Value_NullValue: + return reflect.Value{} + case *structpb.Value_NumberValue: + return reflect.ValueOf(&kind.NumberValue) + case *structpb.Value_StringValue: + return reflect.ValueOf(&kind.StringValue) + case *structpb.Value_BoolValue: + return reflect.ValueOf(&kind.BoolValue) + case *structpb.Value_StructValue: + return reflect.ValueOf(kind.StructValue) + case *structpb.Value_ListValue: + return reflect.ValueOf(kind.ListValue) + default: + panic(fmt.Sprintf("unsupported structpb.Value kind: %T", kind)) + } +} + +func unsafeByteArrayFromString(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +func writeUUID(col ColumnBuffer, levels 
columnLevels, str string, typ Type) { + if typ.Kind() != FixedLenByteArray || typ.Length() != 16 { + panic(fmt.Errorf("cannot write UUID string to non-FIXED_LEN_BYTE_ARRAY(16) column: %q", str)) + } + parsedUUID, err := uuid.Parse(str) + if err != nil { + panic(fmt.Errorf("cannot parse string %q as UUID: %w", str, err)) + } + buf := memory.SliceBuffer[byte]{} + buf.Grow(16) + buf.Append(parsedUUID[:]...) + col.writeByteArray(levels, buf.Slice()) + buf.Reset() +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_repeated.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_repeated.go new file mode 100644 index 00000000000..2073d1e6a38 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_repeated.go @@ -0,0 +1,451 @@ +package parquet + +import ( + "bytes" + "io" + "slices" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/bytealg" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +// repeatedColumnBuffer is an implementation of the ColumnBuffer interface used +// as a wrapper to an underlying ColumnBuffer to manage the creation of +// repetition levels, definition levels, and map rows to the region of the +// underlying buffer that contains their sequence of values. +// +// Null values are not written to the underlying column; instead, the buffer +// tracks offsets of row values in the column, null row values are represented +// by the value -1 and a definition level less than the max. +// +// This column buffer type is used for all leaf columns that have a non-zero +// max repetition level, which may be because the column or one of its parent(s) +// are marked repeated. 
+type repeatedColumnBuffer struct { + base ColumnBuffer + reordered bool + maxRepetitionLevel byte + maxDefinitionLevel byte + rows []offsetMapping + repetitionLevels memory.SliceBuffer[byte] + definitionLevels memory.SliceBuffer[byte] + buffer []Value + reordering *repeatedColumnBuffer + nullOrdering nullOrdering +} + +// The offsetMapping type maps the logical offset of rows within the repetition +// and definition levels, to the base offsets in the underlying column buffers +// where the non-null values have been written. +type offsetMapping struct { + offset uint32 + baseOffset uint32 +} + +func newRepeatedColumnBuffer(base ColumnBuffer, maxRepetitionLevel, maxDefinitionLevel byte, nullOrdering nullOrdering) *repeatedColumnBuffer { + return &repeatedColumnBuffer{ + base: base, + maxRepetitionLevel: maxRepetitionLevel, + maxDefinitionLevel: maxDefinitionLevel, + nullOrdering: nullOrdering, + } +} + +func (col *repeatedColumnBuffer) Clone() ColumnBuffer { + return &repeatedColumnBuffer{ + base: col.base.Clone(), + reordered: col.reordered, + maxRepetitionLevel: col.maxRepetitionLevel, + maxDefinitionLevel: col.maxDefinitionLevel, + rows: slices.Clone(col.rows), + repetitionLevels: col.repetitionLevels.Clone(), + definitionLevels: col.definitionLevels.Clone(), + nullOrdering: col.nullOrdering, + } +} + +func (col *repeatedColumnBuffer) Type() Type { + return col.base.Type() +} + +func (col *repeatedColumnBuffer) NumValues() int64 { + return int64(col.definitionLevels.Len()) +} + +func (col *repeatedColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return columnIndexOfNullable(col.base, col.maxDefinitionLevel, col.definitionLevels.Slice()) +} + +func (col *repeatedColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return col.base.OffsetIndex() +} + +func (col *repeatedColumnBuffer) BloomFilter() BloomFilter { + return col.base.BloomFilter() +} + +func (col *repeatedColumnBuffer) Dictionary() Dictionary { + return col.base.Dictionary() +} + +func (col 
*repeatedColumnBuffer) Column() int { + return col.base.Column() +} + +func (col *repeatedColumnBuffer) Pages() Pages { + return onePage(col.Page()) +} + +func (col *repeatedColumnBuffer) Page() Page { + if col.reordered { + if col.reordering == nil { + col.reordering = col.Clone().(*repeatedColumnBuffer) + } + + column := col.reordering + column.Reset() + maxNumValues := 0 + defer func() { + clearValues(col.buffer[:maxNumValues]) + }() + + baseOffset := 0 + repetitionLevels := col.repetitionLevels.Slice() + definitionLevels := col.definitionLevels.Slice() + + for _, row := range col.rows { + rowOffset := int(row.offset) + rowLength := repeatedRowLength(repetitionLevels[rowOffset:]) + numNulls := countLevelsNotEqual(definitionLevels[rowOffset:rowOffset+rowLength], col.maxDefinitionLevel) + numValues := rowLength - numNulls + + if numValues > 0 { + if numValues > cap(col.buffer) { + col.buffer = make([]Value, numValues) + } else { + col.buffer = col.buffer[:numValues] + } + n, err := col.base.ReadValuesAt(col.buffer, int64(row.baseOffset)) + if err != nil && n < numValues { + return newErrorPage(col.Type(), col.Column(), "reordering rows of repeated column: %w", err) + } + if _, err := column.base.WriteValues(col.buffer); err != nil { + return newErrorPage(col.Type(), col.Column(), "reordering rows of repeated column: %w", err) + } + if numValues > maxNumValues { + maxNumValues = numValues + } + } + + column.rows = append(column.rows, offsetMapping{ + offset: uint32(column.repetitionLevels.Len()), + baseOffset: uint32(baseOffset), + }) + + column.repetitionLevels.Append(repetitionLevels[rowOffset : rowOffset+rowLength]...) + column.definitionLevels.Append(definitionLevels[rowOffset : rowOffset+rowLength]...) 
+ baseOffset += numValues + } + + col.swapReorderingBuffer(column) + col.reordered = false + } + + return newRepeatedPage( + col.base.Page(), + col.maxRepetitionLevel, + col.maxDefinitionLevel, + col.repetitionLevels.Slice(), + col.definitionLevels.Slice(), + ) +} + +func (col *repeatedColumnBuffer) swapReorderingBuffer(buf *repeatedColumnBuffer) { + col.base, buf.base = buf.base, col.base + col.rows, buf.rows = buf.rows, col.rows + col.repetitionLevels, buf.repetitionLevels = buf.repetitionLevels, col.repetitionLevels + col.definitionLevels, buf.definitionLevels = buf.definitionLevels, col.definitionLevels +} + +func (col *repeatedColumnBuffer) Reset() { + col.base.Reset() + col.rows = col.rows[:0] + col.repetitionLevels.Resize(0) + col.definitionLevels.Resize(0) +} + +func (col *repeatedColumnBuffer) Size() int64 { + return int64(8*len(col.rows)+col.repetitionLevels.Len()+col.definitionLevels.Len()) + col.base.Size() +} + +func (col *repeatedColumnBuffer) Cap() int { return cap(col.rows) } + +func (col *repeatedColumnBuffer) Len() int { return len(col.rows) } + +func (col *repeatedColumnBuffer) Less(i, j int) bool { + row1 := col.rows[i] + row2 := col.rows[j] + less := col.nullOrdering + repetitionLevels := col.repetitionLevels.Slice() + definitionLevels := col.definitionLevels.Slice() + row1Length := repeatedRowLength(repetitionLevels[row1.offset:]) + row2Length := repeatedRowLength(repetitionLevels[row2.offset:]) + + for k := 0; k < row1Length && k < row2Length; k++ { + x := int(row1.baseOffset) + y := int(row2.baseOffset) + definitionLevel1 := definitionLevels[int(row1.offset)+k] + definitionLevel2 := definitionLevels[int(row2.offset)+k] + switch { + case less(col.base, x, y, col.maxDefinitionLevel, definitionLevel1, definitionLevel2): + return true + case less(col.base, y, x, col.maxDefinitionLevel, definitionLevel2, definitionLevel1): + return false + } + } + + return row1Length < row2Length +} + +func (col *repeatedColumnBuffer) Swap(i, j int) { + // 
Because the underlying column does not contain null values, and may hold + // an arbitrary number of values per row, we cannot swap its values at + // indexes i and j. We swap the row indexes only, then reorder the base + // column buffer when its view is materialized into a page by creating a + // copy and writing rows back to it following the order of rows in the + // repeated column buffer. + col.reordered = true + col.rows[i], col.rows[j] = col.rows[j], col.rows[i] +} + +func (col *repeatedColumnBuffer) WriteValues(values []Value) (numValues int, err error) { + maxRowLen := 0 + defer func() { + clearValues(col.buffer[:maxRowLen]) + }() + + for i := 0; i < len(values); { + j := i + + if values[j].repetitionLevel == 0 { + j++ + } + + for j < len(values) && values[j].repetitionLevel != 0 { + j++ + } + + if err := col.writeRow(values[i:j]); err != nil { + return numValues, err + } + + if len(col.buffer) > maxRowLen { + maxRowLen = len(col.buffer) + } + + numValues += j - i + i = j + } + + return numValues, nil +} + +func (col *repeatedColumnBuffer) writeRow(row []Value) error { + col.buffer = col.buffer[:0] + + for _, v := range row { + if v.definitionLevel == col.maxDefinitionLevel { + col.buffer = append(col.buffer, v) + } + } + + baseOffset := col.base.NumValues() + if len(col.buffer) > 0 { + if _, err := col.base.WriteValues(col.buffer); err != nil { + return err + } + } + + if row[0].repetitionLevel == 0 { + col.rows = append(col.rows, offsetMapping{ + offset: uint32(col.repetitionLevels.Len()), + baseOffset: uint32(baseOffset), + }) + } + + for _, v := range row { + col.repetitionLevels.AppendValue(v.repetitionLevel) + col.definitionLevels.AppendValue(v.definitionLevel) + } + + return nil +} + +func (col *repeatedColumnBuffer) writeValues(levels columnLevels, row sparse.Array) { + if levels.repetitionLevel == 0 { + col.rows = append(col.rows, offsetMapping{ + offset: uint32(col.repetitionLevels.Len()), + baseOffset: uint32(col.base.NumValues()), + }) + } + + 
if row.Len() == 0 { + col.repetitionLevels.AppendValue(levels.repetitionLevel) + col.definitionLevels.AppendValue(levels.definitionLevel) + return + } + + // Append multiple copies of the level values + count := row.Len() + repStart := col.repetitionLevels.Len() + defStart := col.definitionLevels.Len() + col.repetitionLevels.Resize(repStart + count) + col.definitionLevels.Resize(defStart + count) + bytealg.Broadcast(col.repetitionLevels.Slice()[repStart:], levels.repetitionLevel) + bytealg.Broadcast(col.definitionLevels.Slice()[defStart:], levels.definitionLevel) + + if levels.definitionLevel == col.maxDefinitionLevel { + col.base.writeValues(levels, row) + } +} + +func (col *repeatedColumnBuffer) writeLevel(levels columnLevels) bool { + if levels.repetitionLevel == 0 { + col.rows = append(col.rows, offsetMapping{ + offset: uint32(col.repetitionLevels.Len()), + baseOffset: uint32(col.base.NumValues()), + }) + } + col.repetitionLevels.AppendValue(levels.repetitionLevel) + col.definitionLevels.AppendValue(levels.definitionLevel) + return levels.definitionLevel == col.maxDefinitionLevel +} + +func (col *repeatedColumnBuffer) writeBoolean(levels columnLevels, value bool) { + if col.writeLevel(levels) { + col.base.writeBoolean(levels, value) + } +} + +func (col *repeatedColumnBuffer) writeInt32(levels columnLevels, value int32) { + if col.writeLevel(levels) { + col.base.writeInt32(levels, value) + } +} + +func (col *repeatedColumnBuffer) writeInt64(levels columnLevels, value int64) { + if col.writeLevel(levels) { + col.base.writeInt64(levels, value) + } +} + +func (col *repeatedColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + if col.writeLevel(levels) { + col.base.writeInt96(levels, value) + } +} + +func (col *repeatedColumnBuffer) writeFloat(levels columnLevels, value float32) { + if col.writeLevel(levels) { + col.base.writeFloat(levels, value) + } +} + +func (col *repeatedColumnBuffer) writeDouble(levels columnLevels, value float64) { + if 
col.writeLevel(levels) { + col.base.writeDouble(levels, value) + } +} + +func (col *repeatedColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + if col.writeLevel(levels) { + col.base.writeByteArray(levels, value) + } +} + +func (col *repeatedColumnBuffer) writeNull(levels columnLevels) { + col.writeLevel(levels) +} + +func (col *repeatedColumnBuffer) ReadValuesAt(values []Value, offset int64) (int, error) { + length := int64(col.definitionLevels.Len()) + if offset < 0 { + return 0, errRowIndexOutOfBounds(offset, length) + } + if offset >= length { + return 0, io.EOF + } + if length -= offset; length < int64(len(values)) { + values = values[:length] + } + + definitionLevelsSlice := col.definitionLevels.Slice() + repetitionLevelsSlice := col.repetitionLevels.Slice() + + numNulls1 := int64(countLevelsNotEqual(definitionLevelsSlice[:offset], col.maxDefinitionLevel)) + numNulls2 := int64(countLevelsNotEqual(definitionLevelsSlice[offset:offset+length], col.maxDefinitionLevel)) + + if numNulls2 < length { + n, err := col.base.ReadValuesAt(values[:length-numNulls2], offset-numNulls1) + if err != nil { + return n, err + } + } + + definitionLevels := definitionLevelsSlice[offset : offset+length] + repetitionLevels := repetitionLevelsSlice[offset : offset+length] + + if numNulls2 > 0 { + columnIndex := ^int16(col.Column()) + i := length - numNulls2 - 1 // Last index of non-null values + j := length - 1 // Last index in output values array + maxDefinitionLevel := col.maxDefinitionLevel + + for n := len(definitionLevels) - 1; n >= 0 && j > i; n-- { + if definitionLevels[n] != maxDefinitionLevel { + values[j] = Value{ + repetitionLevel: repetitionLevels[n], + definitionLevel: definitionLevels[n], + columnIndex: columnIndex, + } + } else { + values[j] = values[i] + values[j].repetitionLevel = repetitionLevels[n] + values[j].definitionLevel = maxDefinitionLevel + i-- + } + j-- + } + + // Set levels on remaining non-null values at the beginning + for k := int64(0); 
k <= i; k++ { + values[k].repetitionLevel = repetitionLevels[k] + values[k].definitionLevel = maxDefinitionLevel + } + } else { + // No nulls, but still need to set levels on all values + for i := range values[:length] { + values[i].repetitionLevel = repetitionLevels[i] + values[i].definitionLevel = col.maxDefinitionLevel + } + } + + return int(length), nil +} + +// repeatedRowLength gives the length of the repeated row starting at the +// beginning of the repetitionLevels slice. +func repeatedRowLength(repetitionLevels []byte) int { + // If a repetition level exists, at least one value is required to represent + // the column. + if len(repetitionLevels) > 0 { + // The subsequent levels will represent the start of a new record when + // they go back to zero. + if i := bytes.IndexByte(repetitionLevels[1:], 0); i >= 0 { + return i + 1 + } + } + return len(repetitionLevels) +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_uint32.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_uint32.go new file mode 100644 index 00000000000..55bf555e0ec --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_uint32.go @@ -0,0 +1,146 @@ +package parquet + +import ( + "fmt" + "io" + "strconv" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type uint32ColumnBuffer struct{ uint32Page } + +func newUint32ColumnBuffer(typ Type, columnIndex int16, numValues int32) *uint32ColumnBuffer { + return &uint32ColumnBuffer{ + uint32Page: uint32Page{ + typ: typ, + values: memory.SliceBufferFor[uint32](int(numValues)), + columnIndex: ^columnIndex, + }, + } +} + +func (col *uint32ColumnBuffer) Clone() ColumnBuffer { + cloned := &uint32ColumnBuffer{ + uint32Page: uint32Page{ + typ: col.typ, + columnIndex: col.columnIndex, + }, + } + cloned.values.Append(col.values.Slice()...) 
+ return cloned +} + +func (col *uint32ColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return uint32ColumnIndex{&col.uint32Page}, nil +} + +func (col *uint32ColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return uint32OffsetIndex{&col.uint32Page}, nil +} + +func (col *uint32ColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *uint32ColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *uint32ColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *uint32ColumnBuffer) Page() Page { return &col.uint32Page } + +func (col *uint32ColumnBuffer) Reset() { col.values.Reset() } + +func (col *uint32ColumnBuffer) Cap() int { return col.values.Cap() } + +func (col *uint32ColumnBuffer) Len() int { return col.values.Len() } + +func (col *uint32ColumnBuffer) Less(i, j int) bool { return col.values.Less(i, j) } + +func (col *uint32ColumnBuffer) Swap(i, j int) { col.values.Swap(i, j) } + +func (col *uint32ColumnBuffer) Write(b []byte) (int, error) { + if (len(b) % 4) != 0 { + return 0, fmt.Errorf("cannot write INT32 values from input of size %d", len(b)) + } + col.values.Append(unsafecast.Slice[uint32](b)...) + return len(b), nil +} + +func (col *uint32ColumnBuffer) WriteUint32s(values []uint32) (int, error) { + col.values.Append(values...) 
+ return len(values), nil +} + +func (col *uint32ColumnBuffer) WriteValues(values []Value) (int, error) { + col.writeValues(columnLevels{}, makeArrayValue(values, offsetOfU32)) + return len(values), nil +} + +func (col *uint32ColumnBuffer) writeValues(levels columnLevels, rows sparse.Array) { + offset := col.values.Len() + col.values.Resize(offset + rows.Len()) + sparse.GatherUint32(col.values.Slice()[offset:], rows.Uint32Array()) +} + +func (col *uint32ColumnBuffer) writeBoolean(levels columnLevels, value bool) { + var uintValue uint32 + if value { + uintValue = 1 + } + col.values.AppendValue(uintValue) +} + +func (col *uint32ColumnBuffer) writeInt32(levels columnLevels, value int32) { + col.values.AppendValue(uint32(value)) +} + +func (col *uint32ColumnBuffer) writeInt64(levels columnLevels, value int64) { + col.values.AppendValue(uint32(value)) +} + +func (col *uint32ColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + col.values.AppendValue(uint32(value.Int32())) +} + +func (col *uint32ColumnBuffer) writeFloat(levels columnLevels, value float32) { + col.values.AppendValue(uint32(value)) +} + +func (col *uint32ColumnBuffer) writeDouble(levels columnLevels, value float64) { + col.values.AppendValue(uint32(value)) +} + +func (col *uint32ColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + uintValue, err := strconv.ParseUint(unsafecast.String(value), 10, 32) + if err != nil { + panic("cannot write byte array to uint32 column: " + err.Error()) + } + col.values.AppendValue(uint32(uintValue)) +} + +func (col *uint32ColumnBuffer) writeNull(levels columnLevels) { + col.values.AppendValue(0) +} + +func (col *uint32ColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + colValues := col.values.Slice() + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(colValues))) + case i >= len(colValues): + return 0, io.EOF + default: + for n < len(values) && i < len(colValues) { + 
values[n] = col.makeValue(colValues[i]) + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_uint64.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_uint64.go new file mode 100644 index 00000000000..8520a38d746 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_uint64.go @@ -0,0 +1,146 @@ +package parquet + +import ( + "fmt" + "io" + "strconv" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type uint64ColumnBuffer struct{ uint64Page } + +func newUint64ColumnBuffer(typ Type, columnIndex int16, numValues int32) *uint64ColumnBuffer { + return &uint64ColumnBuffer{ + uint64Page: uint64Page{ + typ: typ, + values: memory.SliceBufferFor[uint64](int(numValues)), + columnIndex: ^columnIndex, + }, + } +} + +func (col *uint64ColumnBuffer) Clone() ColumnBuffer { + cloned := &uint64ColumnBuffer{ + uint64Page: uint64Page{ + typ: col.typ, + columnIndex: col.columnIndex, + }, + } + cloned.values.Append(col.values.Slice()...) 
+ return cloned +} + +func (col *uint64ColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return uint64ColumnIndex{&col.uint64Page}, nil +} + +func (col *uint64ColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return uint64OffsetIndex{&col.uint64Page}, nil +} + +func (col *uint64ColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *uint64ColumnBuffer) Dictionary() Dictionary { return nil } + +func (col *uint64ColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *uint64ColumnBuffer) Page() Page { return &col.uint64Page } + +func (col *uint64ColumnBuffer) Reset() { col.values.Reset() } + +func (col *uint64ColumnBuffer) Cap() int { return col.values.Cap() } + +func (col *uint64ColumnBuffer) Len() int { return col.values.Len() } + +func (col *uint64ColumnBuffer) Less(i, j int) bool { return col.values.Less(i, j) } + +func (col *uint64ColumnBuffer) Swap(i, j int) { col.values.Swap(i, j) } + +func (col *uint64ColumnBuffer) Write(b []byte) (int, error) { + if (len(b) % 8) != 0 { + return 0, fmt.Errorf("cannot write INT64 values from input of size %d", len(b)) + } + col.values.Append(unsafecast.Slice[uint64](b)...) + return len(b), nil +} + +func (col *uint64ColumnBuffer) WriteUint64s(values []uint64) (int, error) { + col.values.Append(values...) 
+ return len(values), nil +} + +func (col *uint64ColumnBuffer) WriteValues(values []Value) (int, error) { + col.writeValues(columnLevels{}, makeArrayValue(values, offsetOfU64)) + return len(values), nil +} + +func (col *uint64ColumnBuffer) writeValues(levels columnLevels, rows sparse.Array) { + offset := col.values.Len() + col.values.Resize(offset + rows.Len()) + sparse.GatherUint64(col.values.Slice()[offset:], rows.Uint64Array()) +} + +func (col *uint64ColumnBuffer) writeBoolean(levels columnLevels, value bool) { + var uintValue uint64 + if value { + uintValue = 1 + } + col.values.AppendValue(uintValue) +} + +func (col *uint64ColumnBuffer) writeInt32(levels columnLevels, value int32) { + col.values.AppendValue(uint64(value)) +} + +func (col *uint64ColumnBuffer) writeInt64(levels columnLevels, value int64) { + col.values.AppendValue(uint64(value)) +} + +func (col *uint64ColumnBuffer) writeInt96(levels columnLevels, value deprecated.Int96) { + col.values.AppendValue(uint64(value.Int32())) +} + +func (col *uint64ColumnBuffer) writeFloat(levels columnLevels, value float32) { + col.values.AppendValue(uint64(value)) +} + +func (col *uint64ColumnBuffer) writeDouble(levels columnLevels, value float64) { + col.values.AppendValue(uint64(value)) +} + +func (col *uint64ColumnBuffer) writeByteArray(levels columnLevels, value []byte) { + uintValue, err := strconv.ParseUint(unsafecast.String(value), 10, 32) + if err != nil { + panic("cannot write byte array to uint64 column: " + err.Error()) + } + col.values.AppendValue(uint64(uintValue)) +} + +func (col *uint64ColumnBuffer) writeNull(levels columnLevels) { + col.values.AppendValue(0) +} + +func (col *uint64ColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + colValues := col.values.Slice() + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(colValues))) + case i >= len(colValues): + return 0, io.EOF + default: + for n < len(values) && i < len(colValues) { + 
values[n] = col.makeValue(colValues[i]) + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + return n, err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer_write.go b/vendor/github.com/parquet-go/parquet-go/column_buffer_write.go new file mode 100644 index 00000000000..34ad0769315 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer_write.go @@ -0,0 +1,786 @@ +package parquet + +import ( + "encoding/json" + "fmt" + "math/bits" + "reflect" + "slices" + "time" + "unsafe" + + "github.com/parquet-go/jsonlite" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" + "google.golang.org/protobuf/types/known/structpb" +) + +// writeRowsFunc is the type of functions that apply rows to a set of column +// buffers. +// +// - columns is the array of column buffer where the rows are written. +// +// - rows is the array of Go values to write to the column buffers. +// +// - levels is used to track the column index, repetition and definition levels +// of values when writing optional or repeated columns. +type writeRowsFunc func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) + +// writeRowsFuncOf generates a writeRowsFunc function for the given Go type and +// parquet schema. The column path indicates the column that the function is +// being generated for in the parquet schema. 
+func writeRowsFuncOf(t reflect.Type, schema *Schema, path columnPath, tagReplacements []StructTagOption) writeRowsFunc { + if leaf, exists := schema.Lookup(path...); exists { + logicalType := leaf.Node.Type().LogicalType() + if logicalType != nil && logicalType.Json != nil { + return writeRowsFuncOfJSON(t, schema, path) + } + } + + switch t { + case reflect.TypeFor[deprecated.Int96](): + return writeRowsFuncOfRequired(t, schema, path) + case reflect.TypeFor[time.Time](): + return writeRowsFuncOfTime(t, schema, path, tagReplacements) + case reflect.TypeFor[json.RawMessage](): + return writeRowsFuncFor[json.RawMessage](schema, path) + case reflect.TypeFor[json.Number](): + return writeRowsFuncFor[json.Number](schema, path) + case reflect.TypeFor[*structpb.Struct](): + return writeRowsFuncFor[*structpb.Struct](schema, path) + case reflect.TypeFor[*jsonlite.Value](): + return writeRowsFuncFor[*jsonlite.Value](schema, path) + } + + switch t.Kind() { + case reflect.Bool, + reflect.Int, + reflect.Uint, + reflect.Int32, + reflect.Uint32, + reflect.Int64, + reflect.Uint64, + reflect.Float32, + reflect.Float64, + reflect.String: + return writeRowsFuncOfRequired(t, schema, path) + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return writeRowsFuncOfRequired(t, schema, path) + } else { + return writeRowsFuncOfSlice(t, schema, path, tagReplacements) + } + case reflect.Array: + if t.Elem().Kind() == reflect.Uint8 { + return writeRowsFuncOfArray(t, schema, path) + } + case reflect.Pointer: + return writeRowsFuncOfPointer(t, schema, path, tagReplacements) + case reflect.Struct: + return writeRowsFuncOfStruct(t, schema, path, tagReplacements) + case reflect.Map: + return writeRowsFuncOfMap(t, schema, path, tagReplacements) + case reflect.Interface: + return writeRowsFuncOfInterface(t, schema, path) + } + panic("cannot convert Go values of type " + typeNameOf(t) + " to parquet value") +} + +func writeRowsFuncOfRequired(t reflect.Type, schema *Schema, path columnPath) 
writeRowsFunc { + column := schema.lazyLoadState().mapping.lookup(path) + columnIndex := column.columnIndex + if columnIndex < 0 { + panic("parquet: column not found: " + path.String()) + } + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + columns[columnIndex].writeValues(levels, rows) + } +} + +func writeRowsFuncOfOptional(t reflect.Type, schema *Schema, path columnPath, writeRows writeRowsFunc) writeRowsFunc { + // For interface types, we just increment the definition level for present + // values without checking for null indexes. + // Interface: handled by writeRowsFuncOfInterface which manages levels internally + writeOptional := func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + writeRows(columns, levels, rows) + return + } + levels.definitionLevel++ + writeRows(columns, levels, rows) + } + + switch t.Kind() { + case reflect.Interface: + return writeOptional + case reflect.Slice: + // For slices (nested lists), we need to distinguish between nil slices + // and empty slices. Nil slices should not increment the definition level + // (they represent null), while non-nil empty slices should increment it + // (they represent an empty list). 
+ if t.Elem().Kind() != reflect.Uint8 { + type sliceHeader struct { + base unsafe.Pointer + len int + cap int + } + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + writeRows(columns, levels, rows) + return + } + // Process each slice individually to check for nil + for i := range rows.Len() { + p := (*sliceHeader)(rows.Index(i)) + elemLevels := levels + // A nil slice has base=nil, while an empty slice has base!=nil but len=0 + // We need to increment definition level for non-nil slices (including empty ones) + if p.base != nil { + elemLevels.definitionLevel++ + } + writeRows(columns, elemLevels, rows.Slice(i, i+1)) + } + } + } + } + + nullIndex := nullIndexFuncOf(t) + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + writeRows(columns, levels, rows) + return + } + + nulls := acquireBitmap(rows.Len()) + defer releaseBitmap(nulls) + nullIndex(nulls.bits, rows) + + nullLevels := levels + levels.definitionLevel++ + // In this function, we are dealing with optional values which are + // neither pointers nor slices; for example, a int32 field marked + // "optional" in its parent struct. + // + // We need to find zero values, which should be represented as nulls + // in the parquet column. In order to minimize the calls to writeRows + // and maximize throughput, we use the nullIndex and nonNullIndex + // functions, which are type-specific implementations of the algorithm. + // + // Sections of the input that are contiguous nulls or non-nulls can be + // sent to a single call to writeRows to be written to the underlying + // buffer since they share the same definition level. + // + // This optimization is defeated by inputs alternating null and non-null + // sequences of single values, we do not expect this condition to be a + // common case. 
+ for i := 0; i < rows.Len(); { + j := 0 + x := i / 64 + y := i % 64 + + if y != 0 { + if b := nulls.bits[x] >> uint(y); b == 0 { + x++ + y = 0 + } else { + y += bits.TrailingZeros64(b) + goto writeNulls + } + } + + for x < len(nulls.bits) && nulls.bits[x] == 0 { + x++ + } + + if x < len(nulls.bits) { + y = bits.TrailingZeros64(nulls.bits[x]) % 64 + } + + writeNulls: + if j = x*64 + y; j > rows.Len() { + j = rows.Len() + } + + if i < j { + writeRows(columns, nullLevels, rows.Slice(i, j)) + i = j + } + + if y != 0 { + if b := nulls.bits[x] >> uint(y); b == (1< rows.Len() { + j = rows.Len() + } + + if i < j { + writeRows(columns, levels, rows.Slice(i, j)) + i = j + } + } + + } +} + +func writeRowsFuncOfArray(t reflect.Type, schema *Schema, path columnPath) writeRowsFunc { + column := schema.lazyLoadState().mapping.lookup(path) + arrayLen := t.Len() + columnLen := column.node.Type().Length() + if arrayLen != columnLen { + panic(fmt.Sprintf("cannot convert Go values of type "+typeNameOf(t)+" to FIXED_LEN_BYTE_ARRAY(%d)", columnLen)) + } + return writeRowsFuncOfRequired(t, schema, path) +} + +func writeRowsFuncOfPointer(t reflect.Type, schema *Schema, path columnPath, tagReplacements []StructTagOption) writeRowsFunc { + elemType := t.Elem() + elemSize := uintptr(elemType.Size()) + writeRows := writeRowsFuncOf(elemType, schema, path, tagReplacements) + + if len(path) == 0 { + // This code path is taken when generating a writeRowsFunc for a pointer + // type. In this case, we do not need to increase the definition level + // since we are not deailng with an optional field but a pointer to the + // row type. 
+ return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + writeRows(columns, levels, rows) + return + } + + for i := range rows.Len() { + p := *(*unsafe.Pointer)(rows.Index(i)) + a := sparse.Array{} + if p != nil { + a = makeArray(p, 1, elemSize) + } + writeRows(columns, levels, a) + } + } + } + + // Check if the schema node at this path is optional. If not, the pointer + // is just a Go implementation detail (like proto message types) and we + // should not increment the definition level. + node := findByPath(schema, path) + isOptional := node != nil && node.Optional() + + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + writeRows(columns, levels, rows) + return + } + + for i := range rows.Len() { + p := *(*unsafe.Pointer)(rows.Index(i)) + a := sparse.Array{} + elemLevels := levels + if p != nil { + a = makeArray(p, 1, elemSize) + if isOptional { + elemLevels.definitionLevel++ + } + } + writeRows(columns, elemLevels, a) + } + + } +} + +func writeRowsFuncOfSlice(t reflect.Type, schema *Schema, path columnPath, tagReplacements []StructTagOption) writeRowsFunc { + elemType := t.Elem() + elemSize := uintptr(elemType.Size()) + writeRows := writeRowsFuncOf(elemType, schema, path, tagReplacements) + + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + type sliceHeader struct { + base unsafe.Pointer + len int + cap int + } + + if rows.Len() == 0 { + writeRows(columns, levels, rows) + return + } + + levels.repetitionDepth++ + + for i := range rows.Len() { + p := (*sliceHeader)(rows.Index(i)) + a := makeArray(p.base, p.len, elemSize) + b := sparse.Array{} + + elemLevels := levels + if a.Len() > 0 { + b = a.Slice(0, 1) + elemLevels.definitionLevel++ + } + + writeRows(columns, elemLevels, b) + + if a.Len() > 1 { + elemLevels.repetitionLevel = elemLevels.repetitionDepth + + writeRows(columns, elemLevels, a.Slice(1, a.Len())) + } + } + + } +} + +func 
writeRowsFuncOfStruct(t reflect.Type, schema *Schema, path columnPath, tagReplacements []StructTagOption) writeRowsFunc { + type column struct { + offset uintptr + writeRows writeRowsFunc + } + + fields := structFieldsOf(path, t, tagReplacements) + columns := make([]column, len(fields)) + + for i, f := range fields { + columnPath := path.append(f.Name) + // Check if the schema node at the field path is optional and/or a list + // Use the schema structure, not the Go struct tags + node := findByPath(schema, columnPath) + list := false + forEachStructTagOption(f, func(_ reflect.Type, option, _ string) { + switch option { + case "list": + list = true + columnPath = columnPath.append("list", "element") + } + }) + writeRows := writeRowsFuncOf(f.Type, schema, columnPath, tagReplacements) + // Check if the schema node is optional (from the schema, not the Go struct tag) + if node.Optional() { + kind := f.Type.Kind() + switch { + case kind == reflect.Pointer: + case kind == reflect.Interface: + // Interface types handle their own definition levels through + // writeRowsFuncOfInterface -> writeValueFuncOf -> writeValueFuncOfOptional + case kind == reflect.Slice && !list && f.Type.Elem().Kind() != reflect.Uint8: + // For slices other than []byte, optional applies + // to the element, not the list. + case f.Type == reflect.TypeFor[json.RawMessage](): + // json.RawMessage handles its own definition levels through + // writeRowsFuncOfJSONRawMessage -> writeValueFuncOf -> writeValueFuncOfOptional + case f.Type == reflect.TypeFor[time.Time](): + // time.Time is a struct but has IsZero() method, + // so it needs special handling. + // Don't use writeRowsFuncOfOptional which relies + // on bitmap batching. 
+ default: + writeRows = writeRowsFuncOfOptional(f.Type, schema, columnPath, writeRows) + } + } + + columns[i] = column{ + offset: f.Offset, + writeRows: writeRows, + } + } + + return func(buffers []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + for _, column := range columns { + column.writeRows(buffers, levels, rows) + } + } else { + for _, column := range columns { + column.writeRows(buffers, levels, rows.Offset(column.offset)) + } + } + } +} + +func writeRowsFuncOfInterface(t reflect.Type, schema *Schema, path columnPath) writeRowsFunc { + node := findByPath(schema, path) + if node == nil { + panic("column not found: " + path.String()) + } + + columnIndex := findColumnIndex(schema, node, path) + if columnIndex < 0 { + // Empty group node (e.g., from interface{} in map[string]any). + // Return a no-op function since there are no columns to write. + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + // No-op: nothing to write for empty groups + } + } + + // Get the schema-based write function for this node + _, writeValue := writeValueFuncOf(columnIndex, node) + + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + for i := range rows.Len() { + writeValue(columns, levels, reflect.NewAt(t, rows.Index(i)).Elem()) + } + } +} + +// writeRowsFuncOfMapToGroup handles writing a Go map to a Parquet GROUP schema +// (as opposed to a MAP logical type). This allows map[string]T to be written +// to schemas with named optional fields. 
+func writeRowsFuncOfMapToGroup(t reflect.Type, schema *Schema, path columnPath, groupNode Node, tagReplacements []StructTagOption) writeRowsFunc { + if t.Key().Kind() != reflect.String { + panic("map keys must be strings when writing to GROUP schema") + } + + type fieldWriter struct { + fieldName string + writeRows writeRowsFunc // Writes null/empty value + writeValue writeValueFunc + } + + fields := groupNode.Fields() + if len(fields) == 0 { + // Empty group - return no-op function + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + // No-op: empty group has no fields to write + } + } + + writers := make([]fieldWriter, len(fields)) + valueType := t.Elem() + columnIndex := findColumnIndex(schema, findByPath(schema, path), path) + + for i, field := range fields { + fieldPath := path.append(field.Name()) + writeRows := writeRowsFuncOf(valueType, schema, fieldPath, tagReplacements) + if field.Optional() { + writeRows = writeRowsFuncOfOptional(valueType, schema, fieldPath, writeRows) + } + + var writeValue writeValueFunc + columnIndex, writeValue = writeValueFuncOf(columnIndex, field) + + writers[i] = fieldWriter{ + fieldName: field.Name(), + writeRows: writeRows, + writeValue: writeValue, + } + } + + // We make sepcial cases for the common types to avoid paying the cost of + // reflection in calls like MapIndex which force the returned value to be + // allocated on the heap. 
+ var writeMaps writeRowsFunc + switch { + case t.ConvertibleTo(reflect.TypeFor[map[string]string]()): + writeMaps = func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + numRows := rows.Len() + numValues := len(writers) * numRows + buffer := stringArrayPool.Get( + func() *stringArray { return new(stringArray) }, + func(b *stringArray) { b.values = b.values[:0] }, + ) + buffer.values = slices.Grow(buffer.values, numValues)[:numValues] + defer stringArrayPool.Put(buffer) + defer clear(buffer.values) + + for i := range numRows { + m := *(*map[string]string)(reflect.NewAt(t, rows.Index(i)).UnsafePointer()) + for j := range writers { + buffer.values[j*numRows+i] = m[writers[j].fieldName] + } + } + + for j := range writers { + a := sparse.MakeStringArray(buffer.values[j*numRows : (j+1)*numRows]) + writers[j].writeRows(columns, levels, a.UnsafeArray()) + } + } + + case t.ConvertibleTo(reflect.TypeFor[map[string]any]()): + writeMaps = func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + for i := range rows.Len() { + m := *(*map[string]any)(reflect.NewAt(t, rows.Index(i)).UnsafePointer()) + + for j := range writers { + w := &writers[j] + v := m[w.fieldName] + w.writeValue(columns, levels, reflect.ValueOf(v)) + } + } + } + + default: + writeMaps = func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + for i := range rows.Len() { + m := reflect.NewAt(t, rows.Index(i)).Elem() + + for j := range writers { + w := &writers[j] + keyValue := reflect.ValueOf(&w.fieldName).Elem() + mapValue := m.MapIndex(keyValue) + w.writeValue(columns, levels, mapValue) + } + } + } + } + + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + for _, w := range writers { + w.writeRows(columns, levels, sparse.Array{}) + } + } else { + writeMaps(columns, levels, rows) + } + } +} + +type stringArray struct{ values []string } + +var stringArrayPool memory.Pool[stringArray] + +// findColumnIndex 
finds the column index for a given node and path. +// For leaf nodes, returns the column index directly. +// For group nodes, recursively finds the first leaf column. +// Returns -1 for empty group nodes (groups with no fields), which can occur +// when using interface{} types in maps (e.g., map[string]any). +func findColumnIndex(schema *Schema, node Node, path columnPath) int16 { + col := schema.lazyLoadState().mapping.lookup(path) + if col.columnIndex >= 0 { + return col.columnIndex + } + if node.Leaf() { + panic("node is a leaf but has no column index: " + path.String()) + } + fields := node.Fields() + if len(fields) == 0 { + // Empty group nodes can occur with interface{} types (e.g., map[string]any). + // Return -1 to indicate there are no columns to write. + return -1 + } + firstFieldPath := path.append(fields[0].Name()) + return findColumnIndex(schema, fields[0], firstFieldPath) +} + +func writeRowsFuncOfMap(t reflect.Type, schema *Schema, path columnPath, tagReplacements []StructTagOption) writeRowsFunc { + // Check if the schema at this path is a MAP or a GROUP. + node := findByPath(schema, path) + if node != nil && !isMap(node) { + // The schema is a GROUP (not a MAP), so we need to handle it differently. + // Instead of using key_value structure, we iterate through the GROUP's fields + // and look up corresponding map keys. 
+ return writeRowsFuncOfMapToGroup(t, schema, path, node, tagReplacements) + } + + // Standard MAP logical type handling + keyPath := path.append("key_value", "key") + keyType := t.Key() + writeKeys := writeRowsFuncOf(keyType, schema, keyPath, tagReplacements) + + valuePath := path.append("key_value", "value") + valueNode := findByPath(schema, valuePath) + // If the value is a LIST type, adjust the path to include list/element + if valueNode != nil && isList(valueNode) { + valuePath = valuePath.append("list", "element") + } + valueType := t.Elem() + writeValues := writeRowsFuncOf(valueType, schema, valuePath, tagReplacements) + + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + writeKeys(columns, levels, rows) + writeValues(columns, levels, rows) + return + } + + levels.repetitionDepth++ + makeMap := makeMapFuncOf(t) + + for i := range rows.Len() { + m := reflect.NewAt(t, rows.Index(i)).Elem() + n := m.Len() + + if n == 0 { + empty := sparse.Array{} + writeKeys(columns, levels, empty) + writeValues(columns, levels, empty) + continue + } + + elemLevels := levels + elemLevels.definitionLevel++ + + keys, values := makeMap(m).entries() + writeKeys(columns, elemLevels, keys.Slice(0, 1)) + writeValues(columns, elemLevels, values.Slice(0, 1)) + if n > 1 { + elemLevels.repetitionLevel = elemLevels.repetitionDepth + writeKeys(columns, elemLevels, keys.Slice(1, n)) + writeValues(columns, elemLevels, values.Slice(1, n)) + } + } + + } +} + +func writeRowsFuncOfJSON(t reflect.Type, schema *Schema, path columnPath) writeRowsFunc { + // If this is a string or a byte array write directly. 
+ switch t.Kind() { + case reflect.String: + return writeRowsFuncOfRequired(t, schema, path) + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return writeRowsFuncOfRequired(t, schema, path) + } + } + + columnIndex := findColumnIndex(schema, schema, path) + if columnIndex < 0 { + // Empty group - return no-op function + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + // No-op: empty group has no columns to write + } + } + + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + columns[columnIndex].writeNull(levels) + return + } + + b := memory.SliceBuffer[byte]{} + w := memory.SliceWriter{Buffer: &b} + defer b.Reset() + + for i := range rows.Len() { + v := reflect.NewAt(t, rows.Index(i)) + b.Resize(0) + + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + + if err := enc.Encode(v.Interface()); err != nil { + panic(err) + } + + data := b.Slice() + columns[columnIndex].writeByteArray(levels, data[:len(data)-1]) + } + } +} + +func writeRowsFuncOfTime(_ reflect.Type, schema *Schema, path columnPath, tagReplacements []StructTagOption) writeRowsFunc { + t := reflect.TypeFor[int64]() + elemSize := uintptr(t.Size()) + writeRows := writeRowsFuncOf(t, schema, path, tagReplacements) + + col, _ := schema.Lookup(path...) + unit := Nanosecond.TimeUnit() + lt := col.Node.Type().LogicalType() + if lt != nil && lt.Timestamp != nil { + unit = lt.Timestamp.Unit + } + + // Check if the column is optional + isOptional := col.Node.Optional() + + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + writeRows(columns, levels, rows) + return + } + + // If we're optional and the current definition level is already > 0, + // then we're in a pointer/nested context where writeRowsFuncOfPointer + // already handles optionality. + // + // Don't double-handle it here. For simple optional fields, + // definitionLevel starts at 0. 
+ alreadyHandled := isOptional && levels.definitionLevel > 0 + + times := rows.TimeArray() + for i := range times.Len() { + t := times.Index(i) + + // For optional fields, check if the value is zero + // (unless already handled by pointer wrapper). + elemLevels := levels + if isOptional && !alreadyHandled && t.IsZero() { + // Write as NULL (don't increment definition level). + empty := sparse.Array{} + writeRows(columns, elemLevels, empty) + continue + } + + // For optional non-zero values, increment definition level + // (unless already handled). + if isOptional && !alreadyHandled { + elemLevels.definitionLevel++ + } + + var val int64 + switch { + case unit.Millis != nil: + val = t.UnixMilli() + case unit.Micros != nil: + val = t.UnixMicro() + default: + val = t.UnixNano() + } + + a := makeArray(reflectValueData(reflect.ValueOf(val)), 1, elemSize) + writeRows(columns, elemLevels, a) + } + } +} + +// countLeafColumns returns the number of leaf columns in a node +func countLeafColumns(node Node) int16 { + if node.Leaf() { + return 1 + } + count := int16(0) + for _, field := range node.Fields() { + count += countLeafColumns(field) + } + return count +} + +func writeRowsFuncFor[T any](schema *Schema, path columnPath) writeRowsFunc { + node := findByPath(schema, path) + if node == nil { + panic("parquet: column not found: " + path.String()) + } + + columnIndex := findColumnIndex(schema, node, path) + if columnIndex < 0 { + // Empty group - return no-op function + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + // No-op: empty group has no columns to write + } + } + + _, writeValue := writeValueFuncOf(columnIndex, node) + + return func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + for i := range rows.Len() { + p := rows.Index(i) + v := reflect.ValueOf((*T)(p)).Elem() + writeValue(columns, levels, v) + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_chunk.go 
b/vendor/github.com/parquet-go/parquet-go/column_chunk.go new file mode 100644 index 00000000000..ba1e3a59ca1 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_chunk.go @@ -0,0 +1,347 @@ +package parquet + +import ( + "errors" + "io" +) + +var ( + ErrMissingBloomFilter = errors.New("missing bloom filter") + ErrMissingColumnIndex = errors.New("missing column index") + ErrMissingOffsetIndex = errors.New("missing offset index") +) + +// The ColumnChunk interface represents individual columns of a row group. +type ColumnChunk interface { + // Returns the column type. + Type() Type + + // Returns the index of this column in its parent row group. + Column() int + + // Returns a reader exposing the pages of the column. + Pages() Pages + + // Returns the components of the page index for this column chunk, + // containing details about the content and location of pages within the + // chunk. + // + // Note that the returned value may be the same across calls to these + // methods, programs must treat those as read-only. + // + // If the column chunk does not have a column or offset index, the methods return + // ErrMissingColumnIndex or ErrMissingOffsetIndex respectively. + // + // Prior to v0.20, these methods did not return an error because the page index + // for a file was either fully read when the file was opened, or skipped + // completely using the parquet.SkipPageIndex option. Version v0.20 introduced a + // change that the page index can be read on-demand at any time, even if a file + // was opened with the parquet.SkipPageIndex option. Since reading the page index + // can fail, these methods now return an error. + ColumnIndex() (ColumnIndex, error) + OffsetIndex() (OffsetIndex, error) + BloomFilter() BloomFilter + + // Returns the number of values in the column chunk. + // + // This quantity may differ from the number of rows in the parent row group + // because repeated columns may hold zero or more values per row. 
+ NumValues() int64 +} + +// AsyncColumnChunk returns a ColumnChunk that reads pages asynchronously. +func AsyncColumnChunk(columnChunk ColumnChunk) ColumnChunk { + return &asyncColumnChunk{columnChunk} +} + +type asyncColumnChunk struct { + ColumnChunk +} + +func (c *asyncColumnChunk) Pages() Pages { + return AsyncPages(c.ColumnChunk.Pages()) +} + +// NewColumnChunkRowReader creates a new ColumnChunkRowReader for the given +// column chunks. +func NewColumnChunkRowReader(columns []ColumnChunk) RowReadSeekCloser { + return newRowGroupRows(nil, columns, defaultValueBufferSize) +} + +// ColumnChunkValueReader is an interface for reading values from a column chunk. +type ColumnChunkValueReader interface { + ValueReader + RowSeeker + io.Closer +} + +// NewColumnChunkValueReader creates a new ColumnChunkValueReader for the given +// column chunk. +func NewColumnChunkValueReader(column ColumnChunk) ColumnChunkValueReader { + return &columnChunkValueReader{pages: column.Pages()} +} + +type columnChunkValueReader struct { + pages Pages + page Page + values ValueReader + detach bool +} + +func (r *columnChunkValueReader) clear() { + if r.page != nil { + if r.detach { + releaseAndDetachValues(r.page) + } else { + Release(r.page) + } + r.page = nil + r.values = nil + } +} + +func (r *columnChunkValueReader) Reset() { + if r.pages != nil { + // Ignore errors because we are resetting the reader, if the error + // persists we will see it on the next read, and otherwise we can + // read back from the beginning. 
+ r.pages.SeekToRow(0) + } + r.clear() +} + +func (r *columnChunkValueReader) Close() error { + var err error + if r.pages != nil { + err = r.pages.Close() + r.pages = nil + } + r.clear() + return err +} + +func (r *columnChunkValueReader) ReadValues(values []Value) (int, error) { + if r.pages == nil { + return 0, io.EOF + } + + for { + if r.values == nil { + p, err := r.pages.ReadPage() + if err != nil { + return 0, err + } + r.page = p + r.values = p.Values() + } + + n, err := r.values.ReadValues(values) + if n > 0 { + return n, nil + } + if err == nil { + return 0, io.ErrNoProgress + } + if err != io.EOF { + return 0, err + } + r.clear() + } +} + +func (r *columnChunkValueReader) SeekToRow(rowIndex int64) error { + if r.pages == nil { + return io.ErrClosedPipe + } + if err := r.pages.SeekToRow(rowIndex); err != nil { + return err + } + r.clear() + return nil +} + +type pageAndValueWriter interface { + PageWriter + ValueWriter +} + +type readRowsFunc func(*rowGroupRows, []Row, byte) (int, error) + +func readRowsFuncOf(node Node, columnIndex int, repetitionDepth byte) (int, readRowsFunc) { + var read readRowsFunc + + if node.Repeated() { + repetitionDepth++ + } + + if node.Leaf() { + columnIndex, read = readRowsFuncOfLeaf(columnIndex, repetitionDepth) + } else { + columnIndex, read = readRowsFuncOfGroup(node, columnIndex, repetitionDepth) + } + + if node.Repeated() { + read = readRowsFuncOfRepeated(read, repetitionDepth) + } + + return columnIndex, read +} + +//go:noinline +func readRowsFuncOfRepeated(read readRowsFunc, repetitionDepth byte) readRowsFunc { + return func(r *rowGroupRows, rows []Row, repetitionLevel byte) (int, error) { + for i := range rows { + // Repeated columns have variable number of values, we must process + // them one row at a time because we cannot predict how many values + // need to be consumed in each iteration. 
+ row := rows[i : i+1] + + // The first pass looks for values marking the beginning of a row by + // having a repetition level equal to the current level. + n, err := read(r, row, repetitionLevel) + if err != nil { + // The error here may likely be io.EOF, the read function may + // also have successfully read a row, which is indicated by a + // non-zero count. In this case, we increment the index to + // indicate to the caller than rows up to i+1 have been read. + if n > 0 { + i++ + } + return i, err + } + + // The read function may return no errors and also read no rows in + // case where it had more values to read but none corresponded to + // the current repetition level. This is an indication that we will + // not be able to read more rows at this stage, we must return to + // the caller to let it set the repetition level to its current + // depth, which may allow us to read more values when called again. + if n == 0 { + return i, nil + } + + // When we reach this stage, we have successfully read the first + // values of a row of repeated columns. We continue consuming more + // repeated values until we get the indication that we consumed + // them all (the read function returns zero and no errors). + for { + n, err := read(r, row, repetitionDepth) + if err != nil { + return i + 1, err + } + if n == 0 { + break + } + } + } + return len(rows), nil + } +} + +//go:noinline +func readRowsFuncOfGroup(node Node, columnIndex int, repetitionDepth byte) (int, readRowsFunc) { + fields := node.Fields() + + // Empty groups (groups with no fields) are valid structural elements + // that don't contain column data. This function shouldn't be called in + // practice since empty groups have no leaf columns to read from. 
+ if len(fields) == 0 { + return columnIndex, func(r *rowGroupRows, rows []Row, repetitionLevel byte) (int, error) { + // Return 0 since there are no columns to read + return 0, nil + } + } + + if len(fields) == 1 { + // Small optimization for a somewhat common case of groups with a single + // column (like nested list elements for example); there is no need to + // loop over the group of a single element, we can simply skip to calling + // the inner read function. + return readRowsFuncOf(fields[0], columnIndex, repetitionDepth) + } + + group := make([]readRowsFunc, len(fields)) + for i := range group { + columnIndex, group[i] = readRowsFuncOf(fields[i], columnIndex, repetitionDepth) + } + + return columnIndex, func(r *rowGroupRows, rows []Row, repetitionLevel byte) (int, error) { + // When reading a group, we use the first column as an indicator of how + // may rows can be read during this call. + n, err := group[0](r, rows, repetitionLevel) + + if n > 0 { + // Read values for all rows that the group is able to consume. + // Getting io.EOF from calling the read functions indicate that + // we consumed all values of that particular column, but there may + // be more to read in other columns, therefore we must always read + // all columns and cannot stop on the first error. + for _, read := range group[1:] { + _, err2 := read(r, rows[:n], repetitionLevel) + if err2 != nil && err2 != io.EOF { + return 0, err2 + } + } + } + + return n, err + } +} + +//go:noinline +func readRowsFuncOfLeaf(columnIndex int, repetitionDepth byte) (int, readRowsFunc) { + var read readRowsFunc + + if repetitionDepth == 0 { + read = func(r *rowGroupRows, rows []Row, _ byte) (int, error) { + // When the repetition depth is zero, we know that there is exactly + // one value per row for this column, and therefore we can consume + // as many values as there are rows to fill. 
+ col := &r.columns[columnIndex] + buf := r.buffer(columnIndex) + + for i := range rows { + if col.offset == col.length { + n, err := col.reader.ReadValues(buf) + col.offset = 0 + col.length = int32(n) + if n == 0 && err != nil { + return 0, err + } + } + + rows[i] = append(rows[i], buf[col.offset]) + col.offset++ + } + + return len(rows), nil + } + } else { + read = func(r *rowGroupRows, rows []Row, repetitionLevel byte) (int, error) { + // When the repetition depth is not zero, we know that we will be + // called with a single row as input. We attempt to read at most one + // value of a single row and return to the caller. + col := &r.columns[columnIndex] + buf := r.buffer(columnIndex) + + if col.offset == col.length { + n, err := col.reader.ReadValues(buf) + col.offset = 0 + col.length = int32(n) + if n == 0 && err != nil { + return 0, err + } + } + + if buf[col.offset].repetitionLevel != repetitionLevel { + return 0, nil + } + + rows[0] = append(rows[0], buf[col.offset]) + col.offset++ + return 1, nil + } + } + + return columnIndex + 1, read +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_index.go b/vendor/github.com/parquet-go/parquet-go/column_index.go new file mode 100644 index 00000000000..0ddc1842df3 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_index.go @@ -0,0 +1,754 @@ +package parquet + +import ( + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding/plain" + "github.com/parquet-go/parquet-go/format" +) + +type ColumnIndex interface { + // NumPages returns the number of paged in the column index. + NumPages() int + + // Returns the number of null values in the page at the given index. + NullCount(int) int64 + + // Tells whether the page at the given index contains null values only. + NullPage(int) bool + + // PageIndex return min/max bounds for the page at the given index in the + // column. 
+ MinValue(int) Value + MaxValue(int) Value + + // IsAscending returns true if the column index min/max values are sorted + // in ascending order (based on the ordering rules of the column's logical + // type). + IsAscending() bool + + // IsDescending returns true if the column index min/max values are sorted + // in descending order (based on the ordering rules of the column's logical + // type). + IsDescending() bool +} + +// NewColumnIndex constructs a ColumnIndex instance from the given parquet +// format column index. The kind argument configures the type of values +func NewColumnIndex(kind Kind, index *format.ColumnIndex) ColumnIndex { + return &formatColumnIndex{ + kind: kind, + index: index, + } +} + +type formatColumnIndex struct { + kind Kind + index *format.ColumnIndex +} + +func (f *formatColumnIndex) NumPages() int { + return len(f.index.MinValues) +} + +func (f *formatColumnIndex) NullCount(i int) int64 { + if len(f.index.NullCounts) > 0 { + return f.index.NullCounts[i] + } + return 0 +} + +func (f *formatColumnIndex) NullPage(i int) bool { + return len(f.index.NullPages) > 0 && f.index.NullPages[i] +} + +func (f *formatColumnIndex) MinValue(i int) Value { + if f.NullPage(i) { + return Value{} + } + return f.kind.Value(f.index.MinValues[i]) +} + +func (f *formatColumnIndex) MaxValue(i int) Value { + if f.NullPage(i) { + return Value{} + } + return f.kind.Value(f.index.MaxValues[i]) +} + +func (f *formatColumnIndex) IsAscending() bool { + return f.index.BoundaryOrder == format.Ascending +} + +func (f *formatColumnIndex) IsDescending() bool { + return f.index.BoundaryOrder == format.Descending +} + +type FileColumnIndex struct { + index *format.ColumnIndex + kind Kind +} + +func (i *FileColumnIndex) NumPages() int { + return len(i.index.NullPages) +} + +func (i *FileColumnIndex) NullCount(j int) int64 { + if len(i.index.NullCounts) > 0 { + return i.index.NullCounts[j] + } + return 0 +} + +func (i *FileColumnIndex) NullPage(j int) bool { + return 
isNullPage(j, i.index) +} + +func (i *FileColumnIndex) MinValue(j int) Value { + if i.NullPage(j) { + return Value{} + } + return i.makeValue(i.index.MinValues[j]) +} + +func (i *FileColumnIndex) MaxValue(j int) Value { + if i.NullPage(j) { + return Value{} + } + return i.makeValue(i.index.MaxValues[j]) +} + +func (i *FileColumnIndex) IsAscending() bool { + return i.index.BoundaryOrder == format.Ascending +} + +func (i *FileColumnIndex) IsDescending() bool { + return i.index.BoundaryOrder == format.Descending +} + +func (i *FileColumnIndex) makeValue(b []byte) Value { + return i.kind.Value(b) +} + +func isNullPage(j int, index *format.ColumnIndex) bool { + return len(index.NullPages) > 0 && index.NullPages[j] +} + +type emptyColumnIndex struct{} + +func (emptyColumnIndex) NumPages() int { return 0 } +func (emptyColumnIndex) NullCount(int) int64 { return 0 } +func (emptyColumnIndex) NullPage(int) bool { return false } +func (emptyColumnIndex) MinValue(int) Value { return Value{} } +func (emptyColumnIndex) MaxValue(int) Value { return Value{} } +func (emptyColumnIndex) IsAscending() bool { return false } +func (emptyColumnIndex) IsDescending() bool { return false } + +type booleanColumnIndex struct{ page *booleanPage } + +func (i booleanColumnIndex) NumPages() int { return 1 } +func (i booleanColumnIndex) NullCount(int) int64 { return 0 } +func (i booleanColumnIndex) NullPage(int) bool { return false } +func (i booleanColumnIndex) MinValue(int) Value { return makeValueBoolean(i.page.min()) } +func (i booleanColumnIndex) MaxValue(int) Value { return makeValueBoolean(i.page.max()) } +func (i booleanColumnIndex) IsAscending() bool { return false } +func (i booleanColumnIndex) IsDescending() bool { return false } + +type int32ColumnIndex struct{ page *int32Page } + +func (i int32ColumnIndex) NumPages() int { return 1 } +func (i int32ColumnIndex) NullCount(int) int64 { return 0 } +func (i int32ColumnIndex) NullPage(int) bool { return false } +func (i int32ColumnIndex) 
MinValue(int) Value { return makeValueInt32(i.page.min()) } +func (i int32ColumnIndex) MaxValue(int) Value { return makeValueInt32(i.page.max()) } +func (i int32ColumnIndex) IsAscending() bool { return false } +func (i int32ColumnIndex) IsDescending() bool { return false } + +type int64ColumnIndex struct{ page *int64Page } + +func (i int64ColumnIndex) NumPages() int { return 1 } +func (i int64ColumnIndex) NullCount(int) int64 { return 0 } +func (i int64ColumnIndex) NullPage(int) bool { return false } +func (i int64ColumnIndex) MinValue(int) Value { return makeValueInt64(i.page.min()) } +func (i int64ColumnIndex) MaxValue(int) Value { return makeValueInt64(i.page.max()) } +func (i int64ColumnIndex) IsAscending() bool { return false } +func (i int64ColumnIndex) IsDescending() bool { return false } + +type int96ColumnIndex struct{ page *int96Page } + +func (i int96ColumnIndex) NumPages() int { return 1 } +func (i int96ColumnIndex) NullCount(int) int64 { return 0 } +func (i int96ColumnIndex) NullPage(int) bool { return false } +func (i int96ColumnIndex) MinValue(int) Value { return makeValueInt96(i.page.min()) } +func (i int96ColumnIndex) MaxValue(int) Value { return makeValueInt96(i.page.max()) } +func (i int96ColumnIndex) IsAscending() bool { return false } +func (i int96ColumnIndex) IsDescending() bool { return false } + +type floatColumnIndex struct{ page *floatPage } + +func (i floatColumnIndex) NumPages() int { return 1 } +func (i floatColumnIndex) NullCount(int) int64 { return 0 } +func (i floatColumnIndex) NullPage(int) bool { return false } +func (i floatColumnIndex) MinValue(int) Value { return makeValueFloat(i.page.min()) } +func (i floatColumnIndex) MaxValue(int) Value { return makeValueFloat(i.page.max()) } +func (i floatColumnIndex) IsAscending() bool { return false } +func (i floatColumnIndex) IsDescending() bool { return false } + +type doubleColumnIndex struct{ page *doublePage } + +func (i doubleColumnIndex) NumPages() int { return 1 } +func (i 
doubleColumnIndex) NullCount(int) int64 { return 0 } +func (i doubleColumnIndex) NullPage(int) bool { return false } +func (i doubleColumnIndex) MinValue(int) Value { return makeValueDouble(i.page.min()) } +func (i doubleColumnIndex) MaxValue(int) Value { return makeValueDouble(i.page.max()) } +func (i doubleColumnIndex) IsAscending() bool { return false } +func (i doubleColumnIndex) IsDescending() bool { return false } + +type byteArrayColumnIndex struct{ page *byteArrayPage } + +func (i byteArrayColumnIndex) NumPages() int { return 1 } +func (i byteArrayColumnIndex) NullCount(int) int64 { return 0 } +func (i byteArrayColumnIndex) NullPage(int) bool { return false } +func (i byteArrayColumnIndex) MinValue(int) Value { return makeValueBytes(ByteArray, i.page.min()) } +func (i byteArrayColumnIndex) MaxValue(int) Value { return makeValueBytes(ByteArray, i.page.max()) } +func (i byteArrayColumnIndex) IsAscending() bool { return false } +func (i byteArrayColumnIndex) IsDescending() bool { return false } + +type fixedLenByteArrayColumnIndex struct{ page *fixedLenByteArrayPage } + +func (i fixedLenByteArrayColumnIndex) NumPages() int { return 1 } +func (i fixedLenByteArrayColumnIndex) NullCount(int) int64 { return 0 } +func (i fixedLenByteArrayColumnIndex) NullPage(int) bool { return false } +func (i fixedLenByteArrayColumnIndex) MinValue(int) Value { + return makeValueBytes(FixedLenByteArray, i.page.min()) +} +func (i fixedLenByteArrayColumnIndex) MaxValue(int) Value { + return makeValueBytes(FixedLenByteArray, i.page.max()) +} +func (i fixedLenByteArrayColumnIndex) IsAscending() bool { return false } +func (i fixedLenByteArrayColumnIndex) IsDescending() bool { return false } + +type uint32ColumnIndex struct{ page *uint32Page } + +func (i uint32ColumnIndex) NumPages() int { return 1 } +func (i uint32ColumnIndex) NullCount(int) int64 { return 0 } +func (i uint32ColumnIndex) NullPage(int) bool { return false } +func (i uint32ColumnIndex) MinValue(int) Value { return 
makeValueUint32(i.page.min()) } +func (i uint32ColumnIndex) MaxValue(int) Value { return makeValueUint32(i.page.max()) } +func (i uint32ColumnIndex) IsAscending() bool { return false } +func (i uint32ColumnIndex) IsDescending() bool { return false } + +type uint64ColumnIndex struct{ page *uint64Page } + +func (i uint64ColumnIndex) NumPages() int { return 1 } +func (i uint64ColumnIndex) NullCount(int) int64 { return 0 } +func (i uint64ColumnIndex) NullPage(int) bool { return false } +func (i uint64ColumnIndex) MinValue(int) Value { return makeValueUint64(i.page.min()) } +func (i uint64ColumnIndex) MaxValue(int) Value { return makeValueUint64(i.page.max()) } +func (i uint64ColumnIndex) IsAscending() bool { return false } +func (i uint64ColumnIndex) IsDescending() bool { return false } + +type be128ColumnIndex struct{ page *be128Page } + +func (i be128ColumnIndex) NumPages() int { return 1 } +func (i be128ColumnIndex) NullCount(int) int64 { return 0 } +func (i be128ColumnIndex) NullPage(int) bool { return false } +func (i be128ColumnIndex) MinValue(int) Value { return makeValueBytes(FixedLenByteArray, i.page.min()) } +func (i be128ColumnIndex) MaxValue(int) Value { return makeValueBytes(FixedLenByteArray, i.page.max()) } +func (i be128ColumnIndex) IsAscending() bool { return false } +func (i be128ColumnIndex) IsDescending() bool { return false } + +// The ColumnIndexer interface is implemented by types that support generating +// parquet column indexes. +// +// The package does not export any types that implement this interface, programs +// must call NewColumnIndexer on a Type instance to construct column indexers. +type ColumnIndexer interface { + // Resets the column indexer state. + Reset() + + // Add a page to the column indexer. + IndexPage(numValues, numNulls int64, min, max Value) + + // Generates a format.ColumnIndex value from the current state of the + // column indexer. 
+ // + // The returned value may reference internal buffers, in which case the + // values remain valid until the next call to IndexPage or Reset on the + // column indexer. + ColumnIndex() format.ColumnIndex +} + +type baseColumnIndexer struct { + nullPages []bool + nullCounts []int64 +} + +func (i *baseColumnIndexer) reset() { + i.nullPages = i.nullPages[:0] + i.nullCounts = i.nullCounts[:0] +} + +func (i *baseColumnIndexer) observe(numValues, numNulls int64) { + i.nullPages = append(i.nullPages, numValues == numNulls) + i.nullCounts = append(i.nullCounts, numNulls) +} + +func (i *baseColumnIndexer) columnIndex(minValues, maxValues [][]byte, minOrder, maxOrder int) format.ColumnIndex { + nullPages := make([]bool, len(i.nullPages)) + copy(nullPages, i.nullPages) + nullCounts := make([]int64, len(i.nullCounts)) + copy(nullCounts, i.nullCounts) + return format.ColumnIndex{ + NullPages: nullPages, + NullCounts: nullCounts, + MinValues: minValues, + MaxValues: maxValues, + BoundaryOrder: boundaryOrderOf(minOrder, maxOrder), + } +} + +type booleanColumnIndexer struct { + baseColumnIndexer + minValues []bool + maxValues []bool +} + +func newBooleanColumnIndexer() *booleanColumnIndexer { + return new(booleanColumnIndexer) +} + +func (i *booleanColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *booleanColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + i.minValues = append(i.minValues, min.boolean()) + i.maxValues = append(i.maxValues, max.boolean()) +} + +func (i *booleanColumnIndexer) ColumnIndex() format.ColumnIndex { + return i.columnIndex( + splitFixedLenByteArrays(unsafecast.Slice[byte](i.minValues), 1), + splitFixedLenByteArrays(unsafecast.Slice[byte](i.maxValues), 1), + orderOfBool(i.minValues), + orderOfBool(i.maxValues), + ) +} + +type int32ColumnIndexer struct { + baseColumnIndexer + minValues []int32 + maxValues []int32 +} + +func 
newInt32ColumnIndexer() *int32ColumnIndexer { + return new(int32ColumnIndexer) +} + +func (i *int32ColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *int32ColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + i.minValues = append(i.minValues, min.int32()) + i.maxValues = append(i.maxValues, max.int32()) +} + +func (i *int32ColumnIndexer) ColumnIndex() format.ColumnIndex { + return i.columnIndex( + splitFixedLenByteArrays(columnIndexInt32Values(i.minValues), 4), + splitFixedLenByteArrays(columnIndexInt32Values(i.maxValues), 4), + orderOfInt32(i.minValues), + orderOfInt32(i.maxValues), + ) +} + +type int64ColumnIndexer struct { + baseColumnIndexer + minValues []int64 + maxValues []int64 +} + +func newInt64ColumnIndexer() *int64ColumnIndexer { + return new(int64ColumnIndexer) +} + +func (i *int64ColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *int64ColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + i.minValues = append(i.minValues, min.int64()) + i.maxValues = append(i.maxValues, max.int64()) +} + +func (i *int64ColumnIndexer) ColumnIndex() format.ColumnIndex { + return i.columnIndex( + splitFixedLenByteArrays(columnIndexInt64Values(i.minValues), 8), + splitFixedLenByteArrays(columnIndexInt64Values(i.maxValues), 8), + orderOfInt64(i.minValues), + orderOfInt64(i.maxValues), + ) +} + +type int96ColumnIndexer struct { + baseColumnIndexer + minValues []deprecated.Int96 + maxValues []deprecated.Int96 +} + +func newInt96ColumnIndexer() *int96ColumnIndexer { + return new(int96ColumnIndexer) +} + +func (i *int96ColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *int96ColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + 
i.minValues = append(i.minValues, min.Int96()) + i.maxValues = append(i.maxValues, max.Int96()) +} + +func (i *int96ColumnIndexer) ColumnIndex() format.ColumnIndex { + return i.columnIndex( + splitFixedLenByteArrays(columnIndexInt96Values(i.minValues), 12), + splitFixedLenByteArrays(columnIndexInt96Values(i.maxValues), 12), + deprecated.OrderOfInt96(i.minValues), + deprecated.OrderOfInt96(i.maxValues), + ) +} + +type floatColumnIndexer struct { + baseColumnIndexer + minValues []float32 + maxValues []float32 +} + +func newFloatColumnIndexer() *floatColumnIndexer { + return new(floatColumnIndexer) +} + +func (i *floatColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *floatColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + i.minValues = append(i.minValues, min.float()) + i.maxValues = append(i.maxValues, max.float()) +} + +func (i *floatColumnIndexer) ColumnIndex() format.ColumnIndex { + return i.columnIndex( + splitFixedLenByteArrays(columnIndexFloatValues(i.minValues), 4), + splitFixedLenByteArrays(columnIndexFloatValues(i.maxValues), 4), + orderOfFloat32(i.minValues), + orderOfFloat32(i.maxValues), + ) +} + +type doubleColumnIndexer struct { + baseColumnIndexer + minValues []float64 + maxValues []float64 +} + +func newDoubleColumnIndexer() *doubleColumnIndexer { + return new(doubleColumnIndexer) +} + +func (i *doubleColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *doubleColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + i.minValues = append(i.minValues, min.double()) + i.maxValues = append(i.maxValues, max.double()) +} + +func (i *doubleColumnIndexer) ColumnIndex() format.ColumnIndex { + return i.columnIndex( + splitFixedLenByteArrays(columnIndexDoubleValues(i.minValues), 8), + 
splitFixedLenByteArrays(columnIndexDoubleValues(i.maxValues), 8), + orderOfFloat64(i.minValues), + orderOfFloat64(i.maxValues), + ) +} + +type byteArrayColumnIndexer struct { + baseColumnIndexer + sizeLimit int + minValues []byte + maxValues []byte +} + +func newByteArrayColumnIndexer(sizeLimit int) *byteArrayColumnIndexer { + return &byteArrayColumnIndexer{sizeLimit: sizeLimit} +} + +func (i *byteArrayColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *byteArrayColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + i.minValues = plain.AppendByteArray(i.minValues, min.byteArray()) + i.maxValues = plain.AppendByteArray(i.maxValues, max.byteArray()) +} + +func (i *byteArrayColumnIndexer) ColumnIndex() format.ColumnIndex { + minValues := splitByteArrays(i.minValues) + maxValues := splitByteArrays(i.maxValues) + if sizeLimit := i.sizeLimit; sizeLimit > 0 { + for i, v := range minValues { + minValues[i] = truncateLargeMinByteArrayValue(v, sizeLimit) + } + for i, v := range maxValues { + maxValues[i] = truncateLargeMaxByteArrayValue(v, sizeLimit) + } + } + return i.columnIndex( + minValues, + maxValues, + orderOfBytes(minValues), + orderOfBytes(maxValues), + ) +} + +type fixedLenByteArrayColumnIndexer struct { + baseColumnIndexer + size int + sizeLimit int + minValues []byte + maxValues []byte +} + +func newFixedLenByteArrayColumnIndexer(size, sizeLimit int) *fixedLenByteArrayColumnIndexer { + return &fixedLenByteArrayColumnIndexer{ + size: size, + sizeLimit: sizeLimit, + } +} + +func (i *fixedLenByteArrayColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *fixedLenByteArrayColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + i.minValues = append(i.minValues, min.byteArray()...) + i.maxValues = append(i.maxValues, max.byteArray()...) 
+} + +func (i *fixedLenByteArrayColumnIndexer) ColumnIndex() format.ColumnIndex { + minValues := splitFixedLenByteArrays(i.minValues, i.size) + maxValues := splitFixedLenByteArrays(i.maxValues, i.size) + if sizeLimit := i.sizeLimit; sizeLimit > 0 { + for i, v := range minValues { + minValues[i] = truncateLargeMinByteArrayValue(v, sizeLimit) + } + for i, v := range maxValues { + maxValues[i] = truncateLargeMaxByteArrayValue(v, sizeLimit) + } + } + return i.columnIndex( + minValues, + maxValues, + orderOfBytes(minValues), + orderOfBytes(maxValues), + ) +} + +type uint32ColumnIndexer struct { + baseColumnIndexer + minValues []uint32 + maxValues []uint32 +} + +func newUint32ColumnIndexer() *uint32ColumnIndexer { + return new(uint32ColumnIndexer) +} + +func (i *uint32ColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *uint32ColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + i.minValues = append(i.minValues, min.uint32()) + i.maxValues = append(i.maxValues, max.uint32()) +} + +func (i *uint32ColumnIndexer) ColumnIndex() format.ColumnIndex { + return i.columnIndex( + splitFixedLenByteArrays(columnIndexUint32Values(i.minValues), 4), + splitFixedLenByteArrays(columnIndexUint32Values(i.maxValues), 4), + orderOfUint32(i.minValues), + orderOfUint32(i.maxValues), + ) +} + +type uint64ColumnIndexer struct { + baseColumnIndexer + minValues []uint64 + maxValues []uint64 +} + +func newUint64ColumnIndexer() *uint64ColumnIndexer { + return new(uint64ColumnIndexer) +} + +func (i *uint64ColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *uint64ColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + i.minValues = append(i.minValues, min.uint64()) + i.maxValues = append(i.maxValues, max.uint64()) +} + +func (i *uint64ColumnIndexer) ColumnIndex() 
format.ColumnIndex { + return i.columnIndex( + splitFixedLenByteArrays(columnIndexUint64Values(i.minValues), 8), + splitFixedLenByteArrays(columnIndexUint64Values(i.maxValues), 8), + orderOfUint64(i.minValues), + orderOfUint64(i.maxValues), + ) +} + +type be128ColumnIndexer struct { + baseColumnIndexer + minValues [][16]byte + maxValues [][16]byte +} + +func newBE128ColumnIndexer() *be128ColumnIndexer { + return new(be128ColumnIndexer) +} + +func (i *be128ColumnIndexer) Reset() { + i.reset() + i.minValues = i.minValues[:0] + i.maxValues = i.maxValues[:0] +} + +func (i *be128ColumnIndexer) IndexPage(numValues, numNulls int64, min, max Value) { + i.observe(numValues, numNulls) + if !min.IsNull() { + i.minValues = append(i.minValues, *(*[16]byte)(min.byteArray())) + } + if !max.IsNull() { + i.maxValues = append(i.maxValues, *(*[16]byte)(max.byteArray())) + } +} + +func (i *be128ColumnIndexer) ColumnIndex() format.ColumnIndex { + minValues := splitFixedLenByteArrays(unsafecast.Slice[byte](i.minValues), 16) + maxValues := splitFixedLenByteArrays(unsafecast.Slice[byte](i.maxValues), 16) + return i.columnIndex( + minValues, + maxValues, + orderOfBytes(minValues), + orderOfBytes(maxValues), + ) +} + +func truncateLargeMinByteArrayValue(value []byte, sizeLimit int) []byte { + if len(value) > sizeLimit { + value = value[:sizeLimit] + } + return value +} + +// truncateLargeMaxByteArrayValue truncates the given byte array to the given size limit. +// If the given byte array is truncated, it is incremented by 1 in place. +func truncateLargeMaxByteArrayValue(value []byte, sizeLimit int) []byte { + if len(value) > sizeLimit { + value = value[:sizeLimit] + incrementByteArrayInplace(value) + } + return value +} + +// incrementByteArray increments the given byte array by 1. 
+// Reference: https://github.com/apache/parquet-java/blob/master/parquet-column/src/main/java/org/apache/parquet/internal/column/columnindex/BinaryTruncator.java#L124 +func incrementByteArrayInplace(value []byte) { + for i := len(value) - 1; i >= 0; i-- { + value[i]++ + if value[i] != 0 { // Did not overflow: 0xFF -> 0x00 + return + } + } + // Fully overflowed, so restore all to 0xFF + for i := range value { + value[i] = 0xFF + } +} + +func splitByteArrays(data []byte) [][]byte { + length := 0 + plain.RangeByteArray(data, func([]byte) error { + length++ + return nil + }) + buffer := make([]byte, 0, len(data)-(4*length)) + values := make([][]byte, 0, length) + plain.RangeByteArray(data, func(value []byte) error { + offset := len(buffer) + buffer = append(buffer, value...) + values = append(values, buffer[offset:]) + return nil + }) + return values +} + +func splitFixedLenByteArrays(data []byte, size int) [][]byte { + data = copyBytes(data) + values := make([][]byte, len(data)/size) + for i := range values { + j := (i + 0) * size + k := (i + 1) * size + values[i] = data[j:k:k] + } + return values +} + +func boundaryOrderOf(minOrder, maxOrder int) format.BoundaryOrder { + if minOrder == maxOrder { + switch { + case minOrder > 0: + return format.Ascending + case minOrder < 0: + return format.Descending + } + } + return format.Unordered +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_index_be.go b/vendor/github.com/parquet-go/parquet-go/column_index_be.go new file mode 100644 index 00000000000..ace82ae3207 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_index_be.go @@ -0,0 +1,84 @@ +// This file gets added on all the big-endian CPU architectures. 
+ +//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 + +package parquet + +import ( + "encoding/binary" + "math" + + "github.com/parquet-go/parquet-go/deprecated" +) + +func columnIndexInt32Values(values []int32) []byte { + buf := make([]byte, len(values)*4) + idx := 0 + for k := range len(values) { + binary.LittleEndian.PutUint32(buf[idx:(4+idx)], uint32(values[k])) + idx += 4 + } + return buf +} + +func columnIndexInt64Values(values []int64) []byte { + buf := make([]byte, len(values)*8) + idx := 0 + for k := range len(values) { + binary.LittleEndian.PutUint64(buf[idx:(8+idx)], uint64(values[k])) + idx += 8 + } + return buf +} + +func columnIndexInt96Values(values []deprecated.Int96) []byte { + buf := make([]byte, len(values)*12) + idx := 0 + for k := range len(values) { + binary.LittleEndian.PutUint32(buf[idx:(4+idx)], uint32(values[k][0])) + binary.LittleEndian.PutUint32(buf[(4+idx):(8+idx)], uint32(values[k][1])) + binary.LittleEndian.PutUint32(buf[(8+idx):(12+idx)], uint32(values[k][2])) + idx += 12 + } + return buf +} + +func columnIndexFloatValues(values []float32) []byte { + buf := make([]byte, len(values)*4) + idx := 0 + for k := range len(values) { + binary.LittleEndian.PutUint32(buf[idx:(4+idx)], math.Float32bits(values[k])) + idx += 4 + } + return buf +} + +func columnIndexDoubleValues(values []float64) []byte { + buf := make([]byte, len(values)*8) + idx := 0 + for k := range len(values) { + binary.LittleEndian.PutUint64(buf[idx:(8+idx)], math.Float64bits(values[k])) + idx += 8 + } + return buf +} + +func columnIndexUint32Values(values []uint32) []byte { + buf := make([]byte, len(values)*4) + idx := 0 + for k := range len(values) { + binary.LittleEndian.PutUint32(buf[idx:(4+idx)], values[k]) + idx += 4 + } + return buf +} + +func columnIndexUint64Values(values []uint64) []byte { + buf := make([]byte, len(values)*8) + idx := 0 + for k := range len(values) { + 
binary.LittleEndian.PutUint64(buf[idx:(8+idx)], values[k]) + idx += 8 + } + return buf +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_index_le.go b/vendor/github.com/parquet-go/parquet-go/column_index_le.go new file mode 100644 index 00000000000..ff842efbb90 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_index_le.go @@ -0,0 +1,38 @@ +// This file gets added on all the little-endian CPU architectures. + +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm + +package parquet + +import ( + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" +) + +func columnIndexInt32Values(values []int32) []byte { + return unsafecast.Slice[byte](values) +} + +func columnIndexInt64Values(values []int64) []byte { + return unsafecast.Slice[byte](values) +} + +func columnIndexInt96Values(values []deprecated.Int96) []byte { + return unsafecast.Slice[byte](values) +} + +func columnIndexFloatValues(values []float32) []byte { + return unsafecast.Slice[byte](values) +} + +func columnIndexDoubleValues(values []float64) []byte { + return unsafecast.Slice[byte](values) +} + +func columnIndexUint32Values(values []uint32) []byte { + return unsafecast.Slice[byte](values) +} + +func columnIndexUint64Values(values []uint64) []byte { + return unsafecast.Slice[byte](values) +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_mapping.go b/vendor/github.com/parquet-go/parquet-go/column_mapping.go new file mode 100644 index 00000000000..a9345adb151 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_mapping.go @@ -0,0 +1,88 @@ +package parquet + +// LeafColumn is a struct type representing leaf columns of a parquet schema. 
+type LeafColumn struct { + Node Node + Path []string + ColumnIndex int + MaxRepetitionLevel int + MaxDefinitionLevel int +} + +func columnMappingOf(schema Node) (mapping columnMappingGroup, columns [][]string) { + mapping = make(columnMappingGroup) + columns = make([][]string, 0, 16) + + forEachLeafColumnOf(schema, func(leaf leafColumn) { + path := make(columnPath, len(leaf.path)) + copy(path, leaf.path) + columns = append(columns, path) + + group := mapping + for len(path) > 1 { + columnName := path[0] + g, ok := group[columnName].(columnMappingGroup) + if !ok { + g = make(columnMappingGroup) + group[columnName] = g + } + group, path = g, path[1:] + } + + leaf.path = path // use the copy + group[path[0]] = &columnMappingLeaf{column: leaf} + }) + + return mapping, columns +} + +type columnMapping interface { + lookup(path columnPath) leafColumn +} + +type columnMappingGroup map[string]columnMapping + +func (group columnMappingGroup) lookup(path columnPath) leafColumn { + if len(path) > 0 { + c, ok := group[path[0]] + if ok { + return c.lookup(path[1:]) + } + } + return leafColumn{columnIndex: -1} +} + +func (group columnMappingGroup) lookupClosest(path columnPath) leafColumn { + for len(path) > 0 { + g, ok := group[path[0]].(columnMappingGroup) + if ok { + group, path = g, path[1:] + } else { + firstName := "" + firstLeaf := (*columnMappingLeaf)(nil) + for name, child := range group { + if leaf, ok := child.(*columnMappingLeaf); ok { + if firstLeaf == nil || name < firstName { + firstName, firstLeaf = name, leaf + } + } + } + if firstLeaf != nil { + return firstLeaf.column + } + break + } + } + return leafColumn{columnIndex: -1} +} + +type columnMappingLeaf struct { + column leafColumn +} + +func (leaf *columnMappingLeaf) lookup(path columnPath) leafColumn { + if len(path) == 0 { + return leaf.column + } + return leafColumn{columnIndex: -1} +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_path.go 
b/vendor/github.com/parquet-go/parquet-go/column_path.go new file mode 100644 index 00000000000..d32fa7cd157 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/column_path.go @@ -0,0 +1,108 @@ +package parquet + +import ( + "slices" + "strings" +) + +type columnPath []string + +func (path columnPath) append(names ...string) columnPath { + return slices.Concat(path, names) +} + +func (path columnPath) equal(other columnPath) bool { + return stringsAreEqual(path, other) +} + +func (path columnPath) less(other columnPath) bool { + return stringsAreOrdered(path, other) +} + +func (path columnPath) String() string { + return strings.Join(path, ".") +} + +func stringsAreEqual(strings1, strings2 []string) bool { + if len(strings1) != len(strings2) { + return false + } + + for i := range strings1 { + if strings1[i] != strings2[i] { + return false + } + } + + return true +} + +func stringsAreOrdered(strings1, strings2 []string) bool { + n := min(len(strings1), len(strings2)) + + for i := range n { + if strings1[i] >= strings2[i] { + return false + } + } + + return len(strings1) <= len(strings2) +} + +type leafColumn struct { + node Node + path columnPath + maxRepetitionLevel byte + maxDefinitionLevel byte + columnIndex int16 +} + +func forEachLeafColumnOf(node Node, do func(leafColumn)) { + forEachLeafColumn(node, nil, 0, 0, 0, do) +} + +func forEachLeafColumn(node Node, path columnPath, columnIndex, maxRepetitionLevel, maxDefinitionLevel int, do func(leafColumn)) int { + switch { + case node.Optional(): + maxDefinitionLevel++ + case node.Repeated(): + maxRepetitionLevel++ + maxDefinitionLevel++ + } + + if node.Leaf() { + do(leafColumn{ + node: node, + path: path, + maxRepetitionLevel: makeRepetitionLevel(maxRepetitionLevel), + maxDefinitionLevel: makeDefinitionLevel(maxDefinitionLevel), + columnIndex: makeColumnIndex(columnIndex), + }) + return columnIndex + 1 + } + + for _, field := range node.Fields() { + columnIndex = forEachLeafColumn( + field, + 
path.append(field.Name()), + columnIndex, + maxRepetitionLevel, + maxDefinitionLevel, + do, + ) + } + + return columnIndex +} + +func lookupColumnPath(node Node, path columnPath) Node { + for node != nil && len(path) > 0 { + node = fieldByName(node, path[0]) + path = path[1:] + } + return node +} + +func hasColumnPath(node Node, path columnPath) bool { + return lookupColumnPath(node, path) != nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/compare.go b/vendor/github.com/parquet-go/parquet-go/compare.go new file mode 100644 index 00000000000..7ac5c532317 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/compare.go @@ -0,0 +1,356 @@ +package parquet + +import ( + "encoding/binary" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/memory" +) + +// CompareDescending constructs a comparison function which inverses the order +// of values. +// +//go:noinline +func CompareDescending(cmp func(Value, Value) int) func(Value, Value) int { + return func(a, b Value) int { return -cmp(a, b) } +} + +// CompareNullsFirst constructs a comparison function which assumes that null +// values are smaller than all other values. +// +//go:noinline +func CompareNullsFirst(cmp func(Value, Value) int) func(Value, Value) int { + return func(a, b Value) int { + switch { + case a.IsNull(): + if b.IsNull() { + return 0 + } + return -1 + case b.IsNull(): + return +1 + default: + return cmp(a, b) + } + } +} + +// CompareNullsLast constructs a comparison function which assumes that null +// values are greater than all other values. 
+// +//go:noinline +func CompareNullsLast(cmp func(Value, Value) int) func(Value, Value) int { + return func(a, b Value) int { + switch { + case a.IsNull(): + if b.IsNull() { + return 0 + } + return +1 + case b.IsNull(): + return -1 + default: + return cmp(a, b) + } + } +} + +func compareBool(v1, v2 bool) int { + switch { + case !v1 && v2: + return -1 + case v1 && !v2: + return +1 + default: + return 0 + } +} + +func compareInt32(v1, v2 int32) int { + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + default: + return 0 + } +} + +func compareInt64(v1, v2 int64) int { + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + default: + return 0 + } +} + +func compareInt96(v1, v2 deprecated.Int96) int { + switch { + case v1.Less(v2): + return -1 + case v2.Less(v1): + return +1 + default: + return 0 + } +} + +func compareFloat32(v1, v2 float32) int { + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + default: + return 0 + } +} + +func compareFloat64(v1, v2 float64) int { + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + default: + return 0 + } +} + +func compareUint32(v1, v2 uint32) int { + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + default: + return 0 + } +} + +func compareUint64(v1, v2 uint64) int { + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + default: + return 0 + } +} + +func compareBE128(v1, v2 *[16]byte) int { + x := binary.BigEndian.Uint64(v1[:8]) + y := binary.BigEndian.Uint64(v2[:8]) + switch { + case x < y: + return -1 + case x > y: + return +1 + } + x = binary.BigEndian.Uint64(v1[8:]) + y = binary.BigEndian.Uint64(v2[8:]) + switch { + case x < y: + return -1 + case x > y: + return +1 + default: + return 0 + } +} + +func lessBE128(v1, v2 *[16]byte) bool { + x := binary.BigEndian.Uint64(v1[:8]) + y := binary.BigEndian.Uint64(v2[:8]) + switch { + case x < y: + return true + case x > y: + return false + } + x = binary.BigEndian.Uint64(v1[8:]) + y = 
binary.BigEndian.Uint64(v2[8:]) + return x < y +} + +func compareRowsFuncOf(schema *Schema, sortingColumns []SortingColumn) func(Row, Row) int { + leafColumns := make([]leafColumn, len(sortingColumns)) + canCompareRows := true + + forEachLeafColumnOf(schema, func(leaf leafColumn) { + if leaf.maxRepetitionLevel > 0 { + canCompareRows = false + } + + if sortingIndex := searchSortingColumn(sortingColumns, leaf.path); sortingIndex < len(sortingColumns) { + leafColumns[sortingIndex] = leaf + + if leaf.maxDefinitionLevel > 0 { + canCompareRows = false + } + } + }) + + // This is an optimization for the common case where rows + // are sorted by non-optional, non-repeated columns. + // + // The sort function can make the assumption that it will + // find the column value at the current column index, and + // does not need to scan the rows looking for values with + // a matching column index. + if canCompareRows { + return compareRowsFuncOfColumnIndexes(leafColumns, sortingColumns) + } + + return compareRowsFuncOfColumnValues(leafColumns, sortingColumns) +} + +func compareRowsUnordered(Row, Row) int { return 0 } + +//go:noinline +func compareRowsFuncOfIndexColumns(compareFuncs []func(Row, Row) int) func(Row, Row) int { + return func(row1, row2 Row) int { + for _, compare := range compareFuncs { + if cmp := compare(row1, row2); cmp != 0 { + return cmp + } + } + return 0 + } +} + +//go:noinline +func compareRowsFuncOfIndexAscending(columnIndex int16, typ Type) func(Row, Row) int { + return func(row1, row2 Row) int { return typ.Compare(row1[columnIndex], row2[columnIndex]) } +} + +//go:noinline +func compareRowsFuncOfIndexDescending(columnIndex int16, typ Type) func(Row, Row) int { + return func(row1, row2 Row) int { return -typ.Compare(row1[columnIndex], row2[columnIndex]) } +} + +//go:noinline +func compareRowsFuncOfColumnIndexes(leafColumns []leafColumn, sortingColumns []SortingColumn) func(Row, Row) int { + compareFuncs := make([]func(Row, Row) int, len(sortingColumns)) + 
+ for sortingIndex, sortingColumn := range sortingColumns { + leaf := leafColumns[sortingIndex] + typ := leaf.node.Type() + + if sortingColumn.Descending() { + compareFuncs[sortingIndex] = compareRowsFuncOfIndexDescending(leaf.columnIndex, typ) + } else { + compareFuncs[sortingIndex] = compareRowsFuncOfIndexAscending(leaf.columnIndex, typ) + } + } + + switch len(compareFuncs) { + case 0: + return compareRowsUnordered + case 1: + return compareFuncs[0] + default: + return compareRowsFuncOfIndexColumns(compareFuncs) + } +} + +var columnPool memory.Pool[[][2]int32] + +//go:noinline +func compareRowsFuncOfColumnValues(leafColumns []leafColumn, sortingColumns []SortingColumn) func(Row, Row) int { + highestColumnIndex := int16(0) + columnIndexes := make([]int16, len(sortingColumns)) + compareFuncs := make([]func(Value, Value) int, len(sortingColumns)) + + for sortingIndex, sortingColumn := range sortingColumns { + leaf := leafColumns[sortingIndex] + compare := leaf.node.Type().Compare + + if sortingColumn.Descending() { + compare = CompareDescending(compare) + } + + if leaf.maxDefinitionLevel > 0 { + if sortingColumn.NullsFirst() { + compare = CompareNullsFirst(compare) + } else { + compare = CompareNullsLast(compare) + } + } + + columnIndexes[sortingIndex] = leaf.columnIndex + compareFuncs[sortingIndex] = compare + + if leaf.columnIndex > highestColumnIndex { + highestColumnIndex = leaf.columnIndex + } + } + + return func(row1, row2 Row) int { + columns1 := columnPool.Get(compareColumnNew, compareColumnReset) + columns2 := columnPool.Get(compareColumnNew, compareColumnReset) + defer columnPool.Put(columns1) + defer columnPool.Put(columns2) + + i1 := 0 + i2 := 0 + + for columnIndex := int16(0); columnIndex <= highestColumnIndex; columnIndex++ { + j1 := i1 + 1 + j2 := i2 + 1 + + for j1 < len(row1) && row1[j1].columnIndex == ^columnIndex { + j1++ + } + + for j2 < len(row2) && row2[j2].columnIndex == ^columnIndex { + j2++ + } + + *columns1 = append(*columns1, 
[2]int32{int32(i1), int32(j1)}) + *columns2 = append(*columns2, [2]int32{int32(i2), int32(j2)}) + i1 = j1 + i2 = j2 + } + + for i, compare := range compareFuncs { + columnIndex := columnIndexes[i] + offsets1 := (*columns1)[columnIndex] + offsets2 := (*columns2)[columnIndex] + values1 := row1[offsets1[0]:offsets1[1]:offsets1[1]] + values2 := row2[offsets2[0]:offsets2[1]:offsets2[1]] + i1 := 0 + i2 := 0 + + for i1 < len(values1) && i2 < len(values2) { + if cmp := compare(values1[i1], values2[i2]); cmp != 0 { + return cmp + } + i1++ + i2++ + } + + if i1 < len(values1) { + return +1 + } + if i2 < len(values2) { + return -1 + } + } + return 0 + } +} + +func compareColumnNew() *[][2]int32 { + s := make([][2]int32, 0, 128) + return &s +} + +func compareColumnReset(c *[][2]int32) { + *c = (*c)[:0] +} diff --git a/vendor/github.com/parquet-go/parquet-go/compress.go b/vendor/github.com/parquet-go/parquet-go/compress.go new file mode 100644 index 00000000000..c2b2eb130fb --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/compress.go @@ -0,0 +1,96 @@ +package parquet + +import ( + "fmt" + + "github.com/parquet-go/parquet-go/compress" + "github.com/parquet-go/parquet-go/compress/brotli" + "github.com/parquet-go/parquet-go/compress/gzip" + "github.com/parquet-go/parquet-go/compress/lz4" + "github.com/parquet-go/parquet-go/compress/snappy" + "github.com/parquet-go/parquet-go/compress/uncompressed" + "github.com/parquet-go/parquet-go/compress/zstd" + "github.com/parquet-go/parquet-go/format" +) + +var ( + // Uncompressed is a parquet compression codec representing uncompressed + // pages. + Uncompressed uncompressed.Codec + + // Snappy is the SNAPPY parquet compression codec. + Snappy snappy.Codec + + // Gzip is the GZIP parquet compression codec. + Gzip = gzip.Codec{ + Level: gzip.DefaultCompression, + } + + // Brotli is the BROTLI parquet compression codec. 
+ Brotli = brotli.Codec{ + Quality: brotli.DefaultQuality, + LGWin: brotli.DefaultLGWin, + } + + // Zstd is the ZSTD parquet compression codec. + Zstd = zstd.Codec{ + Level: zstd.DefaultLevel, + } + + // Lz4Raw is the LZ4_RAW parquet compression codec. + Lz4Raw = lz4.Codec{ + Level: lz4.DefaultLevel, + } + + // Table of compression codecs indexed by their code in the parquet format. + compressionCodecs = [...]compress.Codec{ + format.Uncompressed: &Uncompressed, + format.Snappy: &Snappy, + format.Gzip: &Gzip, + format.Brotli: &Brotli, + format.Zstd: &Zstd, + format.Lz4Raw: &Lz4Raw, + } +) + +// LookupCompressionCodec returns the compression codec associated with the +// given code. +// +// The function never returns nil. If the encoding is not supported, +// an "unsupported" codec is returned. +func LookupCompressionCodec(codec format.CompressionCodec) compress.Codec { + if codec >= 0 && int(codec) < len(compressionCodecs) { + if c := compressionCodecs[codec]; c != nil { + return c + } + } + return &unsupported{codec} +} + +type unsupported struct { + codec format.CompressionCodec +} + +func (u *unsupported) String() string { + return "UNSUPPORTED" +} + +func (u *unsupported) CompressionCodec() format.CompressionCodec { + return u.codec +} + +func (u *unsupported) Encode(dst, src []byte) ([]byte, error) { + return dst[:0], u.error() +} + +func (u *unsupported) Decode(dst, src []byte) ([]byte, error) { + return dst[:0], u.error() +} + +func (u *unsupported) error() error { + return fmt.Errorf("unsupported compression codec: %s", u.codec) +} + +func isCompressed(c compress.Codec) bool { + return c != nil && c.CompressionCodec() != format.Uncompressed +} diff --git a/vendor/github.com/parquet-go/parquet-go/compress/brotli/brotli.go b/vendor/github.com/parquet-go/parquet-go/compress/brotli/brotli.go new file mode 100644 index 00000000000..db7c0bfdf2b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/compress/brotli/brotli.go @@ -0,0 +1,54 @@ +// Package 
brotli implements the BROTLI parquet compression codec. +package brotli + +import ( + "io" + + "github.com/andybalholm/brotli" + "github.com/parquet-go/parquet-go/compress" + "github.com/parquet-go/parquet-go/format" +) + +const ( + DefaultQuality = 0 + DefaultLGWin = 0 +) + +type Codec struct { + // Quality controls the compression-speed vs compression-density trade-offs. + // The higher the quality, the slower the compression. Range is 0 to 11. + Quality int + // LGWin is the base 2 logarithm of the sliding window size. + // Range is 10 to 24. 0 indicates automatic configuration based on Quality. + LGWin int + + r compress.Decompressor + w compress.Compressor +} + +func (c *Codec) String() string { + return "BROTLI" +} + +func (c *Codec) CompressionCodec() format.CompressionCodec { + return format.Brotli +} + +func (c *Codec) Encode(dst, src []byte) ([]byte, error) { + return c.w.Encode(dst, src, func(w io.Writer) (compress.Writer, error) { + return brotli.NewWriterOptions(w, brotli.WriterOptions{ + Quality: c.Quality, + LGWin: c.LGWin, + }), nil + }) +} + +func (c *Codec) Decode(dst, src []byte) ([]byte, error) { + return c.r.Decode(dst, src, func(r io.Reader) (compress.Reader, error) { + return reader{brotli.NewReader(r)}, nil + }) +} + +type reader struct{ *brotli.Reader } + +func (reader) Close() error { return nil } diff --git a/vendor/github.com/parquet-go/parquet-go/compress/compress.go b/vendor/github.com/parquet-go/parquet-go/compress/compress.go new file mode 100644 index 00000000000..5e22f89be89 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/compress/compress.go @@ -0,0 +1,148 @@ +// Package compress provides the generic APIs implemented by parquet compression +// codecs. 
+// +// https://github.com/apache/parquet-format/blob/master/Compression.md +package compress + +import ( + "bytes" + "io" + + "github.com/parquet-go/parquet-go/format" + "github.com/parquet-go/parquet-go/internal/memory" +) + +// The Codec interface represents parquet compression codecs implemented by the +// compress sub-packages. +// +// Codec instances must be safe to use concurrently from multiple goroutines. +type Codec interface { + // Returns a human-readable name for the codec. + String() string + + // Returns the code of the compression codec in the parquet format. + CompressionCodec() format.CompressionCodec + + // Writes the compressed version of src to dst and returns it. + // + // The method automatically reallocates the output buffer if its capacity + // was too small to hold the compressed data. + Encode(dst, src []byte) ([]byte, error) + + // Writes the uncompressed version of src to dst and returns it. + // + // The method automatically reallocates the output buffer if its capacity + // was too small to hold the uncompressed data. 
+ Decode(dst, src []byte) ([]byte, error) +} + +type Reader interface { + io.ReadCloser + Reset(io.Reader) error +} + +type Writer interface { + io.WriteCloser + Reset(io.Writer) +} + +type Compressor struct { + writers memory.Pool[writer] +} + +type writer struct { + output bytes.Buffer + writer Writer +} + +func (c *Compressor) Encode(dst, src []byte, newWriter func(io.Writer) (Writer, error)) ([]byte, error) { + w := c.writers.Get( + func() *writer { + w := new(writer) + w.output = *bytes.NewBuffer(dst[:0]) + var err error + if w.writer, err = newWriter(&w.output); err != nil { + panic(err) // Will be caught below + } + return w + }, + func(w *writer) { + w.output = *bytes.NewBuffer(dst[:0]) + w.writer.Reset(&w.output) + }, + ) + + defer func() { + w.output = *bytes.NewBuffer(nil) + w.writer.Reset(io.Discard) + c.writers.Put(w) + }() + + if _, err := w.writer.Write(src); err != nil { + return w.output.Bytes(), err + } + if err := w.writer.Close(); err != nil { + return w.output.Bytes(), err + } + return w.output.Bytes(), nil +} + +type Decompressor struct { + readers memory.Pool[reader] +} + +type reader struct { + input bytes.Reader + reader Reader +} + +func (d *Decompressor) Decode(dst, src []byte, newReader func(io.Reader) (Reader, error)) ([]byte, error) { + r := d.readers.Get( + func() *reader { + r := new(reader) + r.input.Reset(src) + var err error + if r.reader, err = newReader(&r.input); err != nil { + panic(err) // Will be caught below + } + return r + }, + func(r *reader) { + r.input.Reset(src) + if err := r.reader.Reset(&r.input); err != nil { + panic(err) // Will be caught below + } + }, + ) + + defer func() { + r.input.Reset(nil) + if err := r.reader.Reset(nil); err == nil { + d.readers.Put(r) + } + }() + + if cap(dst) == 0 { + dst = make([]byte, 0, 2*len(src)) + } else { + dst = dst[:0] + } + + for { + n, err := r.reader.Read(dst[len(dst):cap(dst)]) + dst = dst[:len(dst)+n] + + if err != nil { + if err == io.EOF { + err = nil + } + return dst, 
err + } + + if len(dst) == cap(dst) { + tmp := make([]byte, len(dst), 2*len(dst)) + copy(tmp, dst) + dst = tmp + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/compress/gzip/gzip.go b/vendor/github.com/parquet-go/parquet-go/compress/gzip/gzip.go new file mode 100644 index 00000000000..99780a95097 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/compress/gzip/gzip.go @@ -0,0 +1,67 @@ +// Package gzip implements the GZIP parquet compression codec. +package gzip + +import ( + "io" + "strings" + + "github.com/klauspost/compress/gzip" + "github.com/parquet-go/parquet-go/compress" + "github.com/parquet-go/parquet-go/format" +) + +const ( + emptyGzip = "\x1f\x8b\b\x00\x00\x00\x00\x00\x02\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00" +) + +const ( + NoCompression = gzip.NoCompression + BestSpeed = gzip.BestSpeed + BestCompression = gzip.BestCompression + DefaultCompression = gzip.DefaultCompression + HuffmanOnly = gzip.HuffmanOnly +) + +type Codec struct { + Level int + + r compress.Decompressor + w compress.Compressor +} + +func (c *Codec) String() string { + return "GZIP" +} + +func (c *Codec) CompressionCodec() format.CompressionCodec { + return format.Gzip +} + +func (c *Codec) Encode(dst, src []byte) ([]byte, error) { + return c.w.Encode(dst, src, func(w io.Writer) (compress.Writer, error) { + return gzip.NewWriterLevel(w, c.Level) + }) +} + +func (c *Codec) Decode(dst, src []byte) ([]byte, error) { + return c.r.Decode(dst, src, func(r io.Reader) (compress.Reader, error) { + z, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &reader{Reader: z}, nil + }) +} + +type reader struct { + *gzip.Reader + emptyGzip strings.Reader +} + +func (r *reader) Reset(rr io.Reader) error { + if rr == nil { + r.emptyGzip.Reset(emptyGzip) + rr = &r.emptyGzip + } + return r.Reader.Reset(rr) +} diff --git a/vendor/github.com/parquet-go/parquet-go/compress/lz4/lz4.go b/vendor/github.com/parquet-go/parquet-go/compress/lz4/lz4.go 
new file mode 100644 index 00000000000..975cfed8cca --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/compress/lz4/lz4.go @@ -0,0 +1,87 @@ +// Package lz4 implements the LZ4_RAW parquet compression codec. +package lz4 + +import ( + "github.com/parquet-go/parquet-go/format" + "github.com/pierrec/lz4/v4" +) + +type Level = lz4.CompressionLevel + +const ( + Fastest = lz4.CompressionLevel(99) + Fast = lz4.Fast + Level1 = lz4.Level1 + Level2 = lz4.Level2 + Level3 = lz4.Level3 + Level4 = lz4.Level4 + Level5 = lz4.Level5 + Level6 = lz4.Level6 + Level7 = lz4.Level7 + Level8 = lz4.Level8 + Level9 = lz4.Level9 +) + +const ( + DefaultLevel = Fast +) + +type Codec struct { + Level Level +} + +func (c *Codec) String() string { + return "LZ4_RAW" +} + +func (c *Codec) CompressionCodec() format.CompressionCodec { + return format.Lz4Raw +} + +func (c *Codec) Encode(dst, src []byte) ([]byte, error) { + dst = reserveAtLeast(dst, lz4.CompressBlockBound(len(src))) + + var ( + n int + err error + ) + if c.Level == Fastest { + compressor := lz4.Compressor{} + n, err = compressor.CompressBlock(src, dst) + } else { + compressor := lz4.CompressorHC{Level: c.Level} + n, err = compressor.CompressBlock(src, dst) + } + return dst[:n], err +} + +func (c *Codec) Decode(dst, src []byte) ([]byte, error) { + // 3x seems like a common compression ratio, so we optimistically size the + // output buffer to that size. Feel free to change the value if you observe + // different behaviors. + dst = reserveAtLeast(dst, 3*len(src)) + + for { + n, err := lz4.UncompressBlock(src, dst) + // The lz4 package does not expose the error values, they are declared + // in internal/lz4errors. Based on what I read of the implementation, + // the only condition where this function errors is if the output buffer + // was too short. 
+ // + // https://github.com/pierrec/lz4/blob/a5532e5996ee86d17f8ce2694c08fb5bf3c6b471/internal/lz4block/block.go#L45-L53 + if err != nil { + dst = make([]byte, 2*len(dst)) + } else { + return dst[:n], nil + } + } +} + +func reserveAtLeast(b []byte, n int) []byte { + if cap(b) < n { + b = make([]byte, n) + } else { + b = b[:cap(b)] + } + return b +} diff --git a/vendor/github.com/parquet-go/parquet-go/compress/snappy/snappy.go b/vendor/github.com/parquet-go/parquet-go/compress/snappy/snappy.go new file mode 100644 index 00000000000..eb3febda42c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/compress/snappy/snappy.go @@ -0,0 +1,31 @@ +// Package snappy implements the SNAPPY parquet compression codec. +package snappy + +import ( + "github.com/klauspost/compress/snappy" + "github.com/parquet-go/parquet-go/format" +) + +type Codec struct { +} + +// The snappy.Reader and snappy.Writer implement snappy encoding/decoding with +// a framing protocol, but snappy requires the implementation to use the raw +// snappy block encoding. This is why we need to use snappy.Encode/snappy.Decode +// and have to ship custom implementations of the compressed reader and writer. 
+ +func (c *Codec) String() string { + return "SNAPPY" +} + +func (c *Codec) CompressionCodec() format.CompressionCodec { + return format.Snappy +} + +func (c *Codec) Encode(dst, src []byte) ([]byte, error) { + return snappy.Encode(dst, src), nil +} + +func (c *Codec) Decode(dst, src []byte) ([]byte, error) { + return snappy.Decode(dst, src) +} diff --git a/vendor/github.com/parquet-go/parquet-go/compress/uncompressed/uncompressed.go b/vendor/github.com/parquet-go/parquet-go/compress/uncompressed/uncompressed.go new file mode 100644 index 00000000000..2aa0b3538f5 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/compress/uncompressed/uncompressed.go @@ -0,0 +1,27 @@ +// Package uncompressed provides implementations of the compression codec +// interfaces as pass-through without applying any compression nor +// decompression. +package uncompressed + +import ( + "github.com/parquet-go/parquet-go/format" +) + +type Codec struct { +} + +func (c *Codec) String() string { + return "UNCOMPRESSED" +} + +func (c *Codec) CompressionCodec() format.CompressionCodec { + return format.Uncompressed +} + +func (c *Codec) Encode(dst, src []byte) ([]byte, error) { + return append(dst[:0], src...), nil +} + +func (c *Codec) Decode(dst, src []byte) ([]byte, error) { + return append(dst[:0], src...), nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/compress/zstd/zstd.go b/vendor/github.com/parquet-go/parquet-go/compress/zstd/zstd.go new file mode 100644 index 00000000000..82105fb6e70 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/compress/zstd/zstd.go @@ -0,0 +1,105 @@ +// Package zstd implements the ZSTD parquet compression codec. +package zstd + +import ( + "github.com/klauspost/compress/zstd" + "github.com/parquet-go/parquet-go/format" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type Level = zstd.EncoderLevel + +const ( + // SpeedFastest will choose the fastest reasonable compression. 
+ // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest = zstd.SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault = zstd.SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression = zstd.SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression = zstd.SpeedBestCompression +) + +const ( + DefaultLevel = SpeedDefault + + DefaultConcurrency = 1 +) + +type Codec struct { + Level Level + + // Concurrency is the number of CPU cores to use for encoding and decoding. + // If Concurrency is 0, it will use DefaultConcurrency. + Concurrency uint + + encoders memory.Pool[zstd.Encoder] + decoders memory.Pool[zstd.Decoder] +} + +func (c *Codec) String() string { + return "ZSTD" +} + +func (c *Codec) CompressionCodec() format.CompressionCodec { + return format.Zstd +} + +func (c *Codec) Encode(dst, src []byte) ([]byte, error) { + e := c.encoders.Get( + func() *zstd.Encoder { + e, err := zstd.NewWriter(nil, + zstd.WithEncoderConcurrency(c.concurrency()), + zstd.WithEncoderLevel(c.level()), + zstd.WithZeroFrames(true), + zstd.WithEncoderCRC(false), + ) + if err != nil { + panic(err) + } + return e + }, + func(e *zstd.Encoder) {}, + ) + defer c.encoders.Put(e) + return e.EncodeAll(src, dst[:0]), nil +} + +func (c *Codec) Decode(dst, src []byte) ([]byte, error) { + d := c.decoders.Get( + func() *zstd.Decoder { + d, err := zstd.NewReader(nil, + zstd.WithDecoderConcurrency(c.concurrency()), + ) + if err != nil { + panic(err) + } + return d + }, + func(d *zstd.Decoder) {}, + ) + defer c.decoders.Put(d) + return 
d.DecodeAll(src, dst[:0]) +} + +func (c *Codec) level() Level { + if c.Level != 0 { + return c.Level + } + return DefaultLevel +} + +func (c *Codec) concurrency() int { + if c.Concurrency != 0 { + return int(c.Concurrency) + } + return DefaultConcurrency +} diff --git a/vendor/github.com/parquet-go/parquet-go/config.go b/vendor/github.com/parquet-go/parquet-go/config.go new file mode 100644 index 00000000000..50b26b9d4f7 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/config.go @@ -0,0 +1,1054 @@ +package parquet + +import ( + "fmt" + "maps" + "math" + "reflect" + "runtime/debug" + "slices" + "strings" + "sync" + + "github.com/parquet-go/parquet-go/compress" + "github.com/parquet-go/parquet-go/encoding" +) + +// ReadMode is an enum that is used to configure the way that a File reads pages. +type ReadMode int + +const ( + ReadModeSync ReadMode = iota // ReadModeSync reads pages synchronously on demand (Default). + ReadModeAsync // ReadModeAsync reads pages asynchronously in the background. 
+) + +const ( + DefaultColumnIndexSizeLimit = 16 + DefaultColumnBufferCapacity = 16 * 1024 + DefaultPageBufferSize = 256 * 1024 + DefaultWriteBufferSize = 32 * 1024 + DefaultDataPageVersion = 2 + DefaultDataPageStatistics = true + DefaultSkipMagicBytes = false + DefaultSkipPageIndex = false + DefaultSkipBloomFilters = false + DefaultMaxRowsPerRowGroup = math.MaxInt64 + DefaultReadMode = ReadModeSync +) + +const ( + parquetGoModulePath = "github.com/parquet-go/parquet-go" +) + +var ( + defaultCreatedByInfo string + defaultCreatedByOnce sync.Once +) + +func defaultCreatedBy() string { + defaultCreatedByOnce.Do(func() { + createdBy := parquetGoModulePath + build, ok := debug.ReadBuildInfo() + if ok { + for _, mod := range build.Deps { + if mod.Replace == nil && mod.Path == parquetGoModulePath { + semver, _, buildsha := parseModuleVersion(mod.Version) + createdBy = formatCreatedBy(createdBy, semver, buildsha) + break + } + } + } + defaultCreatedByInfo = createdBy + }) + return defaultCreatedByInfo +} + +func parseModuleVersion(version string) (semver, datetime, buildsha string) { + semver, version = splitModuleVersion(version) + datetime, version = splitModuleVersion(version) + buildsha, _ = splitModuleVersion(version) + semver = strings.TrimPrefix(semver, "v") + return +} + +func splitModuleVersion(s string) (head, tail string) { + if i := strings.IndexByte(s, '-'); i < 0 { + head = s + } else { + head, tail = s[:i], s[i+1:] + } + return +} + +func formatCreatedBy(application, version, build string) string { + return application + " version " + version + "(build " + build + ")" +} + +// The FileConfig type carries configuration options for parquet files. 
+// +// FileConfig implements the FileOption interface so it can be used directly +// as argument to the OpenFile function when needed, for example: +// +// f, err := parquet.OpenFile(reader, size, &parquet.FileConfig{ +// SkipPageIndex: true, +// SkipBloomFilters: true, +// ReadMode: ReadModeAsync, +// }) +type FileConfig struct { + SkipMagicBytes bool + SkipPageIndex bool + SkipBloomFilters bool + OptimisticRead bool + ReadBufferSize int + ReadMode ReadMode + Schema *Schema +} + +// DefaultFileConfig returns a new FileConfig value initialized with the +// default file configuration. +func DefaultFileConfig() *FileConfig { + return &FileConfig{ + SkipMagicBytes: DefaultSkipMagicBytes, + SkipPageIndex: DefaultSkipPageIndex, + SkipBloomFilters: DefaultSkipBloomFilters, + ReadBufferSize: defaultReadBufferSize, + ReadMode: DefaultReadMode, + Schema: nil, + } +} + +// NewFileConfig constructs a new file configuration applying the options passed +// as arguments. +// +// The function returns an non-nil error if some of the options carried invalid +// configuration values. +func NewFileConfig(options ...FileOption) (*FileConfig, error) { + config := DefaultFileConfig() + config.Apply(options...) + return config, config.Validate() +} + +// Apply applies the given list of options to c. +func (c *FileConfig) Apply(options ...FileOption) { + for _, opt := range options { + opt.ConfigureFile(c) + } +} + +// ConfigureFile applies configuration options from c to config. +func (c *FileConfig) ConfigureFile(config *FileConfig) { + *config = FileConfig{ + SkipMagicBytes: c.SkipMagicBytes, + SkipPageIndex: c.SkipPageIndex, + SkipBloomFilters: c.SkipBloomFilters, + ReadBufferSize: coalesceInt(c.ReadBufferSize, config.ReadBufferSize), + ReadMode: ReadMode(coalesceInt(int(c.ReadMode), int(config.ReadMode))), + Schema: coalesceSchema(c.Schema, config.Schema), + } +} + +// Validate returns a non-nil error if the configuration of c is invalid. 
+func (c *FileConfig) Validate() error { + return nil +} + +// The ReaderConfig type carries configuration options for parquet readers. +// +// ReaderConfig implements the ReaderOption interface so it can be used directly +// as argument to the NewReader function when needed, for example: +// +// reader := parquet.NewReader(output, schema, &parquet.ReaderConfig{ +// // ... +// }) +type ReaderConfig struct { + Schema *Schema + SchemaConfig *SchemaConfig +} + +// DefaultReaderConfig returns a new ReaderConfig value initialized with the +// default reader configuration. +func DefaultReaderConfig() *ReaderConfig { + return &ReaderConfig{ + SchemaConfig: DefaultSchemaConfig(), + } +} + +// NewReaderConfig constructs a new reader configuration applying the options +// passed as arguments. +// +// The function returns an non-nil error if some of the options carried invalid +// configuration values. +func NewReaderConfig(options ...ReaderOption) (*ReaderConfig, error) { + config := DefaultReaderConfig() + config.Apply(options...) + return config, config.Validate() +} + +// Apply applies the given list of options to c. +func (c *ReaderConfig) Apply(options ...ReaderOption) { + for _, opt := range options { + opt.ConfigureReader(c) + } +} + +// ConfigureReader applies configuration options from c to config. +func (c *ReaderConfig) ConfigureReader(config *ReaderConfig) { + *config = ReaderConfig{ + Schema: coalesceSchema(c.Schema, config.Schema), + SchemaConfig: coalesceSchemaConfig(c.SchemaConfig, config.SchemaConfig), + } +} + +// Validate returns a non-nil error if the configuration of c is invalid. +func (c *ReaderConfig) Validate() error { + return nil +} + +// The WriterConfig type carries configuration options for parquet writers. 
+// +// WriterConfig implements the WriterOption interface so it can be used directly +// as argument to the NewWriter function when needed, for example: +// +// writer := parquet.NewWriter(output, schema, &parquet.WriterConfig{ +// CreatedBy: "my test program", +// }) +type WriterConfig struct { + CreatedBy string + ColumnPageBuffers BufferPool + ColumnIndexSizeLimit func(path []string) int + PageBufferSize int + WriteBufferSize int + DataPageVersion int + DataPageStatistics bool + DeprecatedDataPageStatistics bool + MaxRowsPerRowGroup int64 + KeyValueMetadata map[string]string + Schema *Schema + BloomFilters []BloomFilterColumn + Compression compress.Codec + Sorting SortingConfig + SkipPageBounds [][]string + Encodings map[Kind]encoding.Encoding + DictionaryMaxBytes int64 + SchemaConfig *SchemaConfig +} + +// DefaultWriterConfig returns a new WriterConfig value initialized with the +// default writer configuration. +func DefaultWriterConfig() *WriterConfig { + return &WriterConfig{ + CreatedBy: defaultCreatedBy(), + ColumnPageBuffers: &defaultColumnBufferPool, + ColumnIndexSizeLimit: func(path []string) int { return DefaultColumnIndexSizeLimit }, + PageBufferSize: DefaultPageBufferSize, + WriteBufferSize: DefaultWriteBufferSize, + DataPageVersion: DefaultDataPageVersion, + DataPageStatistics: DefaultDataPageStatistics, + MaxRowsPerRowGroup: DefaultMaxRowsPerRowGroup, + SchemaConfig: DefaultSchemaConfig(), + Sorting: SortingConfig{ + SortingBuffers: &defaultSortingBufferPool, + }, + } +} + +// NewWriterConfig constructs a new writer configuration applying the options +// passed as arguments. +// +// The function returns an non-nil error if some of the options carried invalid +// configuration values. +func NewWriterConfig(options ...WriterOption) (*WriterConfig, error) { + config := DefaultWriterConfig() + config.Apply(options...) + return config, config.Validate() +} + +// Apply applies the given list of options to c. 
+func (c *WriterConfig) Apply(options ...WriterOption) { + for _, opt := range options { + opt.ConfigureWriter(c) + } +} + +// ConfigureWriter applies configuration options from c to config. +func (c *WriterConfig) ConfigureWriter(config *WriterConfig) { + keyValueMetadata := config.KeyValueMetadata + if len(c.KeyValueMetadata) > 0 { + if keyValueMetadata == nil { + keyValueMetadata = make(map[string]string, len(c.KeyValueMetadata)) + } + maps.Copy(keyValueMetadata, c.KeyValueMetadata) + } + + encodings := config.Encodings + if len(c.Encodings) > 0 { + if encodings == nil { + encodings = make(map[Kind]encoding.Encoding, len(c.Encodings)) + } + maps.Copy(encodings, c.Encodings) + } + + *config = WriterConfig{ + CreatedBy: coalesceString(c.CreatedBy, config.CreatedBy), + ColumnPageBuffers: coalesceBufferPool(c.ColumnPageBuffers, config.ColumnPageBuffers), + ColumnIndexSizeLimit: coalesceColumnIndexLimit(c.ColumnIndexSizeLimit, config.ColumnIndexSizeLimit), + PageBufferSize: coalesceInt(c.PageBufferSize, config.PageBufferSize), + WriteBufferSize: coalesceInt(c.WriteBufferSize, config.WriteBufferSize), + DataPageVersion: coalesceInt(c.DataPageVersion, config.DataPageVersion), + DataPageStatistics: coalesceBool(c.DataPageStatistics, config.DataPageStatistics), + DeprecatedDataPageStatistics: coalesceBool(c.DeprecatedDataPageStatistics, config.DeprecatedDataPageStatistics), + MaxRowsPerRowGroup: coalesceInt64(c.MaxRowsPerRowGroup, config.MaxRowsPerRowGroup), + KeyValueMetadata: keyValueMetadata, + Schema: coalesceSchema(c.Schema, config.Schema), + BloomFilters: coalesceSlices(c.BloomFilters, config.BloomFilters), + Compression: coalesceCompression(c.Compression, config.Compression), + Sorting: coalesceSortingConfig(c.Sorting, config.Sorting), + SkipPageBounds: coalesceSlices(c.SkipPageBounds, config.SkipPageBounds), + Encodings: encodings, + SchemaConfig: coalesceSchemaConfig(c.SchemaConfig, config.SchemaConfig), + } +} + +// Validate returns a non-nil error if the 
configuration of c is invalid. +func (c *WriterConfig) Validate() error { + const baseName = "parquet.(*WriterConfig)." + return errorInvalidConfiguration( + validateNotNil(baseName+"ColumnPageBuffers", c.ColumnPageBuffers), + validatePositiveInt(baseName+"PageBufferSize", c.PageBufferSize), + validateOneOfInt(baseName+"DataPageVersion", c.DataPageVersion, 1, 2), + c.Sorting.Validate(), + ) +} + +// The RowGroupConfig type carries configuration options for parquet row groups. +// +// RowGroupConfig implements the RowGroupOption interface so it can be used +// directly as argument to the NewBuffer function when needed, for example: +// +// buffer := parquet.NewBuffer(&parquet.RowGroupConfig{ +// ColumnBufferCapacity: 10_000, +// }) +type RowGroupConfig struct { + ColumnBufferCapacity int + Schema *Schema + Sorting SortingConfig +} + +// DefaultRowGroupConfig returns a new RowGroupConfig value initialized with the +// default row group configuration. +func DefaultRowGroupConfig() *RowGroupConfig { + return &RowGroupConfig{ + ColumnBufferCapacity: DefaultColumnBufferCapacity, + Sorting: SortingConfig{ + SortingBuffers: &defaultSortingBufferPool, + }, + } +} + +// NewRowGroupConfig constructs a new row group configuration applying the +// options passed as arguments. +// +// The function returns an non-nil error if some of the options carried invalid +// configuration values. +func NewRowGroupConfig(options ...RowGroupOption) (*RowGroupConfig, error) { + config := DefaultRowGroupConfig() + config.Apply(options...) + return config, config.Validate() +} + +// Validate returns a non-nil error if the configuration of c is invalid. +func (c *RowGroupConfig) Validate() error { + const baseName = "parquet.(*RowGroupConfig)." 
+ return errorInvalidConfiguration( + validatePositiveInt(baseName+"ColumnBufferCapacity", c.ColumnBufferCapacity), + c.Sorting.Validate(), + ) +} + +func (c *RowGroupConfig) Apply(options ...RowGroupOption) { + for _, opt := range options { + opt.ConfigureRowGroup(c) + } +} + +func (c *RowGroupConfig) ConfigureRowGroup(config *RowGroupConfig) { + *config = RowGroupConfig{ + ColumnBufferCapacity: coalesceInt(c.ColumnBufferCapacity, config.ColumnBufferCapacity), + Schema: coalesceSchema(c.Schema, config.Schema), + Sorting: coalesceSortingConfig(c.Sorting, config.Sorting), + } +} + +// The SortingConfig type carries configuration options for parquet row groups. +// +// SortingConfig implements the SortingOption interface so it can be used +// directly as argument to the NewSortingWriter function when needed, +// for example: +// +// buffer := parquet.NewSortingWriter[Row]( +// parquet.SortingWriterConfig( +// parquet.DropDuplicatedRows(true), +// ), +// }) +type SortingConfig struct { + SortingBuffers BufferPool + SortingColumns []SortingColumn + DropDuplicatedRows bool +} + +// DefaultSortingConfig returns a new SortingConfig value initialized with the +// default row group configuration. +func DefaultSortingConfig() *SortingConfig { + return &SortingConfig{ + SortingBuffers: &defaultSortingBufferPool, + } +} + +// NewSortingConfig constructs a new sorting configuration applying the +// options passed as arguments. +// +// The function returns an non-nil error if some of the options carried invalid +// configuration values. +func NewSortingConfig(options ...SortingOption) (*SortingConfig, error) { + config := DefaultSortingConfig() + config.Apply(options...) + return config, config.Validate() +} + +func (c *SortingConfig) Validate() error { + const baseName = "parquet.(*SortingConfig)." 
+ return errorInvalidConfiguration( + validateNotNil(baseName+"SortingBuffers", c.SortingBuffers), + ) +} + +func (c *SortingConfig) Apply(options ...SortingOption) { + for _, opt := range options { + opt.ConfigureSorting(c) + } +} + +func (c *SortingConfig) ConfigureSorting(config *SortingConfig) { + *config = coalesceSortingConfig(*c, *config) +} + +// SchemaOption is an interface implemented by types that carry configuration +// options for parquet schemas. SchemaOption also implements ReaderOption and WriterOption +// and may be used to configure the way NewGenericReader and NewGenericWriter derive schemas from the arguments. +type SchemaOption interface { + ReaderOption + WriterOption + + ConfigureSchema(*SchemaConfig) +} + +// FileOption is an interface implemented by types that carry configuration +// options for parquet files. +type FileOption interface { + ConfigureFile(*FileConfig) +} + +// ReaderOption is an interface implemented by types that carry configuration +// options for parquet readers. +type ReaderOption interface { + ConfigureReader(*ReaderConfig) +} + +// WriterOption is an interface implemented by types that carry configuration +// options for parquet writers. +type WriterOption interface { + ConfigureWriter(*WriterConfig) +} + +// RowGroupOption is an interface implemented by types that carry configuration +// options for parquet row groups. +type RowGroupOption interface { + ConfigureRowGroup(*RowGroupConfig) +} + +// SortingOption is an interface implemented by types that carry configuration +// options for parquet sorting writers. +type SortingOption interface { + ConfigureSorting(*SortingConfig) +} + +// SkipMagicBytes is a file configuration option which prevents automatically +// reading the magic bytes when opening a parquet file, when set to true. This +// is useful as an optimization when programs can trust that they are dealing +// with parquet files and do not need to verify the first 4 bytes. 
+func SkipMagicBytes(skip bool) FileOption { + return fileOption(func(config *FileConfig) { config.SkipMagicBytes = skip }) +} + +// SkipPageIndex is a file configuration option which prevents automatically +// reading the page index when opening a parquet file, when set to true. This is +// useful as an optimization when programs know that they will not need to +// consume the page index. +// +// Defaults to false. +func SkipPageIndex(skip bool) FileOption { + return fileOption(func(config *FileConfig) { config.SkipPageIndex = skip }) +} + +// SkipBloomFilters is a file configuration option which prevents automatically +// reading the bloom filters when opening a parquet file, when set to true. +// This is useful as an optimization when programs know that they will not need +// to consume the bloom filters. +// +// Defaults to false. +func SkipBloomFilters(skip bool) FileOption { + return fileOption(func(config *FileConfig) { config.SkipBloomFilters = skip }) +} + +// OptimisticRead configures a file to optimistically perform larger buffered +// reads to improve performance. This is useful when reading from remote storage +// and amortize the cost of network round trips. +// +// This is an option instead of enabled by default because dependents of this +// package have historically relied on the read patterns to provide external +// caches and achieve similar results (e.g., Tempo). +func OptimisticRead(enabled bool) FileOption { + return fileOption(func(config *FileConfig) { config.OptimisticRead = enabled }) +} + +// FileReadMode is a file configuration option which controls the way pages +// are read. Currently the only two options are ReadModeAsync and ReadModeSync +// which control whether or not pages are loaded asynchronously. It can be +// advantageous to use ReadModeAsync if your reader is backed by network +// storage. +// +// Defaults to ReadModeSync. 
+func FileReadMode(mode ReadMode) FileOption { + return fileOption(func(config *FileConfig) { config.ReadMode = mode }) +} + +// ReadBufferSize is a file configuration option which controls the default +// buffer sizes for reads made to the provided io.Reader. The default of 4096 +// is appropriate for disk based access but if your reader is backed by network +// storage it can be advantageous to increase this value to something more like +// 4 MiB. +// +// Defaults to 4096. +func ReadBufferSize(size int) FileOption { + return fileOption(func(config *FileConfig) { config.ReadBufferSize = size }) +} + +// FileSchema is used to pass a known schema in while opening a Parquet file. +// This optimization is only useful if your application is currently opening +// an extremely large number of parquet files with the same, known schema. +// +// Defaults to nil. +func FileSchema(schema *Schema) FileOption { + return fileOption(func(config *FileConfig) { config.Schema = schema }) +} + +// PageBufferSize configures the size of column page buffers on parquet writers. +// +// Note that the page buffer size refers to the in-memory buffers where pages +// are generated, not the size of pages after encoding and compression. +// This design choice was made to help control the amount of memory needed to +// read and write pages rather than controlling the space used by the encoded +// representation on disk. +// +// Defaults to 256KiB. +func PageBufferSize(size int) WriterOption { + return writerOption(func(config *WriterConfig) { config.PageBufferSize = size }) +} + +// WriteBufferSize configures the size of the write buffer. +// +// Setting the writer buffer size to zero deactivates buffering, all writes are +// immediately sent to the output io.Writer. +// +// Defaults to 32KiB. 
+func WriteBufferSize(size int) WriterOption { + return writerOption(func(config *WriterConfig) { config.WriteBufferSize = size }) +} + +// MaxRowsPerRowGroup configures the maximum number of rows that a writer will +// produce in each row group. +// +// This limit is useful to control size of row groups in both number of rows and +// byte size. While controlling the byte size of a row group is difficult to +// achieve with parquet due to column encoding and compression, the number of +// rows remains a useful proxy. +// +// Defaults to unlimited. +func MaxRowsPerRowGroup(numRows int64) WriterOption { + if numRows <= 0 { + numRows = DefaultMaxRowsPerRowGroup + } + return writerOption(func(config *WriterConfig) { config.MaxRowsPerRowGroup = numRows }) +} + +// CreatedBy creates a configuration option which sets the name of the +// application that created a parquet file. +// +// The option formats the "CreatedBy" file metadata according to the convention +// described by the parquet spec: +// +// " version (build )" +// +// By default, the option is set to the parquet-go module name, version, and +// build hash. +func CreatedBy(application, version, build string) WriterOption { + createdBy := formatCreatedBy(application, version, build) + return writerOption(func(config *WriterConfig) { config.CreatedBy = createdBy }) +} + +// ColumnPageBuffers creates a configuration option to customize the buffer pool +// used when constructing row groups. This can be used to provide on-disk buffers +// as swap space to ensure that the parquet file creation will no be bottlenecked +// on the amount of memory available. +// +// Defaults to using in-memory buffers. +func ColumnPageBuffers(buffers BufferPool) WriterOption { + return writerOption(func(config *WriterConfig) { config.ColumnPageBuffers = buffers }) +} + +// ColumnIndexSizeLimit creates a configuration option to customize the size +// limit of page boundaries recorded in column indexes. 
The result of the provided
+// function must be larger than 0.
+func DeprecatedDataPageStatistics(enabled bool) WriterOption { + return writerOption(func(config *WriterConfig) { config.DeprecatedDataPageStatistics = enabled }) +} + +// KeyValueMetadata creates a configuration option which adds key/value metadata +// to add to the metadata of parquet files. +// +// This option is additive, it may be used multiple times to add more than one +// key/value pair. +// +// Keys are assumed to be unique, if the same key is repeated multiple times the +// last value is retained. While the parquet format does not require unique keys, +// this design decision was made to optimize for the most common use case where +// applications leverage this extension mechanism to associate single values to +// keys. This may create incompatibilities with other parquet libraries, or may +// cause some key/value pairs to be lost when open parquet files written with +// repeated keys. We can revisit this decision if it ever becomes a blocker. +func KeyValueMetadata(key, value string) WriterOption { + return writerOption(func(config *WriterConfig) { + if config.KeyValueMetadata == nil { + config.KeyValueMetadata = map[string]string{key: value} + } else { + config.KeyValueMetadata[key] = value + } + }) +} + +// BloomFilters creates a configuration option which defines the bloom filters +// that parquet writers should generate. +// +// The compute and memory footprint of generating bloom filters for all columns +// of a parquet schema can be significant, so by default no filters are created +// and applications need to explicitly declare the columns that they want to +// create filters for. +func BloomFilters(filters ...BloomFilterColumn) WriterOption { + filters = slices.Clone(filters) + return writerOption(func(config *WriterConfig) { config.BloomFilters = filters }) +} + +// Compression creates a configuration option which sets the default compression +// codec used by a writer for columns where none were defined. 
+func Compression(codec compress.Codec) WriterOption { + return writerOption(func(config *WriterConfig) { config.Compression = codec }) +} + +// SortingWriterConfig is a writer option which applies configuration specific +// to sorting writers. +func SortingWriterConfig(options ...SortingOption) WriterOption { + options = slices.Clone(options) + return writerOption(func(config *WriterConfig) { config.Sorting.Apply(options...) }) +} + +// SkipPageBounds lists the path to a column that shouldn't have bounds written to the +// footer of the parquet file. This is useful for data blobs, like a raw html file, +// where the bounds are not meaningful. +// +// This option is additive, it may be used multiple times to skip multiple columns. +func SkipPageBounds(path ...string) WriterOption { + return writerOption(func(config *WriterConfig) { config.SkipPageBounds = append(config.SkipPageBounds, path) }) +} + +// DefaultEncodingFor creates a configuration option which sets the default encoding +// used by a writer for columns with the specified primitive type where none were defined. +// +// It will fail if the specified enconding isn't compatible with the specified primitive type. +func DefaultEncodingFor(kind Kind, enc encoding.Encoding) WriterOption { + return writerOption(func(config *WriterConfig) { defaultEncodingFor(config, kind, enc) }) +} + +func defaultEncodingFor(config *WriterConfig, kind Kind, enc encoding.Encoding) { + if !canEncode(enc, kind) { + panic("cannot use encoding " + enc.Encoding().String() + " for kind " + kind.String()) + } + if config.Encodings == nil { + config.Encodings = map[Kind]encoding.Encoding{kind: enc} + } else { + config.Encodings[kind] = enc + } +} + +// DefaultEncoding creates a configuration option which sets the default encoding +// used by a writer for columns where none were defined. +// +// It will fail if the specified enconding isn't compatible with any of the primitive types. 
+func DefaultEncoding(enc encoding.Encoding) WriterOption { + return writerOption(func(config *WriterConfig) { + defaultEncodingFor(config, Boolean, enc) + defaultEncodingFor(config, Int32, enc) + defaultEncodingFor(config, Int64, enc) + defaultEncodingFor(config, Int96, enc) + defaultEncodingFor(config, Float, enc) + defaultEncodingFor(config, Double, enc) + defaultEncodingFor(config, ByteArray, enc) + defaultEncodingFor(config, FixedLenByteArray, enc) + }) +} + +// DictionaryMaxBytes creates a configuration option which sets the maximum +// size in bytes for each column's dictionary. +// +// When a column's dictionary exceeds this limit, that column will switch from +// dictionary encoding to PLAIN encoding for the remainder of the row group. +// Pages written before the limit was reached remain dictionary-encoded, while +// subsequent pages use PLAIN encoding. +// +// A value of 0 (the default) means unlimited dictionary size. +func DictionaryMaxBytes(size int64) WriterOption { + return writerOption(func(config *WriterConfig) { config.DictionaryMaxBytes = size }) +} + +// ColumnBufferCapacity creates a configuration option which defines the size of +// row group column buffers. +// +// Defaults to 16384. +func ColumnBufferCapacity(size int) RowGroupOption { + return rowGroupOption(func(config *RowGroupConfig) { config.ColumnBufferCapacity = size }) +} + +// SortingRowGroupConfig is a row group option which applies configuration +// specific sorting row groups. +func SortingRowGroupConfig(options ...SortingOption) RowGroupOption { + options = slices.Clone(options) + return rowGroupOption(func(config *RowGroupConfig) { config.Sorting.Apply(options...) }) +} + +// SortingColumns creates a configuration option which defines the sorting order +// of columns in a row group. +// +// The order of sorting columns passed as argument defines the ordering +// hierarchy; when elements are equal in the first column, the second column is +// used to order rows, etc... 
+func SortingColumns(columns ...SortingColumn) SortingOption { + // Make a copy so that we do not retain the input slice generated implicitly + // for the variable argument list, and also avoid having a nil slice when + // the option is passed with no sorting columns, so we can differentiate it + // from it not being passed. + columns = slices.Clone(columns) + return sortingOption(func(config *SortingConfig) { config.SortingColumns = columns }) +} + +// SortingBuffers creates a configuration option which sets the pool of buffers +// used to hold intermediary state when sorting parquet rows. +// +// Defaults to using in-memory buffers. +func SortingBuffers(buffers BufferPool) SortingOption { + return sortingOption(func(config *SortingConfig) { config.SortingBuffers = buffers }) +} + +// DropDuplicatedRows configures whether a sorting writer will keep or remove +// duplicated rows. +// +// Two rows are considered duplicates if the values of their all their sorting +// columns are equal. +// +// Defaults to false +func DropDuplicatedRows(drop bool) SortingOption { + return sortingOption(func(config *SortingConfig) { config.DropDuplicatedRows = drop }) +} + +// The SchemaConfig type carries configuration options for parquet schemas. +// +// SchemaConfig implements the SchemaOption interface so it can be used directly +// as argument to the SchemaOf function when needed, for example: +// +// schema := parquet.SchemaOf(obj, &parquet.SchemaConfig{ +// ... 
+// }) +type SchemaConfig struct { + StructTags []StructTagOption +} + +func (c *SchemaConfig) ConfigureSchema(config *SchemaConfig) { + config.StructTags = coalesceStructTags(c.StructTags, config.StructTags) +} + +func (c *SchemaConfig) ConfigureReader(config *ReaderConfig) { + c.ConfigureSchema(config.SchemaConfig) +} + +func (c *SchemaConfig) ConfigureWriter(config *WriterConfig) { + c.ConfigureSchema(config.SchemaConfig) +} + +func DefaultSchemaConfig() *SchemaConfig { + return &SchemaConfig{} +} + +// StructTagOption performs runtime replacement of "parquet..." struct tags. This +// option can be used anywhere a schema is derived from a Go struct including +// SchemaOf, NewGenericReader, and NewGenericWriter. +type StructTagOption struct { + ColumnPath []string + StructTag reflect.StructTag +} + +var ( + _ SchemaOption = (*StructTagOption)(nil) + _ ReaderOption = (*StructTagOption)(nil) + _ WriterOption = (*StructTagOption)(nil) +) + +// StructTag performs runtime replacement of struct tags when deriving a schema from +// a Go struct for the column at the given path. This option can be used anywhere a schema is +// derived from a Go struct including SchemaOf, NewGenericReader, and NewGenericWriter. +// +// This option is additive, it may be used multiple times to affect multiple columns. +// +// When renaming a column, configure the option by its original name. 
+func StructTag(tag reflect.StructTag, path ...string) SchemaOption { + return &StructTagOption{StructTag: tag, ColumnPath: path} +} + +func (f *StructTagOption) ConfigureSchema(config *SchemaConfig) { + config.StructTags = append(config.StructTags, *f) +} + +func (f *StructTagOption) ConfigureWriter(config *WriterConfig) { + f.ConfigureSchema(config.SchemaConfig) +} + +func (f *StructTagOption) ConfigureReader(config *ReaderConfig) { + f.ConfigureSchema(config.SchemaConfig) +} + +type fileOption func(*FileConfig) + +func (opt fileOption) ConfigureFile(config *FileConfig) { opt(config) } + +type readerOption func(*ReaderConfig) + +func (opt readerOption) ConfigureReader(config *ReaderConfig) { opt(config) } + +type writerOption func(*WriterConfig) + +func (opt writerOption) ConfigureWriter(config *WriterConfig) { opt(config) } + +type rowGroupOption func(*RowGroupConfig) + +func (opt rowGroupOption) ConfigureRowGroup(config *RowGroupConfig) { opt(config) } + +type sortingOption func(*SortingConfig) + +func (opt sortingOption) ConfigureSorting(config *SortingConfig) { opt(config) } + +func coalesceBool(i1, i2 bool) bool { + return i1 || i2 +} + +func coalesceInt(i1, i2 int) int { + if i1 != 0 { + return i1 + } + return i2 +} + +func coalesceInt64(i1, i2 int64) int64 { + if i1 != 0 { + return i1 + } + return i2 +} + +func coalesceString(s1, s2 string) string { + if s1 != "" { + return s1 + } + return s2 +} + +func coalesceSlices[T any](s1, s2 []T) []T { + if s1 != nil { + return s1 + } + return s2 +} + +func coalesceColumnIndexLimit(f1, f2 func([]string) int) func([]string) int { + if f1 != nil { + return f1 + } + return f2 +} + +func coalesceBufferPool(p1, p2 BufferPool) BufferPool { + if p1 != nil { + return p1 + } + return p2 +} + +func coalesceSchema(s1, s2 *Schema) *Schema { + if s1 != nil { + return s1 + } + return s2 +} + +func coalesceSortingConfig(c1, c2 SortingConfig) SortingConfig { + return SortingConfig{ + SortingBuffers: 
coalesceBufferPool(c1.SortingBuffers, c2.SortingBuffers), + SortingColumns: coalesceSlices(c1.SortingColumns, c2.SortingColumns), + DropDuplicatedRows: c1.DropDuplicatedRows, + } +} + +func coalesceCompression(c1, c2 compress.Codec) compress.Codec { + if c1 != nil { + return c1 + } + return c2 +} + +func coalesceSchemaConfig(f1, f2 *SchemaConfig) *SchemaConfig { + if f1 != nil { + return f1 + } + return f2 +} + +func coalesceStructTags(s1, s2 []StructTagOption) []StructTagOption { + if len(s1) > 0 { + return s1 + } + return s2 +} + +func validatePositiveInt(optionName string, optionValue int) error { + if optionValue > 0 { + return nil + } + return errorInvalidOptionValue(optionName, optionValue) +} + +func validatePositiveInt64(optionName string, optionValue int64) error { + if optionValue > 0 { + return nil + } + return errorInvalidOptionValue(optionName, optionValue) +} + +func validateOneOfInt(optionName string, optionValue int, supportedValues ...int) error { + if slices.Contains(supportedValues, optionValue) { + return nil + } + return errorInvalidOptionValue(optionName, optionValue) +} + +func validateNotNil(optionName string, optionValue any) error { + if optionValue != nil { + return nil + } + return errorInvalidOptionValue(optionName, optionValue) +} + +func errorInvalidOptionValue(optionName string, optionValue any) error { + return fmt.Errorf("invalid option value: %s: %v", optionName, optionValue) +} + +func errorInvalidConfiguration(reasons ...error) error { + var err *invalidConfiguration + + for _, reason := range reasons { + if reason != nil { + if err == nil { + err = new(invalidConfiguration) + } + err.reasons = append(err.reasons, reason) + } + } + + if err != nil { + return err + } + + return nil +} + +type invalidConfiguration struct { + reasons []error +} + +func (err *invalidConfiguration) Error() string { + errorMessage := new(strings.Builder) + for _, reason := range err.reasons { + errorMessage.WriteString(reason.Error()) + 
errorMessage.WriteString("\n") + } + errorString := errorMessage.String() + if errorString != "" { + errorString = errorString[:len(errorString)-1] + } + return errorString +} + +var ( + _ FileOption = (*FileConfig)(nil) + _ ReaderOption = (*ReaderConfig)(nil) + _ WriterOption = (*WriterConfig)(nil) + _ RowGroupOption = (*RowGroupConfig)(nil) + _ SortingOption = (*SortingConfig)(nil) + _ SchemaOption = (*SchemaConfig)(nil) +) diff --git a/vendor/github.com/parquet-go/parquet-go/convert.go b/vendor/github.com/parquet-go/parquet-go/convert.go new file mode 100644 index 00000000000..8fba4432225 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/convert.go @@ -0,0 +1,1534 @@ +package parquet + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "math" + "math/big" + "strconv" + "time" + + "github.com/parquet-go/parquet-go/internal/memory" + "golang.org/x/sys/cpu" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// ConvertError is an error type returned by calls to Convert when the conversion +// of parquet schemas is impossible or the input row for the conversion is +// malformed. +type ConvertError struct { + Path []string + From Node + To Node +} + +// Error satisfies the error interface. +func (e *ConvertError) Error() string { + sourceType := e.From.Type() + targetType := e.To.Type() + + sourceRepetition := fieldRepetitionTypeOf(e.From) + targetRepetition := fieldRepetitionTypeOf(e.To) + + return fmt.Sprintf("cannot convert parquet column %q from %s %s to %s %s", + columnPath(e.Path), + sourceRepetition, + sourceType, + targetRepetition, + targetType, + ) +} + +// Conversion is an interface implemented by types that provide conversion of +// parquet rows from one schema to another. +// +// Conversion instances must be safe to use concurrently from multiple goroutines. 
+type Conversion interface { + // Applies the conversion logic on the src row, returning the result + // appended to dst. + Convert(rows []Row) (int, error) + // Converts the given column index in the target schema to the original + // column index in the source schema of the conversion. + Column(int) int + // Returns the target schema of the conversion. + Schema() *Schema +} + +type conversion struct { + columns []conversionColumn + schema *Schema + buffers memory.Pool[conversionBuffer] + // This field is used to size the column buffers held in the memory.Pool since + // they are intended to store the source rows being converted from. + numberOfSourceColumns int +} + +type conversionBuffer struct { + columns [][]Value +} + +type conversionColumn struct { + sourceIndex int + convertValues conversionFunc + targetKind Kind // Target column kind for creating proper null values + isOptional bool // Whether the target column is optional (for null handling) +} + +type conversionFunc func([]Value) error + +func convertToSelf(column []Value) error { return nil } + +func convertToType(targetType, sourceType Type) conversionFunc { + return func(column []Value) error { + for i, v := range column { + v, err := targetType.ConvertValue(v, sourceType) + if err != nil { + return err + } + column[i].ptr = v.ptr + column[i].u64 = v.u64 + column[i].kind = v.kind + } + return nil + } +} + +func convertToZero(kind Kind) conversionFunc { + switch kind { + case Boolean: + return convertToZeroBoolean + case Int32: + return convertToZeroInt32 + case Int64: + return convertToZeroInt64 + case Int96: + return convertToZeroInt96 + case Float: + return convertToZeroFloat + case Double: + return convertToZeroDouble + case ByteArray: + return convertToZeroByteArray + case FixedLenByteArray: + return convertToZeroFixedLenByteArray + default: + return convertToSelf + } +} + +func convertToZeroKind(column []Value, kind int8) error { + for i := range column { + column[i].ptr = nil + column[i].u64 = 0 
+ column[i].kind = kind + } + return nil +} + +func convertToZeroBoolean(column []Value) error { + return convertToZeroKind(column, ^int8(Boolean)) +} + +func convertToZeroInt32(column []Value) error { + return convertToZeroKind(column, ^int8(Int32)) +} + +func convertToZeroInt64(column []Value) error { + return convertToZeroKind(column, ^int8(Int64)) +} + +func convertToZeroInt96(column []Value) error { + return convertToZeroKind(column, ^int8(Int96)) +} + +func convertToZeroFloat(column []Value) error { + return convertToZeroKind(column, ^int8(Float)) +} + +func convertToZeroDouble(column []Value) error { + return convertToZeroKind(column, ^int8(Double)) +} + +func convertToZeroByteArray(column []Value) error { + return convertToZeroKind(column, ^int8(ByteArray)) +} + +func convertToZeroFixedLenByteArray(column []Value) error { + return convertToZeroKind(column, ^int8(FixedLenByteArray)) +} + +func convertToNull(column []Value) error { + return convertToZeroKind(column, 0) // kind = 0 indicates null +} + +func convertToNullOptional(maxDefinitionLevel byte) conversionFunc { + return func(column []Value) error { + for i := range column { + column[i].ptr = nil + column[i].u64 = 0 + column[i].kind = 0 // kind = 0 indicates null + // For optional fields, if the source value is present (defLevel == max), + // we need to set defLevel to max-1 to indicate null at the leaf level + if column[i].definitionLevel == maxDefinitionLevel { + column[i].definitionLevel-- + } + // If source is already null (defLevel < max), keep the same defLevel + } + return nil + } +} + +func convertToLevels(repetitionLevels, definitionLevels []byte) conversionFunc { + return func(column []Value) error { + for i := range column { + r := column[i].repetitionLevel + d := column[i].definitionLevel + column[i].repetitionLevel = repetitionLevels[r] + column[i].definitionLevel = definitionLevels[d] + } + return nil + } +} + +func multiConversionFunc(conversions []conversionFunc) conversionFunc { + 
switch len(conversions) {
+ case 0:
+ return convertToSelf
+ case 1:
+ return conversions[0]
+ default:
+ return func(column []Value) error {
+ for _, conv := range conversions {
+ if err := conv(column); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ }
+}
+
+func (c *conversion) getBuffer() *conversionBuffer {
+ return c.buffers.Get(
+ func() *conversionBuffer {
+ b := &conversionBuffer{
+ columns: make([][]Value, c.numberOfSourceColumns),
+ }
+ values := make([]Value, c.numberOfSourceColumns)
+ for i := range b.columns {
+ b.columns[i] = values[i : i : i+1]
+ }
+ return b
+ },
+ func(b *conversionBuffer) {},
+ )
+}
+
+func (c *conversion) putBuffer(b *conversionBuffer) {
+ c.buffers.Put(b)
+}
+
+// Convert here satisfies the Conversion interface, and does the actual work
+// to convert between the source and target Rows.
+func (c *conversion) Convert(rows []Row) (int, error) {
+ source := c.getBuffer()
+ defer c.putBuffer(source)
+
+ for n, row := range rows {
+ for i, values := range source.columns {
+ source.columns[i] = values[:0]
+ }
+ row.Range(func(columnIndex int, columnValues []Value) bool {
+ source.columns[columnIndex] = append(source.columns[columnIndex], columnValues...)
+ return true
+ })
+ row = row[:0]
+
+ for columnIndex, conv := range c.columns {
+ columnOffset := len(row)
+ if conv.sourceIndex < 0 {
+ // When there is no source column, we put a single value as
+ // placeholder in the column. This is a condition where the
+ // target contained a column which did not exist in the source and
+ // had no other columns existing at that same level.
+ var value Value
+ if conv.isOptional {
+ // Optional field: create null value (kind = 0)
+ value = Value{}
+ } else {
+ // Required field: create typed zero value
+ value = ZeroValue(conv.targetKind)
+ }
+ row = append(row, value)
+ } else {
+ sourceValues := source.columns[conv.sourceIndex]
+ // We must copy to the output row first and not mutate the
+ // source columns because multiple target columns may map to
+ // the same source column.
+ row = append(row, sourceValues...)
+ }
+ columnValues := row[columnOffset:]
+
+ if err := conv.convertValues(columnValues); err != nil {
+ return n, err
+ }
+
+ // Since the column index may have changed between the source and
+ // target columns we ensure that the right value is always written
+ // to the output row.
+ for i := range columnValues {
+ // Fix: If we have a zero Value{}, convert it to a properly typed value
+ // For optional fields, keep as null (kind = 0)
+ // For required fields, convert to typed zero value
+ if columnValues[i].Kind() == Kind(0) && !conv.isOptional {
+ columnValues[i] = ZeroValue(conv.targetKind)
+ }
+
+ columnValues[i].columnIndex = ^int16(columnIndex)
+ }
+ }
+
+ rows[n] = row
+ }
+
+ return len(rows), nil
+}
+
+func (c *conversion) Column(i int) int {
+ return c.columns[i].sourceIndex
+}
+
+func (c *conversion) Schema() *Schema {
+ return c.schema
+}
+
+type identity struct{ schema *Schema }
+
+func (id identity) Convert(rows []Row) (int, error) { return len(rows), nil }
+func (id identity) Column(i int) int { return i }
+func (id identity) Schema() *Schema { return id.schema }
+
+// Convert constructs a conversion function from one parquet schema to another.
+//
+// The function supports converting between schemas where the source or target
+// have extra columns; if there are more columns in the source, they will be
+// stripped out of the rows. Extra columns in the target schema will be set to
+// null or zero values.
+// +// The returned function is intended to be used to append the converted source +// row to the destination buffer. +func Convert(to, from Node) (conv Conversion, err error) { + schema, _ := to.(*Schema) + if schema == nil { + schema = NewSchema("", to) + } + + if EqualNodes(to, from) { + return identity{schema}, nil + } + + targetMapping, targetColumns := columnMappingOf(to) + sourceMapping, sourceColumns := columnMappingOf(from) + columns := make([]conversionColumn, len(targetColumns)) + + for i, path := range targetColumns { + targetColumn := targetMapping.lookup(path) + sourceColumn := sourceMapping.lookup(path) + + conversions := []conversionFunc{} + if sourceColumn.node != nil { + targetType := targetColumn.node.Type() + sourceType := sourceColumn.node.Type() + if !EqualTypes(targetType, sourceType) { + conversions = append(conversions, convertToType(targetType, sourceType)) + } + + repetitionLevels := make([]byte, len(path)+1) + definitionLevels := make([]byte, len(path)+1) + targetRepetitionLevel := byte(0) + targetDefinitionLevel := byte(0) + sourceRepetitionLevel := byte(0) + sourceDefinitionLevel := byte(0) + targetNode := to + sourceNode := from + + for j := range path { + targetNode = fieldByName(targetNode, path[j]) + sourceNode = fieldByName(sourceNode, path[j]) + + targetRepetitionLevel, targetDefinitionLevel = applyFieldRepetitionType( + fieldRepetitionTypeOf(targetNode), + targetRepetitionLevel, + targetDefinitionLevel, + ) + sourceRepetitionLevel, sourceDefinitionLevel = applyFieldRepetitionType( + fieldRepetitionTypeOf(sourceNode), + sourceRepetitionLevel, + sourceDefinitionLevel, + ) + + repetitionLevels[sourceRepetitionLevel] = targetRepetitionLevel + definitionLevels[sourceDefinitionLevel] = targetDefinitionLevel + } + + repetitionLevels = repetitionLevels[:sourceRepetitionLevel+1] + definitionLevels = definitionLevels[:sourceDefinitionLevel+1] + + if !isDirectLevelMapping(repetitionLevels) || !isDirectLevelMapping(definitionLevels) { + 
conversions = append(conversions, convertToLevels(repetitionLevels, definitionLevels)) + } + + } else { + // Column doesn't exist in source - this is a missing column + targetType := targetColumn.node.Type() + targetKind := targetType.Kind() + + // Check if the leaf field itself is optional (not just nested in optional/repeated structure) + isOptionalField := targetColumn.node.Optional() + + closestColumn := sourceMapping.lookupClosest(path) + if closestColumn.node != nil { + // There's a sibling column we can use as a template for structure + if isOptionalField { + // Optional field: convert to null values while mirroring structure + conversions = append(conversions, convertToNullOptional(targetColumn.maxDefinitionLevel)) + } else { + // Required field: convert to typed zero values + conversions = append(conversions, convertToZero(targetKind)) + } + // Use the closest column as source for structure/levels + sourceColumn = closestColumn + } else { + // No sibling columns exist + if !isOptionalField { + // Required field: create typed zero value + conversions = append(conversions, convertToZero(targetKind)) + } + // Keep sourceColumn with columnIndex -1 + // For optional fields without siblings, we'll create a single null value per row + } + } + + // Store target column type for creating proper null values + targetType := targetColumn.node.Type() + + // Determine sourceIndex: -1 if column doesn't exist in source + sourceIndex := int(sourceColumn.columnIndex) + if sourceColumn.node == nil { + sourceIndex = -1 + } + + // Determine if target column is optional + isOptional := targetColumn.maxDefinitionLevel > 0 + + columns[i] = conversionColumn{ + sourceIndex: sourceIndex, + convertValues: multiConversionFunc(conversions), + targetKind: targetType.Kind(), // Store target kind for null value creation + isOptional: isOptional, + } + } + + c := &conversion{ + columns: columns, + schema: schema, + numberOfSourceColumns: len(sourceColumns), + } + return c, nil +} + +func 
isDirectLevelMapping(levels []byte) bool { + for i, level := range levels { + if level != byte(i) { + return false + } + } + return true +} + +// findAdjacentColumnChunk finds a sibling column at the same repetition depth +// Returns nil if no suitable adjacent column exists +func findAdjacentColumnChunk(schema *Schema, targetColumnIndex int16, columns []ColumnChunk, sourceMapping columnMapping) ColumnChunk { + var targetLeaf leafColumn + targetFound := false + + forEachLeafColumnOf(schema, func(leaf leafColumn) { + if leaf.columnIndex == targetColumnIndex { + targetLeaf = leaf + targetFound = true + } + }) + + if !targetFound { + return nil + } + + // Find a sibling: same parent path and same max repetition level + targetParentPath := targetLeaf.path + if len(targetParentPath) > 0 { + targetParentPath = targetParentPath[:len(targetParentPath)-1] + } + + var adjacentChunk ColumnChunk + forEachLeafColumnOf(schema, func(leaf leafColumn) { + if leaf.columnIndex == targetColumnIndex { + return // Skip self + } + + // Check if this is a sibling + if len(leaf.path) > 0 { + leafParentPath := leaf.path[:len(leaf.path)-1] + if targetParentPath.equal(leafParentPath) && + leaf.maxRepetitionLevel == targetLeaf.maxRepetitionLevel { + // Check if this column exists in the converted row group + if int(leaf.columnIndex) < len(columns) && columns[leaf.columnIndex] != nil { + // Make sure it's not another missing column + if _, ok := columns[leaf.columnIndex].(*missingColumnChunk); !ok { + adjacentChunk = columns[leaf.columnIndex] + return // Found a suitable adjacent column + } + } + } + } + }) + + return adjacentChunk +} + +// ConvertRowGroup constructs a wrapper of the given row group which applies +// the given schema conversion to its rows. 
+func ConvertRowGroup(rowGroup RowGroup, conv Conversion) RowGroup { + if EqualNodes(rowGroup.Schema(), conv.Schema()) { + return rowGroup + } + schema := conv.Schema() + numRows := rowGroup.NumRows() + rowGroupColumns := rowGroup.ColumnChunks() + sourceSchema := rowGroup.Schema() + + // Build a mapping to detect missing columns + sourceMapping, _ := columnMappingOf(sourceSchema) + + columns := make([]ColumnChunk, numLeafColumnsOf(schema)) + + // First pass: create all non-missing columns + forEachLeafColumnOf(schema, func(leaf leafColumn) { + i := leaf.columnIndex + j := conv.Column(int(leaf.columnIndex)) + + // Check if this column actually exists in the source schema + sourceColumn := sourceMapping.lookup(leaf.path) + isMissing := sourceColumn.node == nil + + if !isMissing { + if i == int16(j) { + columns[i] = rowGroupColumns[j] + } else { + columns[i] = &convertedColumnChunk{ + chunk: rowGroupColumns[j], + targetColumnIndex: ^int16(i), + } + } + } + }) + + // Second pass: create missing columns with references to adjacent columns + forEachLeafColumnOf(schema, func(leaf leafColumn) { + i := leaf.columnIndex + + // Check if this column actually exists in the source schema + sourceColumn := sourceMapping.lookup(leaf.path) + isMissing := sourceColumn.node == nil + + if isMissing { + // Find adjacent column for mirroring levels + adjacentChunk := findAdjacentColumnChunk(schema, i, columns, sourceMapping) + + columns[i] = &missingColumnChunk{ + typ: leaf.node.Type(), + column: i, + numRows: numRows, + numValues: numRows, // May be adjusted when reading + numNulls: numRows, // Depends on required vs optional + maxRepetitionLevel: leaf.maxRepetitionLevel, + maxDefinitionLevel: leaf.maxDefinitionLevel, + adjacentChunk: adjacentChunk, + } + } + }) + + // Sorting columns must exist on the conversion schema in order to be + // advertised on the converted row group otherwise the resulting rows + // would not be in the right order. 
+ sorting := []SortingColumn{} + for _, col := range rowGroup.SortingColumns() { + if !hasColumnPath(schema, col.Path()) { + break + } + sorting = append(sorting, col) + } + + return &convertedRowGroup{ + // The pair of rowGroup+conv is retained to construct a converted row + // reader by wrapping the underlying row reader of the row group because + // it allows proper reconstruction of the repetition and definition + // levels. + // + // Columns of the source row group which do not exist in the target are + // masked to prevent loading unneeded pages when reading rows from the + // converted row group. + rowGroup: maskMissingRowGroupColumns(rowGroup, len(columns), conv), + columns: columns, + sorting: sorting, + conv: conv, + } +} + +func maskMissingRowGroupColumns(r RowGroup, numColumns int, conv Conversion) RowGroup { + rowGroupColumns := r.ColumnChunks() + columns := make([]ColumnChunk, len(rowGroupColumns)) + missing := make([]missingColumnChunk, len(columns)) + numRows := r.NumRows() + + // Compute max levels for each column in the source schema + sourceSchema := r.Schema() + forEachLeafColumnOf(sourceSchema, func(leaf leafColumn) { + i := leaf.columnIndex + missing[i] = missingColumnChunk{ + typ: rowGroupColumns[i].Type(), + column: int16(i), + numRows: numRows, + numValues: numRows, + numNulls: numRows, + maxRepetitionLevel: leaf.maxRepetitionLevel, + maxDefinitionLevel: leaf.maxDefinitionLevel, + } + }) + + for i := range columns { + columns[i] = &missing[i] + } + + for i := range numColumns { + j := conv.Column(i) + if j >= 0 && j < len(columns) { + columns[j] = rowGroupColumns[j] + } + } + + return &rowGroup{ + schema: r.Schema(), + numRows: numRows, + columns: columns, + } +} + +type missingColumnChunk struct { + typ Type + column int16 + numRows int64 + numValues int64 + numNulls int64 + maxRepetitionLevel byte // Maximum repetition level for this column + maxDefinitionLevel byte // Maximum definition level for this column + adjacentChunk ColumnChunk 
// Adjacent column chunk to mirror levels from (nil if none) +} + +func (c *missingColumnChunk) Type() Type { return c.typ } +func (c *missingColumnChunk) Column() int { return int(c.column) } +func (c *missingColumnChunk) Pages() Pages { + var adjacentPages Pages + if c.adjacentChunk != nil { + adjacentPages = c.adjacentChunk.Pages() + } + return onePage(missingPage{ + missingColumnChunk: c, + adjacentPages: adjacentPages, + }) +} +func (c *missingColumnChunk) ColumnIndex() (ColumnIndex, error) { return missingColumnIndex{c}, nil } +func (c *missingColumnChunk) OffsetIndex() (OffsetIndex, error) { return missingOffsetIndex{}, nil } +func (c *missingColumnChunk) BloomFilter() BloomFilter { return missingBloomFilter{} } +func (c *missingColumnChunk) NumValues() int64 { return c.numValues } + +type missingColumnIndex struct{ *missingColumnChunk } + +func (i missingColumnIndex) NumPages() int { return 1 } +func (i missingColumnIndex) NullCount(int) int64 { return i.numNulls } +func (i missingColumnIndex) NullPage(int) bool { return true } +func (i missingColumnIndex) MinValue(int) Value { return Value{} } +func (i missingColumnIndex) MaxValue(int) Value { return Value{} } +func (i missingColumnIndex) IsAscending() bool { return true } +func (i missingColumnIndex) IsDescending() bool { return false } + +type missingOffsetIndex struct{} + +func (missingOffsetIndex) NumPages() int { return 1 } +func (missingOffsetIndex) Offset(int) int64 { return 0 } +func (missingOffsetIndex) CompressedPageSize(int) int64 { return 0 } +func (missingOffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type missingBloomFilter struct{} + +func (missingBloomFilter) ReadAt([]byte, int64) (int, error) { return 0, io.EOF } +func (missingBloomFilter) Size() int64 { return 0 } +func (missingBloomFilter) Check(Value) (bool, error) { return false, nil } + +type missingPage struct { + *missingColumnChunk + adjacentPages Pages // Pages from adjacent column for level mirroring +} + +func (p 
missingPage) Column() int { return int(p.column) } +func (p missingPage) Dictionary() Dictionary { return nil } +func (p missingPage) NumRows() int64 { return p.numRows } +func (p missingPage) NumValues() int64 { return p.numValues } +func (p missingPage) NumNulls() int64 { return p.numNulls } +func (p missingPage) Bounds() (min, max Value, ok bool) { return } +func (p missingPage) Slice(i, j int64) Page { + return missingPage{ + missingColumnChunk: &missingColumnChunk{ + typ: p.typ, + column: p.column, + numRows: j - i, + numValues: j - i, + numNulls: j - i, + maxRepetitionLevel: p.maxRepetitionLevel, + maxDefinitionLevel: p.maxDefinitionLevel, + adjacentChunk: p.adjacentChunk, + }, + adjacentPages: p.adjacentPages, + } +} +func (p missingPage) Size() int64 { return 0 } +func (p missingPage) RepetitionLevels() []byte { return nil } +func (p missingPage) DefinitionLevels() []byte { return nil } +func (p missingPage) Data() encoding.Values { return p.typ.NewValues(nil, nil) } +func (p missingPage) Values() ValueReader { + var adjacentReader ValueReader + if p.adjacentPages != nil { + // Open the adjacent page to read levels from + if adjacentPage, err := p.adjacentPages.ReadPage(); err == nil { + adjacentReader = adjacentPage.Values() + } + } + + return &missingPageValues{ + page: p, + adjacentReader: adjacentReader, + adjacentBuffer: make([]Value, 1024), // Reasonable buffer size + } +} + +type missingPageValues struct { + page missingPage + read int64 + adjacentReader ValueReader // Reader for adjacent column to mirror levels + adjacentBuffer []Value // Buffer for reading adjacent values +} + +func (r *missingPageValues) ReadValues(values []Value) (int, error) { + remain := r.page.numValues - r.read + if int64(len(values)) > remain { + values = values[:remain] + } + + typ := r.page.typ + columnIndex := ^r.page.column + + // Case 1: No adjacent column (root-level field, no siblings) + if r.adjacentReader == nil { + return r.readWithoutAdjacent(values, typ, 
columnIndex) + } + + // Case 2: Has adjacent column - mirror its repetition/definition levels + return r.readWithAdjacent(values, typ, columnIndex) +} + +func (r *missingPageValues) readWithoutAdjacent(values []Value, typ Type, columnIndex int16) (int, error) { + // For fields without siblings, assume one value per row + // Definition level depends on whether field is required or optional + isRequired := r.page.maxDefinitionLevel == 0 + + if isRequired { + // Required field: produce zero/default values + for i := range values { + values[i] = ZeroValue(typ.Kind()) + values[i].repetitionLevel = 0 + values[i].definitionLevel = r.page.maxDefinitionLevel // Present value + values[i].columnIndex = columnIndex + } + } else { + // Optional field: produce nulls + definitionLevel := byte(0) + if r.page.maxDefinitionLevel > 0 { + definitionLevel = r.page.maxDefinitionLevel - 1 + } + + for i := range values { + values[i] = Value{ + repetitionLevel: 0, + definitionLevel: definitionLevel, + columnIndex: columnIndex, + } + } + } + + r.read += int64(len(values)) + if r.read == r.page.numValues { + return len(values), io.EOF + } + return len(values), nil +} + +func (r *missingPageValues) readWithAdjacent(values []Value, typ Type, columnIndex int16) (int, error) { + // Read values from adjacent column to get its levels + n, err := r.adjacentReader.ReadValues(r.adjacentBuffer[:len(values)]) + if err != nil && err != io.EOF { + return 0, err + } + + // Determine if this missing column is required or optional + isRequired := r.page.maxDefinitionLevel == 0 + + for i := range n { + repLevel := r.adjacentBuffer[i].repetitionLevel + adjacentDefLevel := r.adjacentBuffer[i].definitionLevel + + var defLevel byte + var value Value + + if isRequired { + // Required field: produce zero/default values + value = ZeroValue(typ.Kind()) + // Mirror adjacent definition level structure + defLevel = adjacentDefLevel + } else { + // Optional field: produce nulls + // Definition level indicates null at 
appropriate nesting + if adjacentDefLevel < r.page.maxDefinitionLevel { + // Adjacent is null at some level, follow its definition + defLevel = adjacentDefLevel + } else { + // Adjacent is present, but we are null at leaf level + defLevel = r.page.maxDefinitionLevel - 1 + } + } + + value.repetitionLevel = repLevel + value.definitionLevel = defLevel + value.columnIndex = columnIndex + values[i] = value + } + + r.read += int64(n) + if err == io.EOF { + return n, io.EOF + } + return n, nil +} + +func (r *missingPageValues) Close() error { + r.read = r.page.numValues + return nil +} + +type convertedRowGroup struct { + rowGroup RowGroup + columns []ColumnChunk + sorting []SortingColumn + conv Conversion +} + +func (c *convertedRowGroup) NumRows() int64 { return c.rowGroup.NumRows() } +func (c *convertedRowGroup) ColumnChunks() []ColumnChunk { return c.columns } +func (c *convertedRowGroup) Schema() *Schema { return c.conv.Schema() } +func (c *convertedRowGroup) SortingColumns() []SortingColumn { return c.sorting } +func (c *convertedRowGroup) Rows() Rows { + rows := c.rowGroup.Rows() + return &convertedRows{ + Closer: rows, + rows: rows, + conv: c.conv, + } +} + +// ConvertRowReader constructs a wrapper of the given row reader which applies +// the given schema conversion to the rows. 
+func ConvertRowReader(rows RowReader, conv Conversion) RowReaderWithSchema { + return &convertedRows{rows: &forwardRowSeeker{rows: rows}, conv: conv} +} + +type convertedRows struct { + io.Closer + rows RowReadSeeker + conv Conversion +} + +func (c *convertedRows) ReadRows(rows []Row) (int, error) { + n, err := c.rows.ReadRows(rows) + if n > 0 { + var convErr error + n, convErr = c.conv.Convert(rows[:n]) + if convErr != nil { + err = convErr + } + } + return n, err +} + +func (c *convertedRows) Schema() *Schema { + return c.conv.Schema() +} + +func (c *convertedRows) SeekToRow(rowIndex int64) error { + return c.rows.SeekToRow(rowIndex) +} + +// convertedColumnChunk wraps a ColumnChunk to fix the column index after reordering. +// When ConvertRowGroup reorders columns, the underlying chunk's Column() method +// returns the original position. This wrapper fixes both Column() and the +// columnIndex in values read from the chunk. +type convertedColumnChunk struct { + chunk ColumnChunk + targetColumnIndex int16 // XOR-encoded column index (^int16(columnIndex)) +} + +func (c *convertedColumnChunk) Type() Type { + return c.chunk.Type() +} + +func (c *convertedColumnChunk) Column() int { + return int(^c.targetColumnIndex) +} + +func (c *convertedColumnChunk) NumValues() int64 { + return c.chunk.NumValues() +} + +func (c *convertedColumnChunk) Pages() Pages { + return &convertedPages{ + pages: c.chunk.Pages(), + targetColumnIndex: c.targetColumnIndex, + } +} + +func (c *convertedColumnChunk) ColumnIndex() (ColumnIndex, error) { + return c.chunk.ColumnIndex() +} + +func (c *convertedColumnChunk) OffsetIndex() (OffsetIndex, error) { + return c.chunk.OffsetIndex() +} + +func (c *convertedColumnChunk) BloomFilter() BloomFilter { + return c.chunk.BloomFilter() +} + +// convertedPages wraps Pages to return convertedPage instances. 
+type convertedPages struct { + pages Pages + targetColumnIndex int16 +} + +func (p *convertedPages) ReadPage() (Page, error) { + page, err := p.pages.ReadPage() + if err != nil { + return nil, err + } + return &convertedPage{ + page: page, + targetColumnIndex: p.targetColumnIndex, + }, nil +} + +func (p *convertedPages) SeekToRow(rowIndex int64) error { + return p.pages.SeekToRow(rowIndex) +} + +func (p *convertedPages) Close() error { + return p.pages.Close() +} + +// convertedPage wraps a Page to return a convertedValueReader. +type convertedPage struct { + page Page + targetColumnIndex int16 +} + +func (p *convertedPage) Type() Type { + return p.page.Type() +} + +func (p *convertedPage) Column() int { + return int(^p.targetColumnIndex) +} + +func (p *convertedPage) Dictionary() Dictionary { + return p.page.Dictionary() +} + +func (p *convertedPage) NumRows() int64 { + return p.page.NumRows() +} + +func (p *convertedPage) NumValues() int64 { + return p.page.NumValues() +} + +func (p *convertedPage) NumNulls() int64 { + return p.page.NumNulls() +} + +func (p *convertedPage) Bounds() (min, max Value, ok bool) { + return p.page.Bounds() +} + +func (p *convertedPage) Size() int64 { + return p.page.Size() +} + +func (p *convertedPage) RepetitionLevels() []byte { + return p.page.RepetitionLevels() +} + +func (p *convertedPage) DefinitionLevels() []byte { + return p.page.DefinitionLevels() +} + +func (p *convertedPage) Data() encoding.Values { + return p.page.Data() +} + +func (p *convertedPage) Values() ValueReader { + return &convertedValueReader{ + reader: p.page.Values(), + targetColumnIndex: p.targetColumnIndex, + } +} + +func (p *convertedPage) Slice(i, j int64) Page { + return &convertedPage{ + page: p.page.Slice(i, j), + targetColumnIndex: p.targetColumnIndex, + } +} + +func (p *convertedPage) Retain() { + Retain(p.page) +} + +func (p *convertedPage) Release() { + Release(p.page) +} + +func (p *convertedPage) ReleaseAndDetachValues() { + 
releaseAndDetachValues(p.page) +} + +var ( + _ retainable = (*convertedPage)(nil) + _ releasable = (*convertedPage)(nil) + _ detachable = (*convertedPage)(nil) +) + +// convertedValueReader wraps a ValueReader to rewrite columnIndex in values. +type convertedValueReader struct { + reader ValueReader + targetColumnIndex int16 +} + +func (r *convertedValueReader) ReadValues(values []Value) (int, error) { + n, err := r.reader.ReadValues(values) + // Rewrite columnIndex for all values to match the target column position + for i := range n { + values[i].columnIndex = r.targetColumnIndex + } + return n, err +} + +var ( + trueBytes = []byte(`true`) + falseBytes = []byte(`false`) + unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) +) + +func convertBooleanToInt32(v Value) (Value, error) { + return v.convertToInt32(int32(v.byte())), nil +} + +func convertBooleanToInt64(v Value) (Value, error) { + return v.convertToInt64(int64(v.byte())), nil +} + +func convertBooleanToInt96(v Value) (Value, error) { + return v.convertToInt96(deprecated.Int96{0: uint32(v.byte())}), nil +} + +func convertBooleanToFloat(v Value) (Value, error) { + return v.convertToFloat(float32(v.byte())), nil +} + +func convertBooleanToDouble(v Value) (Value, error) { + return v.convertToDouble(float64(v.byte())), nil +} + +func convertBooleanToByteArray(v Value) (Value, error) { + return v.convertToByteArray([]byte{v.byte()}), nil +} + +func convertBooleanToFixedLenByteArray(v Value, size int) (Value, error) { + b := []byte{v.byte()} + c := make([]byte, size) + copy(c, b) + return v.convertToFixedLenByteArray(c), nil +} + +func convertBooleanToString(v Value) (Value, error) { + b := ([]byte)(nil) + if v.boolean() { + b = trueBytes + } else { + b = falseBytes + } + return v.convertToByteArray(b), nil +} + +func convertInt32ToBoolean(v Value) (Value, error) { + return v.convertToBoolean(v.int32() != 0), nil +} + +func convertInt32ToInt64(v Value) (Value, error) { + return 
v.convertToInt64(int64(v.int32())), nil +} + +func convertInt32ToInt96(v Value) (Value, error) { + return v.convertToInt96(deprecated.Int32ToInt96(v.int32())), nil +} + +func convertInt32ToFloat(v Value) (Value, error) { + return v.convertToFloat(float32(v.int32())), nil +} + +func convertInt32ToDouble(v Value) (Value, error) { + return v.convertToDouble(float64(v.int32())), nil +} + +func convertInt32ToByteArray(v Value) (Value, error) { + b := make([]byte, 4) + binary.LittleEndian.PutUint32(b, v.uint32()) + return v.convertToByteArray(b), nil +} + +func convertInt32ToFixedLenByteArray(v Value, size int) (Value, error) { + b := make([]byte, 4) + c := make([]byte, size) + binary.LittleEndian.PutUint32(b, v.uint32()) + copy(c, b) + return v.convertToFixedLenByteArray(c), nil +} + +func convertInt32ToString(v Value) (Value, error) { + return v.convertToByteArray(strconv.AppendInt(nil, int64(v.int32()), 10)), nil +} + +func convertInt64ToBoolean(v Value) (Value, error) { + return v.convertToBoolean(v.int64() != 0), nil +} + +func convertInt64ToInt32(v Value) (Value, error) { + return v.convertToInt32(int32(v.int64())), nil +} + +func convertInt64ToInt96(v Value) (Value, error) { + return v.convertToInt96(deprecated.Int64ToInt96(v.int64())), nil +} + +func convertInt64ToFloat(v Value) (Value, error) { + return v.convertToFloat(float32(v.int64())), nil +} + +func convertInt64ToDouble(v Value) (Value, error) { + return v.convertToDouble(float64(v.int64())), nil +} + +func convertInt64ToByteArray(v Value) (Value, error) { + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, v.uint64()) + return v.convertToByteArray(b), nil +} + +func convertInt64ToFixedLenByteArray(v Value, size int) (Value, error) { + b := make([]byte, 8) + c := make([]byte, size) + binary.LittleEndian.PutUint64(b, v.uint64()) + copy(c, b) + return v.convertToFixedLenByteArray(c), nil +} + +func convertInt64ToString(v Value) (Value, error) { + return v.convertToByteArray(strconv.AppendInt(nil, 
v.int64(), 10)), nil +} + +func convertInt96ToBoolean(v Value) (Value, error) { + return v.convertToBoolean(!v.int96().IsZero()), nil +} + +func convertInt96ToInt32(v Value) (Value, error) { + return v.convertToInt32(v.int96().Int32()), nil +} + +func convertInt96ToInt64(v Value) (Value, error) { + return v.convertToInt64(v.int96().Int64()), nil +} + +func convertInt96ToFloat(v Value) (Value, error) { + return v, invalidConversion(v, "INT96", "FLOAT") +} + +func convertInt96ToDouble(v Value) (Value, error) { + return v, invalidConversion(v, "INT96", "DOUBLE") +} + +func convertInt96ToByteArray(v Value) (Value, error) { + return v.convertToByteArray(v.byteArray()), nil +} + +func convertInt96ToFixedLenByteArray(v Value, size int) (Value, error) { + b := v.byteArray() + if len(b) < size { + c := make([]byte, size) + copy(c, b) + b = c + } else { + b = b[:size] + } + return v.convertToFixedLenByteArray(b), nil +} + +func convertInt96ToString(v Value) (Value, error) { + return v.convertToByteArray([]byte(v.String())), nil +} + +func convertFloatToBoolean(v Value) (Value, error) { + return v.convertToBoolean(v.float() != 0), nil +} + +func convertFloatToInt32(v Value) (Value, error) { + return v.convertToInt32(int32(v.float())), nil +} + +func convertFloatToInt64(v Value) (Value, error) { + return v.convertToInt64(int64(v.float())), nil +} + +func convertFloatToInt96(v Value) (Value, error) { + return v, invalidConversion(v, "FLOAT", "INT96") +} + +func convertFloatToDouble(v Value) (Value, error) { + return v.convertToDouble(float64(v.float())), nil +} + +func convertFloatToByteArray(v Value) (Value, error) { + b := make([]byte, 4) + binary.LittleEndian.PutUint32(b, v.uint32()) + return v.convertToByteArray(b), nil +} + +func convertFloatToFixedLenByteArray(v Value, size int) (Value, error) { + b := make([]byte, 4) + c := make([]byte, size) + binary.LittleEndian.PutUint32(b, v.uint32()) + copy(c, b) + return v.convertToFixedLenByteArray(c), nil +} + +func 
convertFloatToString(v Value) (Value, error) { + return v.convertToByteArray(strconv.AppendFloat(nil, float64(v.float()), 'g', -1, 32)), nil +} + +func convertDoubleToBoolean(v Value) (Value, error) { + return v.convertToBoolean(v.double() != 0), nil +} + +func convertDoubleToInt32(v Value) (Value, error) { + return v.convertToInt32(int32(v.double())), nil +} + +func convertDoubleToInt64(v Value) (Value, error) { + return v.convertToInt64(int64(v.double())), nil +} + +func convertDoubleToInt96(v Value) (Value, error) { + return v, invalidConversion(v, "FLOAT", "INT96") +} + +func convertDoubleToFloat(v Value) (Value, error) { + return v.convertToFloat(float32(v.double())), nil +} + +func convertDoubleToByteArray(v Value) (Value, error) { + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, v.uint64()) + return v.convertToByteArray(b), nil +} + +func convertDoubleToFixedLenByteArray(v Value, size int) (Value, error) { + b := make([]byte, 8) + c := make([]byte, size) + binary.LittleEndian.PutUint64(b, v.uint64()) + copy(c, b) + return v.convertToFixedLenByteArray(c), nil +} + +func convertDoubleToString(v Value) (Value, error) { + return v.convertToByteArray(strconv.AppendFloat(nil, v.double(), 'g', -1, 64)), nil +} + +func convertByteArrayToBoolean(v Value) (Value, error) { + return v.convertToBoolean(!isZero(v.byteArray())), nil +} + +func convertByteArrayToInt32(v Value) (Value, error) { + b := make([]byte, 4) + copy(b, v.byteArray()) + return v.convertToInt32(int32(binary.LittleEndian.Uint32(b))), nil +} + +func convertByteArrayToInt64(v Value) (Value, error) { + b := make([]byte, 8) + copy(b, v.byteArray()) + return v.convertToInt64(int64(binary.LittleEndian.Uint64(b))), nil +} + +func convertByteArrayToInt96(v Value) (Value, error) { + b := make([]byte, 12) + copy(b, v.byteArray()) + return v.convertToInt96(deprecated.Int96{ + 0: binary.LittleEndian.Uint32(b[0:4]), + 1: binary.LittleEndian.Uint32(b[4:8]), + 2: binary.LittleEndian.Uint32(b[8:12]), + }), nil 
+} + +func convertByteArrayToFloat(v Value) (Value, error) { + b := make([]byte, 4) + copy(b, v.byteArray()) + return v.convertToFloat(math.Float32frombits(binary.LittleEndian.Uint32(b))), nil +} + +func convertByteArrayToDouble(v Value) (Value, error) { + b := make([]byte, 8) + copy(b, v.byteArray()) + return v.convertToDouble(math.Float64frombits(binary.LittleEndian.Uint64(b))), nil +} + +func convertByteArrayToFixedLenByteArray(v Value, size int) (Value, error) { + b := v.byteArray() + if len(b) < size { + c := make([]byte, size) + copy(c, b) + b = c + } else { + b = b[:size] + } + return v.convertToFixedLenByteArray(b), nil +} + +func convertFixedLenByteArrayToString(v Value) (Value, error) { + b := v.byteArray() + c := make([]byte, hex.EncodedLen(len(b))) + hex.Encode(c, b) + return v.convertToByteArray(c), nil +} + +func convertStringToBoolean(v Value) (Value, error) { + b, err := strconv.ParseBool(v.string()) + if err != nil { + return v, conversionError(v, "STRING", "BOOLEAN", err) + } + return v.convertToBoolean(b), nil +} + +func convertStringToInt32(v Value) (Value, error) { + i, err := strconv.ParseInt(v.string(), 10, 32) + if err != nil { + return v, conversionError(v, "STRING", "INT32", err) + } + return v.convertToInt32(int32(i)), nil +} + +func convertStringToInt64(v Value) (Value, error) { + i, err := strconv.ParseInt(v.string(), 10, 64) + if err != nil { + return v, conversionError(v, "STRING", "INT64", err) + } + return v.convertToInt64(i), nil +} + +func convertStringToInt96(v Value) (Value, error) { + i, ok := new(big.Int).SetString(v.string(), 10) + if !ok { + return v, conversionError(v, "STRING", "INT96", strconv.ErrSyntax) + } + b := i.Bytes() + c := make([]byte, 12) + copy(c, b) + if cpu.IsBigEndian { + bufLen := len(c) + for idx := 0; idx < bufLen; idx = idx + 4 { + for m, n := (idx + 0), (idx + 3); m < n; m, n = m+1, n-1 { + c[m], c[n] = c[n], c[m] + } + } + } + i96 := unsafecast.Slice[deprecated.Int96](c) + return 
v.convertToInt96(i96[0]), nil +} + +func convertStringToFloat(v Value) (Value, error) { + f, err := strconv.ParseFloat(v.string(), 32) + if err != nil { + return v, conversionError(v, "STRING", "FLOAT", err) + } + return v.convertToFloat(float32(f)), nil +} + +func convertStringToDouble(v Value) (Value, error) { + f, err := strconv.ParseFloat(v.string(), 64) + if err != nil { + return v, conversionError(v, "STRING", "DOUBLE", err) + } + return v.convertToDouble(f), nil +} + +func convertStringToFixedLenByteArray(v Value, size int) (Value, error) { + b := v.byteArray() + c := make([]byte, size) + _, err := hex.Decode(c, b) + if err != nil { + return v, conversionError(v, "STRING", "BYTE_ARRAY", err) + } + return v.convertToFixedLenByteArray(c), nil +} + +func convertStringToDate(v Value, tz *time.Location) (Value, error) { + t, err := time.ParseInLocation("2006-01-02", v.string(), tz) + if err != nil { + return v, conversionError(v, "STRING", "DATE", err) + } + d := daysSinceUnixEpoch(t) + return v.convertToInt32(int32(d)), nil +} + +func convertStringToTimeMillis(v Value, tz *time.Location) (Value, error) { + t, err := time.ParseInLocation("15:04:05.999", v.string(), tz) + if err != nil { + return v, conversionError(v, "STRING", "TIME", err) + } + m := nearestMidnightLessThan(t) + milliseconds := t.Sub(m).Milliseconds() + return v.convertToInt32(int32(milliseconds)), nil +} + +func convertStringToTimeMicros(v Value, tz *time.Location) (Value, error) { + t, err := time.ParseInLocation("15:04:05.999999", v.string(), tz) + if err != nil { + return v, conversionError(v, "STRING", "TIME", err) + } + m := nearestMidnightLessThan(t) + microseconds := t.Sub(m).Microseconds() + return v.convertToInt64(microseconds), nil +} + +func convertDateToTimestamp(v Value, u format.TimeUnit, tz *time.Location) (Value, error) { + t := unixEpoch.AddDate(0, 0, int(v.int32())) + d := timeUnitDuration(u) + return v.convertToInt64(int64(t.In(tz).Sub(unixEpoch) / d)), nil +} + +func 
convertDateToString(v Value) (Value, error) { + t := unixEpoch.AddDate(0, 0, int(v.int32())) + b := t.AppendFormat(make([]byte, 0, 10), "2006-01-02") + return v.convertToByteArray(b), nil +} + +func convertTimeMillisToString(v Value, tz *time.Location) (Value, error) { + t := time.UnixMilli(int64(v.int32())).In(tz) + b := t.AppendFormat(make([]byte, 0, 12), "15:04:05.999") + return v.convertToByteArray(b), nil +} + +func convertTimeMicrosToString(v Value, tz *time.Location) (Value, error) { + t := time.UnixMicro(v.int64()).In(tz) + b := t.AppendFormat(make([]byte, 0, 15), "15:04:05.999999") + return v.convertToByteArray(b), nil +} + +func convertTimestampToDate(v Value, u format.TimeUnit, tz *time.Location) (Value, error) { + t := timestamp(v, u, tz) + d := daysSinceUnixEpoch(t) + return v.convertToInt32(int32(d)), nil +} + +func convertTimestampToTimeMillis(v Value, u format.TimeUnit, sourceZone, targetZone *time.Location) (Value, error) { + t := timestamp(v, u, sourceZone) + m := nearestMidnightLessThan(t) + milliseconds := t.In(targetZone).Sub(m).Milliseconds() + return v.convertToInt32(int32(milliseconds)), nil +} + +func convertTimestampToTimeMicros(v Value, u format.TimeUnit, sourceZone, targetZone *time.Location) (Value, error) { + t := timestamp(v, u, sourceZone) + m := nearestMidnightLessThan(t) + microseconds := t.In(targetZone).Sub(m).Microseconds() + return v.convertToInt64(int64(microseconds)), nil +} + +func convertTimestampToTimestamp(v Value, sourceUnit, targetUnit format.TimeUnit) (Value, error) { + sourceScale := timeUnitDuration(sourceUnit).Nanoseconds() + targetScale := timeUnitDuration(targetUnit).Nanoseconds() + targetValue := (v.int64() * sourceScale) / targetScale + return v.convertToInt64(targetValue), nil +} + +const nanosecondsPerDay = 24 * 60 * 60 * 1e9 + +func daysSinceUnixEpoch(t time.Time) int { + return int(t.Sub(unixEpoch).Hours()) / 24 +} + +func nearestMidnightLessThan(t time.Time) time.Time { + y, m, d := t.Date() + return 
time.Date(y, m, d, 0, 0, 0, 0, t.Location()) +} + +func timestamp(v Value, u format.TimeUnit, tz *time.Location) time.Time { + return unixEpoch.In(tz).Add(time.Duration(v.int64()) * timeUnitDuration(u)) +} + +func timeUnitDuration(unit format.TimeUnit) time.Duration { + switch { + case unit.Millis != nil: + return time.Millisecond + case unit.Micros != nil: + return time.Microsecond + default: + return time.Nanosecond + } +} + +func invalidConversion(value Value, from, to string) error { + return fmt.Errorf("%s to %s: %s: %w", from, to, value, ErrInvalidConversion) +} + +func conversionError(value Value, from, to string, err error) error { + return fmt.Errorf("%s to %s: %q: %s: %w", from, to, value.string(), err, ErrInvalidConversion) +} diff --git a/vendor/github.com/parquet-go/parquet-go/dedupe.go b/vendor/github.com/parquet-go/parquet-go/dedupe.go new file mode 100644 index 00000000000..0f434396753 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dedupe.go @@ -0,0 +1,107 @@ +package parquet + +// DedupeRowReader constructs a row reader which drops duplicated consecutive +// rows, according to the comparator function passed as argument. +// +// If the underlying reader produces a sequence of rows sorted by the same +// comparison predicate, the output is guaranteed to produce unique rows only. +func DedupeRowReader(reader RowReader, compare func(Row, Row) int) RowReader { + return &dedupeRowReader{reader: reader, compare: compare} +} + +type dedupeRowReader struct { + reader RowReader + compare func(Row, Row) int + dedupe +} + +func (d *dedupeRowReader) ReadRows(rows []Row) (int, error) { + for { + n, err := d.reader.ReadRows(rows) + n = d.deduplicate(rows[:n], d.compare) + + if n > 0 || err != nil { + return n, err + } + } +} + +// DedupeRowWriter constructs a row writer which drops duplicated consecutive +// rows, according to the comparator function passed as argument. 
+// +// If the writer is given a sequence of rows sorted by the same comparison +// predicate, the output is guaranteed to contain unique rows only. +func DedupeRowWriter(writer RowWriter, compare func(Row, Row) int) RowWriter { + return &dedupeRowWriter{writer: writer, compare: compare} +} + +type dedupeRowWriter struct { + writer RowWriter + compare func(Row, Row) int + dedupe + rows []Row +} + +func (d *dedupeRowWriter) WriteRows(rows []Row) (int, error) { + // We need to make a copy because we cannot modify the rows slice received + // as argument to respect the RowWriter contract. + d.rows = append(d.rows[:0], rows...) + defer func() { + for i := range d.rows { + d.rows[i] = Row{} + } + }() + + if n := d.deduplicate(d.rows, d.compare); n > 0 { + w, err := d.writer.WriteRows(d.rows[:n]) + if err != nil { + return w, err + } + } + + // Return the number of rows received instead of the number of deduplicated + // rows actually written to the underlying writer because we have to respect + // the RowWriter contract. + return len(rows), nil +} + +type dedupe struct { + lastRow Row + uniq []Row + dupe []Row +} + +func (d *dedupe) reset() { + d.lastRow = d.lastRow[:0] +} + +func (d *dedupe) deduplicate(rows []Row, compare func(Row, Row) int) int { + defer func() { + for i := range d.uniq { + d.uniq[i] = Row{} + } + for i := range d.dupe { + d.dupe[i] = Row{} + } + d.uniq = d.uniq[:0] + d.dupe = d.dupe[:0] + }() + + lastRow := d.lastRow + + for _, row := range rows { + if len(lastRow) != 0 && compare(row, lastRow) == 0 { + d.dupe = append(d.dupe, row) + } else { + lastRow = row + d.uniq = append(d.uniq, row) + } + } + + rows = rows[:0] + rows = append(rows, d.uniq...) + rows = append(rows, d.dupe...) + + d.lastRow = append(d.lastRow[:0], lastRow...) 
+ return len(d.uniq) +} diff --git a/vendor/github.com/parquet-go/parquet-go/deprecated/int96.go b/vendor/github.com/parquet-go/parquet-go/deprecated/int96.go new file mode 100644 index 00000000000..fc6d40648e8 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/deprecated/int96.go @@ -0,0 +1,179 @@ +package deprecated + +import ( + "math/big" + "math/bits" +) + +// Int96 is an implementation of the deprecated INT96 parquet type. +type Int96 [3]uint32 + +// Int32ToInt96 converts a int32 value to a Int96. +func Int32ToInt96(value int32) (i96 Int96) { + if value < 0 { + i96[2] = 0xFFFFFFFF + i96[1] = 0xFFFFFFFF + } + i96[0] = uint32(value) + return +} + +// Int64ToInt96 converts a int64 value to Int96. +func Int64ToInt96(value int64) (i96 Int96) { + if value < 0 { + i96[2] = 0xFFFFFFFF + } + i96[1] = uint32(value >> 32) + i96[0] = uint32(value) + return +} + +// IsZero returns true if i is the zero-value. +func (i Int96) IsZero() bool { return i == Int96{} } + +// Negative returns true if i is a negative value. +func (i Int96) Negative() bool { + return (i[2] >> 31) != 0 +} + +// Less returns true if i < j. +// +// The method implements a signed comparison between the two operands. +func (i Int96) Less(j Int96) bool { + if i.Negative() { + if !j.Negative() { + return true + } + } else { + if j.Negative() { + return false + } + } + for k := 2; k >= 0; k-- { + a, b := i[k], j[k] + switch { + case a < b: + return true + case a > b: + return false + } + } + return false +} + +// Int converts i to a big.Int representation. +func (i Int96) Int() *big.Int { + z := new(big.Int) + z.Or(z, big.NewInt(int64(i[2])<<32|int64(i[1]))) + z.Lsh(z, 32) + z.Or(z, big.NewInt(int64(i[0]))) + return z +} + +// Int32 converts i to a int32, potentially truncating the value. +func (i Int96) Int32() int32 { + return int32(i[0]) +} + +// Int64 converts i to a int64, potentially truncating the value. 
+func (i Int96) Int64() int64 { + return int64(i[1])<<32 | int64(i[0]) +} + +// String returns a string representation of i. +func (i Int96) String() string { + return i.Int().String() +} + +// Len returns the minimum length in bits required to store the value of i. +func (i Int96) Len() int { + switch { + case i[2] != 0: + return 64 + bits.Len32(i[2]) + case i[1] != 0: + return 32 + bits.Len32(i[1]) + default: + return bits.Len32(i[0]) + } +} + +func MaxLenInt96(data []Int96) int { + max := 0 + for i := range data { + n := data[i].Len() + if n > max { + max = n + } + } + return max +} + +func MinInt96(data []Int96) (min Int96) { + if len(data) > 0 { + min = data[0] + for _, v := range data[1:] { + if v.Less(min) { + min = v + } + } + } + return min +} + +func MaxInt96(data []Int96) (max Int96) { + if len(data) > 0 { + max = data[0] + for _, v := range data[1:] { + if max.Less(v) { + max = v + } + } + } + return max +} + +func MinMaxInt96(data []Int96) (min, max Int96) { + if len(data) > 0 { + min = data[0] + max = data[0] + for _, v := range data[1:] { + if v.Less(min) { + min = v + } + if max.Less(v) { + max = v + } + } + } + return min, max +} + +func OrderOfInt96(data []Int96) int { + if len(data) > 1 { + if int96AreInAscendingOrder(data) { + return +1 + } + if int96AreInDescendingOrder(data) { + return -1 + } + } + return 0 +} + +func int96AreInAscendingOrder(data []Int96) bool { + for i := len(data) - 1; i > 0; i-- { + if data[i].Less(data[i-1]) { + return false + } + } + return true +} + +func int96AreInDescendingOrder(data []Int96) bool { + for i := len(data) - 1; i > 0; i-- { + if data[i-1].Less(data[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/parquet-go/parquet-go/deprecated/parquet.go b/vendor/github.com/parquet-go/parquet-go/deprecated/parquet.go new file mode 100644 index 00000000000..b2c60072c70 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/deprecated/parquet.go @@ -0,0 +1,112 @@ +package deprecated + +// 
DEPRECATED: Common types used by frameworks(e.g. hive, pig) using parquet. +// ConvertedType is superseded by LogicalType. This enum should not be extended. +// +// See LogicalTypes.md for conversion between ConvertedType and LogicalType. +type ConvertedType int32 + +const ( + // a BYTE_ARRAY actually contains UTF8 encoded chars + UTF8 ConvertedType = 0 + + // a map is converted as an optional field containing a repeated key/value pair + Map ConvertedType = 1 + + // a key/value pair is converted into a group of two fields + MapKeyValue ConvertedType = 2 + + // a list is converted into an optional field containing a repeated field for its + // values + List ConvertedType = 3 + + // an enum is converted into a binary field + Enum ConvertedType = 4 + + // A decimal value. + // + // This may be used to annotate binary or fixed primitive types. The + // underlying byte array stores the unscaled value encoded as two's + // complement using big-endian byte order (the most significant byte is the + // zeroth element). The value of the decimal is the value * 10^{-scale}. + // + // This must be accompanied by a (maximum) precision and a scale in the + // SchemaElement. The precision specifies the number of digits in the decimal + // and the scale stores the location of the decimal point. For example 1.23 + // would have precision 3 (3 total digits) and scale 2 (the decimal point is + // 2 digits over). + Decimal ConvertedType = 5 + + // A Date + // + // Stored as days since Unix epoch, encoded as the INT32 physical type. + Date ConvertedType = 6 + + // A time + // + // The total number of milliseconds since midnight. The value is stored + // as an INT32 physical type. + TimeMillis ConvertedType = 7 + + // A time. + // + // The total number of microseconds since midnight. The value is stored as + // an INT64 physical type. + TimeMicros ConvertedType = 8 + + // A date/time combination + // + // Date and time recorded as milliseconds since the Unix epoch. 
Recorded as + // a physical type of INT64. + TimestampMillis ConvertedType = 9 + + // A date/time combination + // + // Date and time recorded as microseconds since the Unix epoch. The value is + // stored as an INT64 physical type. + TimestampMicros ConvertedType = 10 + + // An unsigned integer value. + // + // The number describes the maximum number of meaningful data bits in + // the stored value. 8, 16 and 32 bit values are stored using the + // INT32 physical type. 64 bit values are stored using the INT64 + // physical type. + Uint8 ConvertedType = 11 + Uint16 ConvertedType = 12 + Uint32 ConvertedType = 13 + Uint64 ConvertedType = 14 + + // A signed integer value. + // + // The number describes the maximum number of meaningful data bits in + // the stored value. 8, 16 and 32 bit values are stored using the + // INT32 physical type. 64 bit values are stored using the INT64 + // physical type. + Int8 ConvertedType = 15 + Int16 ConvertedType = 16 + Int32 ConvertedType = 17 + Int64 ConvertedType = 18 + + // An embedded JSON document + // + // A JSON document embedded within a single UTF8 column. + Json ConvertedType = 19 + + // An embedded BSON document + // + // A BSON document embedded within a single BINARY column. + Bson ConvertedType = 20 + + // An interval of time + // + // This type annotates data stored as a FIXED_LEN_BYTE_ARRAY of length 12 + // This data is composed of three separate little endian unsigned + // integers. Each stores a component of a duration of time. The first + // integer identifies the number of months associated with the duration, + // the second identifies the number of days associated with the duration + // and the third identifies the number of milliseconds associated with + // the provided duration. This duration of time is independent of any + // particular timezone or date. 
+ Interval ConvertedType = 21 +) diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary.go b/vendor/github.com/parquet-go/parquet-go/dictionary.go new file mode 100644 index 00000000000..6d1933d755d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary.go @@ -0,0 +1,427 @@ +package parquet + +import ( + "io" + + "slices" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/sparse" +) + +const ( + // Maximum load of probing tables. This parameter configures the balance + // between memory density and compute time of probing operations. Valid + // values are floating point numbers between 0 and 1. + // + // Smaller values result in lower collision probability when inserting + // values in probing tables, but also increase memory utilization. + // + // TODO: make this configurable by the application? + hashprobeTableMaxLoad = 0.85 + + // An estimate of the CPU cache footprint used by insert operations. + // + // This constant is used to determine a useful chunk size depending on the + // size of values being inserted in dictionaries. More values of small size + // can fit in CPU caches, so the inserts can operate on larger chunks. + insertsTargetCacheFootprint = 8192 +) + +// The Dictionary interface represents type-specific implementations of parquet +// dictionaries. +// +// Programs can instantiate dictionaries by calling the NewDictionary method of a +// Type object. +// +// The current implementation has a limitation which prevents applications from +// providing custom versions of this interface because it contains unexported +// methods. The only way to create Dictionary values is to call the +// NewDictionary of Type instances. This limitation may be lifted in future +// releases. +type Dictionary interface { + // Returns the type that the dictionary was created from. + Type() Type + + // Returns the number of values indexed in the dictionary. 
+ Len() int + + // Returns the total size in bytes of all values stored in the dictionary. + // This is used for tracking dictionary memory usage and enforcing size limits. + Size() int64 + + // Returns the dictionary value at the given index. + Index(index int32) Value + + // Inserts values from the second slice to the dictionary and writes the + // indexes at which each value was inserted to the first slice. + // + // The method panics if the length of the indexes slice is smaller than the + // length of the values slice. + Insert(indexes []int32, values []Value) + + // Given an array of dictionary indexes, lookup the values into the array + // of values passed as second argument. + // + // The method panics if len(indexes) > len(values), or one of the indexes + // is negative or greater than the highest index in the dictionary. + Lookup(indexes []int32, values []Value) + + // Returns the min and max values found in the given indexes. + Bounds(indexes []int32) (min, max Value) + + // Resets the dictionary to its initial state, removing all values. + Reset() + + // Returns a Page representing the content of the dictionary. + // + // The returned page shares the underlying memory of the buffer, it remains + // valid to use until the dictionary's Reset method is called. + Page() Page + + // See ColumnBuffer.writeValues for details on the use of unexported methods + // on interfaces. + insert(indexes []int32, rows sparse.Array) + + // Parquet primitive type insert methods. Each dictionary implementation + // supports only the Parquet types it can handle and panics for others. + // Returns the index at which the value was inserted (or already existed). 
+ insertBoolean(value bool) int32 + insertInt32(value int32) int32 + insertInt64(value int64) int32 + insertInt96(value deprecated.Int96) int32 + insertFloat(value float32) int32 + insertDouble(value float64) int32 + insertByteArray(value []byte) int32 + + //lookup(indexes []int32, rows sparse.Array) +} + +func checkLookupIndexBounds(indexes []int32, rows sparse.Array) { + if rows.Len() < len(indexes) { + panic("dictionary lookup with more indexes than values") + } +} + +// indexedType is a wrapper around a Type value which overrides object +// constructors to use indexed versions referencing values in the dictionary +// instead of storing plain values. +type indexedType struct { + Type + dict Dictionary +} + +func newIndexedType(typ Type, dict Dictionary) *indexedType { + return &indexedType{Type: typ, dict: dict} +} + +func (t *indexedType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newIndexedColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t *indexedType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newIndexedPage(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +// indexedPage is an implementation of the Page interface which stores +// indexes instead of plain value. The indexes reference the values in a +// dictionary that the page was created for. +type indexedPage struct { + typ *indexedType + values []int32 + columnIndex int16 +} + +func newIndexedPage(typ *indexedType, columnIndex int16, numValues int32, data encoding.Values) *indexedPage { + // RLE encoded values that contain dictionary indexes in data pages are + // sometimes truncated when they contain only zeros. We account for this + // special case here and extend the values buffer if it is shorter than + // needed to hold `numValues`. 
+ size := int(numValues) + values := data.Int32() + + if len(values) < size { + if cap(values) < size { + tmp := make([]int32, size) + copy(tmp, values) + values = tmp + } else { + clear := values[len(values):size] + for i := range clear { + clear[i] = 0 + } + } + } + + return &indexedPage{ + typ: typ, + values: values[:size], + columnIndex: ^columnIndex, + } +} + +func (page *indexedPage) Type() Type { return indexedPageType{page.typ} } + +func (page *indexedPage) Column() int { return int(^page.columnIndex) } + +func (page *indexedPage) Dictionary() Dictionary { return page.typ.dict } + +func (page *indexedPage) NumRows() int64 { return int64(len(page.values)) } + +func (page *indexedPage) NumValues() int64 { return int64(len(page.values)) } + +func (page *indexedPage) NumNulls() int64 { return 0 } + +func (page *indexedPage) Size() int64 { return 4 * int64(len(page.values)) } + +func (page *indexedPage) RepetitionLevels() []byte { return nil } + +func (page *indexedPage) DefinitionLevels() []byte { return nil } + +func (page *indexedPage) Data() encoding.Values { return encoding.Int32Values(page.values) } + +func (page *indexedPage) Values() ValueReader { return &indexedPageValues{page: page} } + +func (page *indexedPage) Bounds() (min, max Value, ok bool) { + if ok = len(page.values) > 0; ok { + min, max = page.typ.dict.Bounds(page.values) + min.columnIndex = page.columnIndex + max.columnIndex = page.columnIndex + } + return min, max, ok +} + +func (page *indexedPage) Slice(i, j int64) Page { + return &indexedPage{ + typ: page.typ, + values: page.values[i:j], + columnIndex: page.columnIndex, + } +} + +// indexedPageType is an adapter for the indexedType returned when accessing +// the type of an indexedPage value. It overrides the Encode/Decode methods to +// account for the fact that an indexed page is holding indexes of values into +// its dictionary instead of plain values. 
+type indexedPageType struct{ *indexedType } + +func (t indexedPageType) NewValues(values []byte, _ []uint32) encoding.Values { + return encoding.Int32ValuesFromBytes(values) +} + +func (t indexedPageType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeInt32(dst, src, enc) +} + +func (t indexedPageType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeInt32(dst, src, enc) +} + +func (t indexedPageType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return Int32Type.EstimateDecodeSize(numValues, src, enc) +} + +type indexedPageValues struct { + page *indexedPage + offset int +} + +func (r *indexedPageValues) ReadValues(values []Value) (n int, err error) { + if n = len(r.page.values) - r.offset; n == 0 { + return 0, io.EOF + } + if n > len(values) { + n = len(values) + } + r.page.typ.dict.Lookup(r.page.values[r.offset:r.offset+n], values[:n]) + r.offset += n + if r.offset == len(r.page.values) { + err = io.EOF + } + return n, err +} + +// indexedColumnBuffer is an implementation of the ColumnBuffer interface which +// builds a page of indexes into a parent dictionary when values are written. 
+type indexedColumnBuffer struct{ indexedPage } + +func newIndexedColumnBuffer(typ *indexedType, columnIndex int16, numValues int32) *indexedColumnBuffer { + return &indexedColumnBuffer{ + indexedPage: indexedPage{ + typ: typ, + values: make([]int32, 0, numValues), + columnIndex: ^columnIndex, + }, + } +} + +func (col *indexedColumnBuffer) Clone() ColumnBuffer { + return &indexedColumnBuffer{ + indexedPage: indexedPage{ + typ: col.typ, + values: slices.Clone(col.values), + columnIndex: col.columnIndex, + }, + } +} + +func (col *indexedColumnBuffer) Type() Type { return col.typ.Type } + +func (col *indexedColumnBuffer) ColumnIndex() (ColumnIndex, error) { + return indexedColumnIndex{col}, nil +} + +func (col *indexedColumnBuffer) OffsetIndex() (OffsetIndex, error) { + return indexedOffsetIndex{col}, nil +} + +func (col *indexedColumnBuffer) BloomFilter() BloomFilter { return nil } + +func (col *indexedColumnBuffer) Dictionary() Dictionary { return col.typ.dict } + +func (col *indexedColumnBuffer) Pages() Pages { return onePage(col.Page()) } + +func (col *indexedColumnBuffer) Page() Page { return &col.indexedPage } + +func (col *indexedColumnBuffer) Reset() { col.values = col.values[:0] } + +func (col *indexedColumnBuffer) Cap() int { return cap(col.values) } + +func (col *indexedColumnBuffer) Len() int { return len(col.values) } + +func (col *indexedColumnBuffer) Less(i, j int) bool { + u := col.typ.dict.Index(col.values[i]) + v := col.typ.dict.Index(col.values[j]) + return col.typ.Compare(u, v) < 0 +} + +func (col *indexedColumnBuffer) Swap(i, j int) { + col.values[i], col.values[j] = col.values[j], col.values[i] +} + +func (col *indexedColumnBuffer) WriteValues(values []Value) (int, error) { + i := len(col.values) + j := len(col.values) + len(values) + + if j <= cap(col.values) { + col.values = col.values[:j] + } else { + tmp := make([]int32, j, 2*j) + copy(tmp, col.values) + col.values = tmp + } + + col.typ.dict.Insert(col.values[i:], values) + return 
len(values), nil +} + +func (col *indexedColumnBuffer) writeValues(_ columnLevels, rows sparse.Array) { + i := len(col.values) + j := len(col.values) + rows.Len() + + if j <= cap(col.values) { + col.values = col.values[:j] + } else { + tmp := make([]int32, j, 2*j) + copy(tmp, col.values) + col.values = tmp + } + + col.typ.dict.insert(col.values[i:], rows) +} + +func (col *indexedColumnBuffer) writeBoolean(_ columnLevels, value bool) { + col.values = append(col.values, col.typ.dict.insertBoolean(value)) +} + +func (col *indexedColumnBuffer) writeInt32(_ columnLevels, value int32) { + col.values = append(col.values, col.typ.dict.insertInt32(value)) +} + +func (col *indexedColumnBuffer) writeInt64(_ columnLevels, value int64) { + col.values = append(col.values, col.typ.dict.insertInt64(value)) +} + +func (col *indexedColumnBuffer) writeInt96(_ columnLevels, value deprecated.Int96) { + col.values = append(col.values, col.typ.dict.insertInt96(value)) +} + +func (col *indexedColumnBuffer) writeFloat(_ columnLevels, value float32) { + col.values = append(col.values, col.typ.dict.insertFloat(value)) +} + +func (col *indexedColumnBuffer) writeDouble(_ columnLevels, value float64) { + col.values = append(col.values, col.typ.dict.insertDouble(value)) +} + +func (col *indexedColumnBuffer) writeByteArray(_ columnLevels, value []byte) { + col.values = append(col.values, col.typ.dict.insertByteArray(value)) +} + +func (col *indexedColumnBuffer) writeNull(_ columnLevels) { + panic("cannot write null to indexed column") +} + +func (col *indexedColumnBuffer) ReadValuesAt(values []Value, offset int64) (n int, err error) { + i := int(offset) + switch { + case i < 0: + return 0, errRowIndexOutOfBounds(offset, int64(len(col.values))) + case i >= len(col.values): + return 0, io.EOF + default: + for n < len(values) && i < len(col.values) { + values[n] = col.typ.dict.Index(col.values[i]) + values[n].columnIndex = col.columnIndex + n++ + i++ + } + if n < len(values) { + err = io.EOF + } + 
return n, err + } +} + +func (col *indexedColumnBuffer) ReadRowAt(row Row, index int64) (Row, error) { + switch { + case index < 0: + return row, errRowIndexOutOfBounds(index, int64(len(col.values))) + case index >= int64(len(col.values)): + return row, io.EOF + default: + v := col.typ.dict.Index(col.values[index]) + v.columnIndex = col.columnIndex + return append(row, v), nil + } +} + +type indexedColumnIndex struct{ col *indexedColumnBuffer } + +func (index indexedColumnIndex) NumPages() int { return 1 } +func (index indexedColumnIndex) NullCount(int) int64 { return 0 } +func (index indexedColumnIndex) NullPage(int) bool { return false } +func (index indexedColumnIndex) MinValue(int) Value { + min, _, _ := index.col.Bounds() + return min +} +func (index indexedColumnIndex) MaxValue(int) Value { + _, max, _ := index.col.Bounds() + return max +} +func (index indexedColumnIndex) IsAscending() bool { + min, max, _ := index.col.Bounds() + return index.col.typ.Compare(min, max) <= 0 +} +func (index indexedColumnIndex) IsDescending() bool { + min, max, _ := index.col.Bounds() + return index.col.typ.Compare(min, max) > 0 +} + +type indexedOffsetIndex struct{ col *indexedColumnBuffer } + +func (index indexedOffsetIndex) NumPages() int { return 1 } +func (index indexedOffsetIndex) Offset(int) int64 { return 0 } +func (index indexedOffsetIndex) CompressedPageSize(int) int64 { return index.col.Size() } +func (index indexedOffsetIndex) FirstRowIndex(int) int64 { return 0 } diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_amd64.go b/vendor/github.com/parquet-go/parquet-go/dictionary_amd64.go new file mode 100644 index 00000000000..56002c7e5aa --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_amd64.go @@ -0,0 +1,168 @@ +//go:build !purego + +package parquet + +import ( + "unsafe" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/sparse" +) + +//go:noescape +func dictionaryBoundsInt32(dict []int32, indexes 
[]int32) (min, max int32, err errno) + +//go:noescape +func dictionaryBoundsInt64(dict []int64, indexes []int32) (min, max int64, err errno) + +//go:noescape +func dictionaryBoundsFloat32(dict []float32, indexes []int32) (min, max float32, err errno) + +//go:noescape +func dictionaryBoundsFloat64(dict []float64, indexes []int32) (min, max float64, err errno) + +//go:noescape +func dictionaryBoundsUint32(dict []uint32, indexes []int32) (min, max uint32, err errno) + +//go:noescape +func dictionaryBoundsUint64(dict []uint64, indexes []int32) (min, max uint64, err errno) + +//go:noescape +func dictionaryBoundsBE128(dict [][16]byte, indexes []int32) (min, max *[16]byte, err errno) + +//go:noescape +func dictionaryLookup32(dict []uint32, indexes []int32, rows sparse.Array) errno + +//go:noescape +func dictionaryLookup64(dict []uint64, indexes []int32, rows sparse.Array) errno + +//go:noescape +func dictionaryLookupByteArrayString(dict []uint32, page []byte, indexes []int32, rows sparse.Array) errno + +//go:noescape +func dictionaryLookupFixedLenByteArrayString(dict []byte, len int, indexes []int32, rows sparse.Array) errno + +//go:noescape +func dictionaryLookupFixedLenByteArrayPointer(dict []byte, len int, indexes []int32, rows sparse.Array) errno + +func (d *int32Dictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + dict := unsafecast.Slice[uint32](d.values.Slice()) + dictionaryLookup32(dict, indexes, rows).check() +} + +func (d *int64Dictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + dict := unsafecast.Slice[uint64](d.values.Slice()) + dictionaryLookup64(dict, indexes, rows).check() +} + +func (d *floatDictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + dict := unsafecast.Slice[uint32](d.values.Slice()) + dictionaryLookup32(dict, indexes, rows).check() +} + +func (d *doubleDictionary) lookup(indexes []int32, rows sparse.Array) { 
+ checkLookupIndexBounds(indexes, rows) + dict := unsafecast.Slice[uint64](d.values.Slice()) + dictionaryLookup64(dict, indexes, rows).check() +} + +func (d *byteArrayDictionary) lookupString(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + // TODO: this optimization is disabled for now because it appears to race + // with the garbage collector and result in writing pointers to free objects + // to the output. + // + // This command was used to trigger the problem: + // + // GOMAXPROCS=8 go test -run TestIssueSegmentio368 -count 10 + // + // https://github.com/segmentio/parquet-go/issues/368 + // + //dictionaryLookupByteArrayString(d.offsets, d.values, indexes, rows).check() + for i, j := range indexes { + *(*string)(rows.Index(i)) = unsafecast.String(d.index(int(j))) + } +} + +func (d *fixedLenByteArrayDictionary) lookupString(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + //dictionaryLookupFixedLenByteArrayString(d.data, d.size, indexes, rows).check() + for i, j := range indexes { + *(*string)(rows.Index(i)) = unsafecast.String(d.index(j)) + } +} + +func (d *uint32Dictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + dictionaryLookup32(d.values.Slice(), indexes, rows).check() +} + +func (d *uint64Dictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + dictionaryLookup64(d.values.Slice(), indexes, rows).check() +} + +func (d *be128Dictionary) lookupString(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + //dict := unsafecast.Slice[byte](d.values) + //dictionaryLookupFixedLenByteArrayString(dict, 16, indexes, rows).check() + s := "0123456789ABCDEF" + for i, j := range indexes { + *(**[16]byte)(unsafe.Pointer(&s)) = d.index(j) + *(*string)(rows.Index(i)) = s + } +} + +func (d *be128Dictionary) lookupPointer(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + 
//dict := unsafecast.Slice[byte](d.values) + //dictionaryLookupFixedLenByteArrayPointer(dict, 16, indexes, rows).check() + for i, j := range indexes { + *(**[16]byte)(rows.Index(i)) = d.index(j) + } +} + +func (d *int32Dictionary) bounds(indexes []int32) (min, max int32) { + min, max, err := dictionaryBoundsInt32(d.values.Slice(), indexes) + err.check() + return min, max +} + +func (d *int64Dictionary) bounds(indexes []int32) (min, max int64) { + min, max, err := dictionaryBoundsInt64(d.values.Slice(), indexes) + err.check() + return min, max +} + +func (d *floatDictionary) bounds(indexes []int32) (min, max float32) { + min, max, err := dictionaryBoundsFloat32(d.values.Slice(), indexes) + err.check() + return min, max +} + +func (d *doubleDictionary) bounds(indexes []int32) (min, max float64) { + min, max, err := dictionaryBoundsFloat64(d.values.Slice(), indexes) + err.check() + return min, max +} + +func (d *uint32Dictionary) bounds(indexes []int32) (min, max uint32) { + min, max, err := dictionaryBoundsUint32(d.values.Slice(), indexes) + err.check() + return min, max +} + +func (d *uint64Dictionary) bounds(indexes []int32) (min, max uint64) { + min, max, err := dictionaryBoundsUint64(d.values.Slice(), indexes) + err.check() + return min, max +} + +func (d *be128Dictionary) bounds(indexes []int32) (min, max *[16]byte) { + min, max, err := dictionaryBoundsBE128(d.values, indexes) + err.check() + return min, max +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_amd64.s b/vendor/github.com/parquet-go/parquet-go/dictionary_amd64.s new file mode 100644 index 00000000000..9372ffbfc02 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_amd64.s @@ -0,0 +1,941 @@ +//go:build !purego + +#include "textflag.h" + +#define errnoIndexOutOfBounds 1 + +// func dictionaryBoundsInt32(dict []int32, indexes []int32) (min, max int32, err errno) +TEXT ·dictionaryBoundsInt32(SB), NOSPLIT, $0-64 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + 
MOVQ indexes_base+24(FP), CX + MOVQ indexes_len+32(FP), DX + + XORQ R10, R10 // min + XORQ R11, R11 // max + XORQ R12, R12 // err + XORQ SI, SI + + CMPQ DX, $0 + JE return + + MOVL (CX), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVL (AX)(DI*4), R10 + MOVL R10, R11 + + CMPQ DX, $8 + JB test + + CMPB ·hasAVX512VL(SB), $0 + JE test + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + + MOVQ $0xFFFF, R8 + KMOVW R8, K1 + + VPBROADCASTD BX, Y2 // [len(dict)...] + VPBROADCASTD R10, Y3 // [min...] + VMOVDQU32 Y3, Y4 // [max...] +loopAVX512: + VMOVDQU32 (CX)(SI*4), Y0 + VPCMPUD $1, Y2, Y0, K2 + KMOVW K2, R9 + CMPB R9, $0xFF + JNE indexOutOfBounds + VPGATHERDD (AX)(Y0*4), K1, Y1 + VPMINSD Y1, Y3, Y3 + VPMAXSD Y1, Y4, Y4 + KMOVW R8, K1 + ADDQ $8, SI + CMPQ SI, DI + JNE loopAVX512 + + VPERM2I128 $1, Y3, Y3, Y0 + VPERM2I128 $1, Y4, Y4, Y1 + VPMINSD Y0, Y3, Y3 + VPMAXSD Y1, Y4, Y4 + + VPSHUFD $0b1110, Y3, Y0 + VPSHUFD $0b1110, Y4, Y1 + VPMINSD Y0, Y3, Y3 + VPMAXSD Y1, Y4, Y4 + + VPSHUFD $1, Y3, Y0 + VPSHUFD $1, Y4, Y1 + VPMINSD Y0, Y3, Y3 + VPMAXSD Y1, Y4, Y4 + + MOVQ X3, R10 + MOVQ X4, R11 + ANDQ $0xFFFFFFFF, R10 + ANDQ $0xFFFFFFFF, R11 + + VZEROUPPER + JMP test +loop: + MOVL (CX)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVL (AX)(DI*4), DI + CMPL DI, R10 + CMOVLLT DI, R10 + CMPL DI, R11 + CMOVLGT DI, R11 + INCQ SI +test: + CMPQ SI, DX + JNE loop +return: + MOVL R10, min+48(FP) + MOVL R11, max+52(FP) + MOVQ R12, err+56(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, R12 + JMP return + +// func dictionaryBoundsInt64(dict []int64, indexes []int32) (min, max int64, err errno) +TEXT ·dictionaryBoundsInt64(SB), NOSPLIT, $0-72 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ indexes_base+24(FP), CX + MOVQ indexes_len+32(FP), DX + + XORQ R10, R10 // min + XORQ R11, R11 // max + XORQ R12, R12 // err + XORQ SI, SI + + CMPQ DX, $0 + JE return + + MOVL (CX), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVQ (AX)(DI*8), R10 + MOVQ R10, R11 + + CMPQ DX, $8 + JB test 
+ + CMPB ·hasAVX512VL(SB), $0 + JE test + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + + MOVQ $0xFFFF, R8 + KMOVW R8, K1 + + VPBROADCASTD BX, Y2 // [len(dict)...] + VPBROADCASTQ R10, Z3 // [min...] + VMOVDQU64 Z3, Z4 // [max...] +loopAVX512: + VMOVDQU32 (CX)(SI*4), Y0 + VPCMPUD $1, Y2, Y0, K2 + KMOVW K2, R9 + CMPB R9, $0xFF + JNE indexOutOfBounds + VPGATHERDQ (AX)(Y0*8), K1, Z1 + VPMINSQ Z1, Z3, Z3 + VPMAXSQ Z1, Z4, Z4 + KMOVW R8, K1 + ADDQ $8, SI + CMPQ SI, DI + JNE loopAVX512 + + VPERMQ $0b1110, Z3, Z0 + VPERMQ $0b1110, Z4, Z1 + VPMINSQ Z0, Z3, Z3 + VPMAXSQ Z1, Z4, Z4 + + VPERMQ $1, Z3, Z0 + VPERMQ $1, Z4, Z1 + VPMINSQ Z0, Z3, Z3 + VPMAXSQ Z1, Z4, Z4 + + VSHUFF64X2 $2, Z3, Z3, Z0 + VSHUFF64X2 $2, Z4, Z4, Z1 + VPMINSQ Z0, Z3, Z3 + VPMAXSQ Z1, Z4, Z4 + + MOVQ X3, R10 + MOVQ X4, R11 + + VZEROUPPER + JMP test +loop: + MOVL (CX)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVQ (AX)(DI*8), DI + CMPQ DI, R10 + CMOVQLT DI, R10 + CMPQ DI, R11 + CMOVQGT DI, R11 + INCQ SI +test: + CMPQ SI, DX + JNE loop +return: + MOVQ R10, min+48(FP) + MOVQ R11, max+56(FP) + MOVQ R12, err+64(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, R12 + JMP return + +// func dictionaryBoundsFloat32(dict []float32, indexes []int32) (min, max float32, err errno) +TEXT ·dictionaryBoundsFloat32(SB), NOSPLIT, $0-64 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ indexes_base+24(FP), CX + MOVQ indexes_len+32(FP), DX + + PXOR X3, X3 // min + PXOR X4, X4 // max + XORQ R12, R12 // err + XORQ SI, SI + + CMPQ DX, $0 + JE return + + MOVL (CX), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVSS (AX)(DI*4), X3 + MOVAPS X3, X4 + + CMPQ DX, $8 + JB test + + CMPB ·hasAVX512VL(SB), $0 + JE test + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + + MOVQ $0xFFFF, R8 + KMOVW R8, K1 + + VPBROADCASTD BX, Y2 // [len(dict)...] + VPBROADCASTD X3, Y3 // [min...] + VMOVDQU32 Y3, Y4 // [max...] 
+loopAVX512: + VMOVDQU32 (CX)(SI*4), Y0 + VPCMPUD $1, Y2, Y0, K2 + KMOVW K2, R9 + CMPB R9, $0xFF + JNE indexOutOfBounds + VPGATHERDD (AX)(Y0*4), K1, Y1 + VMINPS Y1, Y3, Y3 + VMAXPS Y1, Y4, Y4 + KMOVW R8, K1 + ADDQ $8, SI + CMPQ SI, DI + JNE loopAVX512 + + VPERM2I128 $1, Y3, Y3, Y0 + VPERM2I128 $1, Y4, Y4, Y1 + VMINPS Y0, Y3, Y3 + VMAXPS Y1, Y4, Y4 + + VPSHUFD $0b1110, Y3, Y0 + VPSHUFD $0b1110, Y4, Y1 + VMINPS Y0, Y3, Y3 + VMAXPS Y1, Y4, Y4 + + VPSHUFD $1, Y3, Y0 + VPSHUFD $1, Y4, Y1 + VMINPS Y0, Y3, Y3 + VMAXPS Y1, Y4, Y4 + + VZEROUPPER + JMP test +loop: + MOVL (CX)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVSS (AX)(DI*4), X1 + UCOMISS X3, X1 + JAE skipAssignMin + MOVAPS X1, X3 +skipAssignMin: + UCOMISS X4, X1 + JBE skipAssignMax + MOVAPS X1, X4 +skipAssignMax: + INCQ SI +test: + CMPQ SI, DX + JNE loop +return: + MOVSS X3, min+48(FP) + MOVSS X4, max+52(FP) + MOVQ R12, err+56(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, R12 + JMP return + +// func dictionaryBoundsFloat64(dict []float64, indexes []int32) (min, max float64, err errno) +TEXT ·dictionaryBoundsFloat64(SB), NOSPLIT, $0-72 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ indexes_base+24(FP), CX + MOVQ indexes_len+32(FP), DX + + PXOR X3, X3 // min + PXOR X4, X4 // max + XORQ R12, R12 // err + XORQ SI, SI + + CMPQ DX, $0 + JE return + + MOVL (CX), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVSD (AX)(DI*8), X3 + MOVAPS X3, X4 + + CMPQ DX, $8 + JB test + + CMPB ·hasAVX512VL(SB), $0 + JE test + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + + MOVQ $0xFFFF, R8 + KMOVW R8, K1 + + VPBROADCASTD BX, Y2 // [len(dict)...] + VPBROADCASTQ X3, Z3 // [min...] + VMOVDQU64 Z3, Z4 // [max...] 
+loopAVX512: + VMOVDQU32 (CX)(SI*4), Y0 + VPCMPUD $1, Y2, Y0, K2 + KMOVW K2, R9 + CMPB R9, $0xFF + JNE indexOutOfBounds + VPGATHERDQ (AX)(Y0*8), K1, Z1 + VMINPD Z1, Z3, Z3 + VMAXPD Z1, Z4, Z4 + KMOVW R8, K1 + ADDQ $8, SI + CMPQ SI, DI + JNE loopAVX512 + + VPERMQ $0b1110, Z3, Z0 + VPERMQ $0b1110, Z4, Z1 + VMINPD Z0, Z3, Z3 + VMAXPD Z1, Z4, Z4 + + VPERMQ $1, Z3, Z0 + VPERMQ $1, Z4, Z1 + VMINPD Z0, Z3, Z3 + VMAXPD Z1, Z4, Z4 + + VSHUFF64X2 $2, Z3, Z3, Z0 + VSHUFF64X2 $2, Z4, Z4, Z1 + VMINPD Z0, Z3, Z3 + VMAXPD Z1, Z4, Z4 + + VZEROUPPER + JMP test +loop: + MOVL (CX)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVSD (AX)(DI*8), X1 + UCOMISD X3, X1 + JAE skipAssignMin + MOVAPD X1, X3 +skipAssignMin: + UCOMISD X4, X1 + JBE skipAssignMax + MOVAPD X1, X4 +skipAssignMax: + INCQ SI +test: + CMPQ SI, DX + JNE loop +return: + MOVSD X3, min+48(FP) + MOVSD X4, max+56(FP) + MOVQ R12, err+64(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, R12 + JMP return + +// func dictionaryBoundsUint32(dict []uint32, indexes []int32) (min, max uint32, err errno) +TEXT ·dictionaryBoundsUint32(SB), NOSPLIT, $0-64 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ indexes_base+24(FP), CX + MOVQ indexes_len+32(FP), DX + + XORQ R10, R10 // min + XORQ R11, R11 // max + XORQ R12, R12 // err + XORQ SI, SI + + CMPQ DX, $0 + JE return + + MOVL (CX), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVL (AX)(DI*4), R10 + MOVL R10, R11 + + CMPQ DX, $8 + JB test + + CMPB ·hasAVX512VL(SB), $0 + JE test + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + + MOVQ $0xFFFF, R8 + KMOVW R8, K1 + + VPBROADCASTD BX, Y2 // [len(dict)...] + VPBROADCASTD R10, Y3 // [min...] + VMOVDQU32 Y3, Y4 // [max...] 
+loopAVX512: + VMOVDQU32 (CX)(SI*4), Y0 + VPCMPUD $1, Y2, Y0, K2 + KMOVW K2, R9 + CMPB R9, $0xFF + JNE indexOutOfBounds + VPGATHERDD (AX)(Y0*4), K1, Y1 + VPMINUD Y1, Y3, Y3 + VPMAXUD Y1, Y4, Y4 + KMOVW R8, K1 + ADDQ $8, SI + CMPQ SI, DI + JNE loopAVX512 + + VPERM2I128 $1, Y3, Y3, Y0 + VPERM2I128 $1, Y4, Y4, Y1 + VPMINUD Y0, Y3, Y3 + VPMAXUD Y1, Y4, Y4 + + VPSHUFD $0b1110, Y3, Y0 + VPSHUFD $0b1110, Y4, Y1 + VPMINUD Y0, Y3, Y3 + VPMAXUD Y1, Y4, Y4 + + VPSHUFD $1, Y3, Y0 + VPSHUFD $1, Y4, Y1 + VPMINUD Y0, Y3, Y3 + VPMAXUD Y1, Y4, Y4 + + MOVQ X3, R10 + MOVQ X4, R11 + ANDQ $0xFFFFFFFF, R10 + ANDQ $0xFFFFFFFF, R11 + + VZEROUPPER + JMP test +loop: + MOVL (CX)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVL (AX)(DI*4), DI + CMPL DI, R10 + CMOVLCS DI, R10 + CMPL DI, R11 + CMOVLHI DI, R11 + INCQ SI +test: + CMPQ SI, DX + JNE loop +return: + MOVL R10, min+48(FP) + MOVL R11, max+52(FP) + MOVQ R12, err+56(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, R12 + JMP return + +// func dictionaryBoundsUint64(dict []uint64, indexes []int32) (min, max uint64, err errno) +TEXT ·dictionaryBoundsUint64(SB), NOSPLIT, $0-72 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ indexes_base+24(FP), CX + MOVQ indexes_len+32(FP), DX + + XORQ R10, R10 // min + XORQ R11, R11 // max + XORQ R12, R12 // err + XORQ SI, SI + + CMPQ DX, $0 + JE return + + MOVL (CX)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVQ (AX)(DI*8), R10 + MOVQ R10, R11 + + CMPQ DX, $8 + JB test + + CMPB ·hasAVX512VL(SB), $0 + JE test + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + + MOVQ $0xFFFF, R8 + KMOVW R8, K1 + + VPBROADCASTD BX, Y2 // [len(dict)...] + VPBROADCASTQ R10, Z3 // [min...] + VMOVDQU64 Z3, Z4 // [max...] 
+loopAVX512: + VMOVDQU32 (CX)(SI*4), Y0 + VPCMPUD $1, Y2, Y0, K2 + KMOVW K2, R9 + CMPB R9, $0xFF + JNE indexOutOfBounds + VPGATHERDQ (AX)(Y0*8), K1, Z1 + VPMINUQ Z1, Z3, Z3 + VPMAXUQ Z1, Z4, Z4 + KMOVW R8, K1 + ADDQ $8, SI + CMPQ SI, DI + JNE loopAVX512 + + VPERMQ $0b1110, Z3, Z0 + VPERMQ $0b1110, Z4, Z1 + VPMINUQ Z0, Z3, Z3 + VPMAXUQ Z1, Z4, Z4 + + VPERMQ $1, Z3, Z0 + VPERMQ $1, Z4, Z1 + VPMINUQ Z0, Z3, Z3 + VPMAXUQ Z1, Z4, Z4 + + VSHUFF64X2 $2, Z3, Z3, Z0 + VSHUFF64X2 $2, Z4, Z4, Z1 + VPMINUQ Z0, Z3, Z3 + VPMAXUQ Z1, Z4, Z4 + + MOVQ X3, R10 + MOVQ X4, R11 + + VZEROUPPER + JMP test +loop: + MOVL (CX)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVQ (AX)(DI*8), DI + CMPQ DI, R10 + CMOVQCS DI, R10 + CMPQ DI, R11 + CMOVQHI DI, R11 + INCQ SI +test: + CMPQ SI, DX + JNE loop +return: + MOVQ R10, min+48(FP) + MOVQ R11, max+56(FP) + MOVQ R12, err+64(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, R12 + JMP return + +// func dictionaryBoundsBE128(dict [][16]byte, indexes []int32) (min, max *[16]byte, err errno) +TEXT ·dictionaryBoundsBE128(SB), NOSPLIT, $0-72 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ indexes_base+24(FP), CX + MOVQ indexes_len+32(FP), DX + SHLQ $2, DX // x 4 + ADDQ CX, DX // end + + XORQ R8, R8 // min (pointer) + XORQ R9, R9 // max (pointer) + XORQ SI, SI // err + XORQ DI, DI + + CMPQ DX, $0 + JE return + + MOVL (CX), DI + CMPL DI, BX + JAE indexOutOfBounds + SHLQ $4, DI // the dictionary contains 16 byte words + LEAQ (AX)(DI*1), R8 + MOVQ R8, R9 + MOVQ 0(AX)(DI*1), R10 // min (high) + MOVQ 8(AX)(DI*1), R11 // min (low) + BSWAPQ R10 + BSWAPQ R11 + MOVQ R10, R12 // max (high) + MOVQ R11, R13 // max (low) + + JMP next +loop: + MOVL (CX), DI + CMPL DI, BX + JAE indexOutOfBounds + SHLQ $4, DI + MOVQ 0(AX)(DI*1), R14 + MOVQ 8(AX)(DI*1), R15 + BSWAPQ R14 + BSWAPQ R15 +testLessThan: + CMPQ R14, R10 + JA testGreaterThan + JB lessThan + CMPQ R15, R11 + JAE testGreaterThan +lessThan: + LEAQ (AX)(DI*1), R8 + MOVQ R14, R10 + MOVQ 
R15, R11 + JMP next +testGreaterThan: + CMPQ R14, R12 + JB next + JA greaterThan + CMPQ R15, R13 + JBE next +greaterThan: + LEAQ (AX)(DI*1), R9 + MOVQ R14, R12 + MOVQ R15, R13 +next: + ADDQ $4, CX + CMPQ CX, DX + JNE loop +return: + MOVQ R8, min+48(FP) + MOVQ R9, max+56(FP) + MOVQ SI, err+64(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, SI + JMP return + +// The lookup functions provide optimized versions of the dictionary index +// lookup logic. +// +// When AVX512 is available, the AVX512 versions of the functions are used +// which use the VPGATHER* instructions to perform 8 parallel lookups of the +// values in the dictionary, then VPSCATTER* to do 8 parallel writes to the +// sparse output buffer. + +// func dictionaryLookup32(dict []uint32, indexes []int32, rows sparse.Array) errno +TEXT ·dictionaryLookup32(SB), NOSPLIT, $0-80 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ indexes_base+24(FP), CX + MOVQ indexes_len+32(FP), DX + + MOVQ rows_array_ptr+48(FP), R8 + MOVQ rows_array_off+64(FP), R9 + + XORQ SI, SI + + CMPQ DX, $8 + JB test + + CMPB ·hasAVX512VL(SB), $0 + JE test + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + + MOVQ R9, R10 + SHLQ $3, R10 // 8 * size + + MOVW $0xFFFF, R11 + KMOVW R11, K1 + KMOVW R11, K2 + + VPBROADCASTD R9, Y2 // [size...] + VPMULLD ·range0n8(SB), Y2, Y2 // [0*size,1*size,...] + VPBROADCASTD BX, Y3 // [len(dict)...] 
+loopAVX512: + VMOVDQU32 (CX)(SI*4), Y0 + VPCMPUD $1, Y3, Y0, K3 + KMOVW K3, R11 + CMPB R11, $0xFF + JNE indexOutOfBounds + VPGATHERDD (AX)(Y0*4), K1, Y1 + VPSCATTERDD Y1, K2, (R8)(Y2*1) + KMOVW R11, K1 + KMOVW R11, K2 + ADDQ R10, R8 + ADDQ $8, SI + CMPQ SI, DI + JNE loopAVX512 + VZEROUPPER + JMP test +loop: + MOVL (CX)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVL (AX)(DI*4), DI + MOVL DI, (R8) + ADDQ R9, R8 + INCQ SI +test: + CMPQ SI, DX + JNE loop + XORQ AX, AX +return: + MOVQ AX, ret+72(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, AX + JMP return + +// func dictionaryLookup64(dict []uint64, indexes []int32, rows sparse.Array) errno +TEXT ·dictionaryLookup64(SB), NOSPLIT, $0-80 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ indexes_base+24(FP), CX + MOVQ indexes_len+32(FP), DX + + MOVQ rows_array_ptr+48(FP), R8 + MOVQ rows_array_off+64(FP), R9 + + XORQ SI, SI + + CMPQ DX, $8 + JB test + + CMPB ·hasAVX512VL(SB), $0 + JE test + + MOVQ DX, DI + SHRQ $3, DI + SHLQ $3, DI + + MOVQ R9, R10 + SHLQ $3, R10 // 8 * size + + MOVW $0xFFFF, R11 + KMOVW R11, K1 + KMOVW R11, K2 + + VPBROADCASTD R9, Y2 // [size...] + VPMULLD ·range0n8(SB), Y2, Y2 // [0*size,1*size,...] + VPBROADCASTD BX, Y3 // [len(dict)...] 
+loopAVX512: + VMOVDQU32 (CX)(SI*4), Y0 + VPCMPUD $1, Y3, Y0, K3 + KMOVW K3, R11 + CMPB R11, $0xFF + JNE indexOutOfBounds + VPGATHERDQ (AX)(Y0*8), K1, Z1 + VPSCATTERDQ Z1, K2, (R8)(Y2*1) + KMOVW R11, K1 + KMOVW R11, K2 + ADDQ R10, R8 + ADDQ $8, SI + CMPQ SI, DI + JNE loopAVX512 + VZEROUPPER + JMP test +loop: + MOVL (CX)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + MOVQ (AX)(DI*8), DI + MOVQ DI, (R8) + ADDQ R9, R8 + INCQ SI +test: + CMPQ SI, DX + JNE loop + XORQ AX, AX +return: + MOVQ AX, ret+72(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, AX + JMP return + +// func dictionaryLookupByteArrayString(dict []uint32, page []byte, indexes []int32, rows sparse.Array) errno +TEXT ·dictionaryLookupByteArrayString(SB), NOSPLIT, $0-104 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + DECQ BX // the offsets have the total length as last element + + MOVQ page_base+24(FP), CX + + MOVQ indexes_base+48(FP), R8 + MOVQ indexes_len+56(FP), R9 + + MOVQ rows_array_ptr+72(FP), R10 + MOVQ rows_array_off+88(FP), R11 + + XORQ DI, DI + XORQ SI, SI +loop: + // Load the index that we want to read the value from. This may come from + // user input so we must validate that the indexes are within the bounds of + // the dictionary. + MOVL (R8)(SI*4), DI + CMPL DI, BX + JAE indexOutOfBounds + + // Load the offsets within the dictionary page where the value is stored. + // We trust the offsets to be correct since they are generated internally by + // the dictionary code, there is no need to check that they are within the + // bounds of the dictionary page. + MOVL 0(AX)(DI*4), DX + MOVL 4(AX)(DI*4), DI + + // Compute the length of the value (the difference between two consecutive + // offsets), and the pointer to the first byte of the string value. + SUBL DX, DI + LEAQ (CX)(DX*1), DX + + // Store the length and pointer to the value into the output location. + // The memory layout is expected to hold a pointer and length, which are + // both 64 bits words. 
This is the layout used by parquet.Value and the Go + // string value type. + MOVQ DX, (R10) + MOVQ DI, 8(R10) + + ADDQ R11, R10 + INCQ SI +test: + CMPQ SI, R9 + JNE loop + XORQ AX, AX +return: + MOVQ AX, ret+96(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, AX + JMP return + +// func dictionaryLookupFixedLenByteArrayString(dict []byte, len int, indexes []int32, rows sparse.Array) errno +TEXT ·dictionaryLookupFixedLenByteArrayString(SB), NOSPLIT, $0-88 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ len+24(FP), CX + + MOVQ indexes_base+32(FP), DX + MOVQ indexes_len+40(FP), R8 + + MOVQ rows_array_ptr+56(FP), R9 + MOVQ rows_array_off+72(FP), R10 + + XORQ DI, DI + XORQ SI, SI +loop: + MOVL (DX)(SI*4), DI + IMULQ CX, DI + CMPL DI, BX + JAE indexOutOfBounds + + ADDQ AX, DI + MOVQ DI, (R9) + MOVQ CX, 8(R9) + + ADDQ R10, R9 + INCQ SI +test: + CMPQ SI, R8 + JNE loop + XORQ AX, AX +return: + MOVQ AX, ret+80(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, AX + JMP return + +// This is the same algorithm as dictionaryLookupFixedLenByteArrayString but we +// only store the pointer to the location holding the value instead of storing +// the pair of pointer and length. Since the length is fixed for this dictionary +// type, the application can assume it at the call site. 
+// +// func dictionaryLookupFixedLenByteArrayPointer(dict []byte, len int, indexes []int32, rows sparse.Array) errno +TEXT ·dictionaryLookupFixedLenByteArrayPointer(SB), NOSPLIT, $0-88 + MOVQ dict_base+0(FP), AX + MOVQ dict_len+8(FP), BX + + MOVQ len+24(FP), CX + + MOVQ indexes_base+32(FP), DX + MOVQ indexes_len+40(FP), R8 + + MOVQ rows_array_ptr+56(FP), R9 + MOVQ rows_array_off+72(FP), R10 + + XORQ DI, DI + XORQ SI, SI +loop: + MOVL (DX)(SI*4), DI + IMULQ CX, DI + CMPL DI, BX + JAE indexOutOfBounds + + ADDQ AX, DI + MOVQ DI, (R9) + + ADDQ R10, R9 + INCQ SI +test: + CMPQ SI, R8 + JNE loop + XORQ AX, AX +return: + MOVQ AX, ret+80(FP) + RET +indexOutOfBounds: + MOVQ $errnoIndexOutOfBounds, AX + JMP return + +GLOBL ·range0n8(SB), RODATA|NOPTR, $40 +DATA ·range0n8+0(SB)/4, $0 +DATA ·range0n8+4(SB)/4, $1 +DATA ·range0n8+8(SB)/4, $2 +DATA ·range0n8+12(SB)/4, $3 +DATA ·range0n8+16(SB)/4, $4 +DATA ·range0n8+20(SB)/4, $5 +DATA ·range0n8+24(SB)/4, $6 +DATA ·range0n8+28(SB)/4, $7 +DATA ·range0n8+32(SB)/4, $8 diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_be128.go b/vendor/github.com/parquet-go/parquet-go/dictionary_be128.go new file mode 100644 index 00000000000..108e1382a6d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_be128.go @@ -0,0 +1,184 @@ +package parquet + +import ( + "encoding/binary" + "fmt" + "math" + "unsafe" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/hashprobe" + "github.com/parquet-go/parquet-go/sparse" +) + +type be128Dictionary struct { + be128Page + table *hashprobe.Uint128Table +} + +func newBE128Dictionary(typ Type, columnIndex int16, numValues int32, data encoding.Values) *be128Dictionary { + return &be128Dictionary{ + be128Page: be128Page{ + typ: typ, + values: data.Uint128()[:numValues], + columnIndex: ^columnIndex, + }, + } +} + +func (d *be128Dictionary) Type() Type { return newIndexedType(d.typ, d) } + +func (d 
*be128Dictionary) Len() int { return len(d.values) } + +func (d *be128Dictionary) Size() int64 { return int64(len(d.values) * 16) } + +func (d *be128Dictionary) Index(i int32) Value { return d.makeValue(d.index(i)) } + +func (d *be128Dictionary) index(i int32) *[16]byte { return &d.values[i] } + +func (d *be128Dictionary) Insert(indexes []int32, values []Value) { + _ = indexes[:len(values)] + + for _, v := range values { + if v.kind != ^int8(FixedLenByteArray) { + panic("values inserted in BE128 dictionary must be of type BYTE_ARRAY") + } + if v.u64 != 16 { + panic("values inserted in BE128 dictionary must be of length 16") + } + } + + if d.table == nil { + d.init(indexes) + } + + const chunkSize = insertsTargetCacheFootprint / 16 + var buffer [chunkSize][16]byte + + for i := 0; i < len(values); i += chunkSize { + j := min(chunkSize+i, len(values)) + n := min(chunkSize, len(values)-i) + + probe := buffer[:n:n] + writePointersBE128(probe, makeArrayValue(values[i:j], unsafe.Offsetof(values[i].ptr))) + + if d.table.Probe(probe, indexes[i:j:j]) > 0 { + for k, v := range probe { + if indexes[i+k] == int32(len(d.values)) { + d.values = append(d.values, v) + } + } + } + } +} + +func (d *be128Dictionary) init(indexes []int32) { + d.table = hashprobe.NewUint128Table(len(d.values), 0.75) + + n := min(len(d.values), len(indexes)) + + for i := 0; i < len(d.values); i += n { + j := min(i+n, len(d.values)) + d.table.Probe(d.values[i:j:j], indexes[:n:n]) + } +} + +func (d *be128Dictionary) insert(indexes []int32, rows sparse.Array) { + const chunkSize = insertsTargetCacheFootprint / 16 + + if d.table == nil { + d.init(indexes) + } + + values := rows.Uint128Array() + + for i := 0; i < values.Len(); i += chunkSize { + j := min(i+chunkSize, values.Len()) + + if d.table.ProbeArray(values.Slice(i, j), indexes[i:j:j]) > 0 { + for k, index := range indexes[i:j] { + if index == int32(len(d.values)) { + d.values = append(d.values, values.Index(i+k)) + } + } + } + } +} + +func (d 
*be128Dictionary) Lookup(indexes []int32, values []Value) { + model := d.makeValueString("") + memsetValues(values, model) + d.lookupString(indexes, makeArrayValue(values, offsetOfPtr)) +} + +func (d *be128Dictionary) Bounds(indexes []int32) (min, max Value) { + if len(indexes) > 0 { + minValue, maxValue := d.bounds(indexes) + min = d.makeValue(minValue) + max = d.makeValue(maxValue) + } + return min, max +} + +func (d *be128Dictionary) Reset() { + d.values = d.values[:0] + if d.table != nil { + d.table.Reset() + } +} + +func (d *be128Dictionary) Page() Page { + return &d.be128Page +} + +func (d *be128Dictionary) insertBoolean(value bool) int32 { + var buf [16]byte + if value { + buf[15] = 1 + } + return d.insertByteArray(buf[:]) +} + +func (d *be128Dictionary) insertInt32(value int32) int32 { + var buf [16]byte + binary.BigEndian.PutUint32(buf[12:16], uint32(value)) + return d.insertByteArray(buf[:]) +} + +func (d *be128Dictionary) insertInt64(value int64) int32 { + var buf [16]byte + binary.BigEndian.PutUint64(buf[8:16], uint64(value)) + return d.insertByteArray(buf[:]) +} + +func (d *be128Dictionary) insertInt96(value deprecated.Int96) int32 { + var buf [16]byte + binary.BigEndian.PutUint32(buf[4:8], value[2]) + binary.BigEndian.PutUint32(buf[8:12], value[1]) + binary.BigEndian.PutUint32(buf[12:16], value[0]) + return d.insertByteArray(buf[:]) +} + +func (d *be128Dictionary) insertFloat(value float32) int32 { + var buf [16]byte + binary.BigEndian.PutUint32(buf[12:16], math.Float32bits(value)) + return d.insertByteArray(buf[:]) +} + +func (d *be128Dictionary) insertDouble(value float64) int32 { + var buf [16]byte + binary.BigEndian.PutUint64(buf[8:16], math.Float64bits(value)) + return d.insertByteArray(buf[:]) +} + +func (d *be128Dictionary) insertByteArray(value []byte) int32 { + if len(value) != 16 { + panic(fmt.Sprintf("byte array length %d does not match required length 16 for be128", len(value))) + } + + b := ([16]byte)(value) + i := [1]int32{} + 
d.insert(i[:], makeArrayFromPointer(&b)) + return i[0] +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_boolean.go b/vendor/github.com/parquet-go/parquet-go/dictionary_boolean.go new file mode 100644 index 00000000000..c41fab38dd6 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_boolean.go @@ -0,0 +1,181 @@ +package parquet + +import ( + "math/bits" + + "github.com/parquet-go/bitpack" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/encoding/plain" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +// The boolean dictionary always contains two values for true and false. +type booleanDictionary struct { + booleanPage + // There are only two possible values for booleans, false and true. + // Rather than using a Go map, we track the indexes of each values + // in an array of two 32 bits integers. When inserting values in the + // dictionary, we ensure that an index exist for each boolean value, + // then use the value 0 or 1 (false or true) to perform a lookup in + // the dictionary's map. 
+ table [2]int32 +} + +func newBooleanDictionary(typ Type, columnIndex int16, numValues int32, data encoding.Values) *booleanDictionary { + indexOfFalse, indexOfTrue, values := int32(-1), int32(-1), data.Boolean() + + for i := int32(0); i < numValues && indexOfFalse < 0 && indexOfTrue < 0; i += 8 { + v := values[i] + if v != 0x00 { + indexOfTrue = i + int32(bits.TrailingZeros8(v)) + } + if v != 0xFF { + indexOfFalse = i + int32(bits.TrailingZeros8(^v)) + } + } + + return &booleanDictionary{ + booleanPage: booleanPage{ + typ: typ, + bits: memory.SliceBufferFrom(values[:bitpack.ByteCount(uint(numValues))]), + numValues: numValues, + columnIndex: ^columnIndex, + }, + table: [2]int32{ + 0: indexOfFalse, + 1: indexOfTrue, + }, + } +} + +func (d *booleanDictionary) Type() Type { return newIndexedType(d.typ, d) } + +func (d *booleanDictionary) Len() int { return int(d.numValues) } + +func (d *booleanDictionary) Size() int64 { return int64(d.bits.Len()) } + +func (d *booleanDictionary) Index(i int32) Value { return d.makeValue(d.index(i)) } + +func (d *booleanDictionary) index(i int32) bool { return d.valueAt(int(i)) } + +func (d *booleanDictionary) Insert(indexes []int32, values []Value) { + d.insert(indexes, makeArrayValue(values, offsetOfBool)) +} + +func (d *booleanDictionary) insert(indexes []int32, rows sparse.Array) { + _ = indexes[:rows.Len()] + + if d.table[0] < 0 { + d.table[0] = d.numValues + d.numValues++ + bits := plain.AppendBoolean(d.bits.Slice(), int(d.table[0]), false) + d.bits = memory.SliceBufferFrom(bits) + } + + if d.table[1] < 0 { + d.table[1] = d.numValues + d.numValues++ + bits := plain.AppendBoolean(d.bits.Slice(), int(d.table[1]), true) + d.bits = memory.SliceBufferFrom(bits) + } + + values := rows.Uint8Array() + dict := d.table + + for i := range rows.Len() { + v := values.Index(i) & 1 + indexes[i] = dict[v] + } +} + +func (d *booleanDictionary) Lookup(indexes []int32, values []Value) { + model := d.makeValue(false) + memsetValues(values, model) 
+ d.lookup(indexes, makeArrayValue(values, offsetOfU64)) +} + +func (d *booleanDictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(*bool)(rows.Index(i)) = d.index(j) + } +} + +func (d *booleanDictionary) Bounds(indexes []int32) (min, max Value) { + if len(indexes) > 0 { + hasFalse, hasTrue := false, false + + for _, i := range indexes { + v := d.index(i) + if v { + hasTrue = true + } else { + hasFalse = true + } + if hasTrue && hasFalse { + break + } + } + + min = d.makeValue(!hasFalse) + max = d.makeValue(hasTrue) + } + return min, max +} + +func (d *booleanDictionary) Reset() { + d.bits.Reset() + d.offset = 0 + d.numValues = 0 + d.table = [2]int32{-1, -1} +} + +func (d *booleanDictionary) Page() Page { + return &d.booleanPage +} + +func (d *booleanDictionary) insertBoolean(value bool) int32 { + // Ensure both indexes are initialized + if d.table[0] < 0 { + d.table[0] = d.numValues + d.numValues++ + bits := plain.AppendBoolean(d.bits.Slice(), int(d.table[0]), false) + d.bits = memory.SliceBufferFrom(bits) + } + if d.table[1] < 0 { + d.table[1] = d.numValues + d.numValues++ + bits := plain.AppendBoolean(d.bits.Slice(), int(d.table[1]), true) + d.bits = memory.SliceBufferFrom(bits) + } + if value { + return d.table[1] + } + return d.table[0] +} + +func (d *booleanDictionary) insertInt32(value int32) int32 { + return d.insertBoolean(value != 0) +} + +func (d *booleanDictionary) insertInt64(value int64) int32 { + return d.insertBoolean(value != 0) +} + +func (d *booleanDictionary) insertInt96(value deprecated.Int96) int32 { + return d.insertBoolean(!value.IsZero()) +} + +func (d *booleanDictionary) insertFloat(value float32) int32 { + return d.insertBoolean(value != 0) +} + +func (d *booleanDictionary) insertDouble(value float64) int32 { + return d.insertBoolean(value != 0) +} + +func (d *booleanDictionary) insertByteArray(value []byte) int32 { + return d.insertBoolean(len(value) != 0) +} 
diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_byte_array.go b/vendor/github.com/parquet-go/parquet-go/dictionary_byte_array.go new file mode 100644 index 00000000000..e1eac7ebf5c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_byte_array.go @@ -0,0 +1,165 @@ +package parquet + +import ( + "strconv" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type byteArrayDictionary struct { + byteArrayPage + table map[string]int32 + alloc allocator +} + +func newByteArrayDictionary(typ Type, columnIndex int16, numValues int32, data encoding.Values) *byteArrayDictionary { + values, offsets := data.ByteArray() + // The first offset must always be zero, and the last offset is the length + // of the values in bytes. + // + // As an optimization we make the assumption that the backing array of the + // offsets slice belongs to the dictionary. 
+ switch { + case cap(offsets) == 0: + offsets = make([]uint32, 1, 8) + case len(offsets) == 0: + offsets = append(offsets[:0], 0) + } + return &byteArrayDictionary{ + byteArrayPage: byteArrayPage{ + typ: typ, + values: memory.SliceBufferFrom(values), + offsets: memory.SliceBufferFrom(offsets), + columnIndex: ^columnIndex, + }, + } +} + +func (d *byteArrayDictionary) Type() Type { return newIndexedType(d.typ, d) } + +func (d *byteArrayDictionary) Len() int { return d.len() } + +func (d *byteArrayDictionary) Size() int64 { return int64(d.values.Len()) } + +func (d *byteArrayDictionary) Index(i int32) Value { return d.makeValueBytes(d.index(int(i))) } + +func (d *byteArrayDictionary) Insert(indexes []int32, values []Value) { + d.insert(indexes, makeArrayValue(values, offsetOfPtr)) +} + +func (d *byteArrayDictionary) init() { + numValues := d.len() + d.table = make(map[string]int32, numValues) + + for i := range numValues { + d.table[string(d.index(i))] = int32(len(d.table)) + } +} + +func (d *byteArrayDictionary) insert(indexes []int32, rows sparse.Array) { + if d.table == nil { + d.init() + } + + values := rows.StringArray() + + for i := range indexes { + value := values.Index(i) + + index, exists := d.table[value] + if !exists { + value = d.alloc.copyString(value) + index = int32(len(d.table)) + d.table[value] = index + d.values.Append([]byte(value)...) 
+ d.offsets.AppendValue(uint32(d.values.Len())) + } + + indexes[i] = index + } +} + +func (d *byteArrayDictionary) Lookup(indexes []int32, values []Value) { + model := d.makeValueString("") + memsetValues(values, model) + d.lookupString(indexes, makeArrayValue(values, offsetOfPtr)) +} + +func (d *byteArrayDictionary) Bounds(indexes []int32) (min, max Value) { + if len(indexes) > 0 { + base := d.index(int(indexes[0])) + minValue := unsafecast.String(base) + maxValue := minValue + values := [64]string{} + + for i := 1; i < len(indexes); i += len(values) { + n := len(indexes) - i + if n > len(values) { + n = len(values) + } + j := i + n + d.lookupString(indexes[i:j:j], makeArrayFromSlice(values[:n:n])) + + for _, value := range values[:n:n] { + switch { + case value < minValue: + minValue = value + case value > maxValue: + maxValue = value + } + } + } + + min = d.makeValueString(minValue) + max = d.makeValueString(maxValue) + } + return min, max +} + +func (d *byteArrayDictionary) Reset() { + d.offsets.Resize(1) + d.values.Resize(0) + for k := range d.table { + delete(d.table, k) + } + d.alloc.reset() +} + +func (d *byteArrayDictionary) Page() Page { + return &d.byteArrayPage +} + +func (d *byteArrayDictionary) insertBoolean(value bool) int32 { + return d.insertByteArray(strconv.AppendBool(make([]byte, 0, 8), value)) +} + +func (d *byteArrayDictionary) insertInt32(value int32) int32 { + return d.insertByteArray(strconv.AppendInt(make([]byte, 0, 16), int64(value), 10)) +} + +func (d *byteArrayDictionary) insertInt64(value int64) int32 { + return d.insertByteArray(strconv.AppendInt(make([]byte, 0, 24), value, 10)) +} + +func (d *byteArrayDictionary) insertInt96(value deprecated.Int96) int32 { + return d.insertByteArray([]byte(value.String())) +} + +func (d *byteArrayDictionary) insertFloat(value float32) int32 { + return d.insertByteArray(strconv.AppendFloat(make([]byte, 0, 24), float64(value), 'g', -1, 32)) +} + +func (d *byteArrayDictionary) insertDouble(value 
float64) int32 { + return d.insertByteArray(strconv.AppendFloat(make([]byte, 0, 24), value, 'g', -1, 64)) +} + +func (d *byteArrayDictionary) insertByteArray(value []byte) int32 { + s := unsafecast.String(value) + var indexes [1]int32 + d.insert(indexes[:], makeArrayFromPointer(&s)) + return indexes[0] +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_double.go b/vendor/github.com/parquet-go/parquet-go/dictionary_double.go new file mode 100644 index 00000000000..af1d54e0ed1 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_double.go @@ -0,0 +1,137 @@ +package parquet + +import ( + "strconv" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/hashprobe" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type doubleDictionary struct { + doublePage + table *hashprobe.Float64Table +} + +func newDoubleDictionary(typ Type, columnIndex int16, numValues int32, data encoding.Values) *doubleDictionary { + return &doubleDictionary{ + doublePage: doublePage{ + typ: typ, + values: memory.SliceBufferFrom(data.Double()[:numValues]), + columnIndex: ^columnIndex, + }, + } +} + +func (d *doubleDictionary) Type() Type { return newIndexedType(d.typ, d) } + +func (d *doubleDictionary) Len() int { return d.values.Len() } + +func (d *doubleDictionary) Size() int64 { return int64(d.values.Len() * 8) } + +func (d *doubleDictionary) Index(i int32) Value { return d.makeValue(d.index(i)) } + +func (d *doubleDictionary) index(i int32) float64 { return d.values.Slice()[i] } + +func (d *doubleDictionary) Insert(indexes []int32, values []Value) { + d.insert(indexes, makeArrayValue(values, offsetOfU64)) +} + +func (d *doubleDictionary) init(indexes []int32) { + values := d.values.Slice() + d.table = hashprobe.NewFloat64Table(len(values), hashprobeTableMaxLoad) + + n := min(len(values), len(indexes)) + + for i := 0; i < 
len(values); i += n { + j := min(i+n, len(values)) + d.table.Probe(values[i:j:j], indexes[:n:n]) + } +} + +func (d *doubleDictionary) insert(indexes []int32, rows sparse.Array) { + const chunkSize = insertsTargetCacheFootprint / 8 + + if d.table == nil { + d.init(indexes) + } + + values := rows.Float64Array() + + for i := 0; i < values.Len(); i += chunkSize { + j := min(i+chunkSize, values.Len()) + + if d.table.ProbeArray(values.Slice(i, j), indexes[i:j:j]) > 0 { + for k, index := range indexes[i:j] { + if index == int32(d.values.Len()) { + d.values.Append(values.Index(i + k)) + } + } + } + } +} + +func (d *doubleDictionary) Lookup(indexes []int32, values []Value) { + model := d.makeValue(0) + memsetValues(values, model) + d.lookup(indexes, makeArrayValue(values, offsetOfU64)) +} + +func (d *doubleDictionary) Bounds(indexes []int32) (min, max Value) { + if len(indexes) > 0 { + minValue, maxValue := d.bounds(indexes) + min = d.makeValue(minValue) + max = d.makeValue(maxValue) + } + return min, max +} + +func (d *doubleDictionary) Reset() { + d.values.Reset() + if d.table != nil { + d.table.Reset() + } +} + +func (d *doubleDictionary) Page() Page { + return &d.doublePage +} + +func (d *doubleDictionary) insertBoolean(value bool) int32 { + if value { + return d.insertDouble(1) + } + return d.insertDouble(0) +} + +func (d *doubleDictionary) insertInt32(value int32) int32 { + return d.insertDouble(float64(value)) +} + +func (d *doubleDictionary) insertInt64(value int64) int32 { + return d.insertDouble(float64(value)) +} + +func (d *doubleDictionary) insertInt96(value deprecated.Int96) int32 { + return d.insertDouble(float64(value.Int64())) +} + +func (d *doubleDictionary) insertFloat(value float32) int32 { + return d.insertDouble(float64(value)) +} + +func (d *doubleDictionary) insertDouble(value float64) int32 { + var indexes [1]int32 + d.insert(indexes[:], makeArrayFromPointer(&value)) + return indexes[0] +} + +func (d *doubleDictionary) insertByteArray(value []byte) 
int32 { + v, err := strconv.ParseUint(string(value), 10, 32) + if err != nil { + panic(err) + } + return d.insertDouble(float64(v)) +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_fixed_len_byte_array.go b/vendor/github.com/parquet-go/parquet-go/dictionary_fixed_len_byte_array.go new file mode 100644 index 00000000000..cb717a2c57a --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_fixed_len_byte_array.go @@ -0,0 +1,198 @@ +package parquet + +import ( + "encoding/binary" + "fmt" + "math" + "unsafe" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type fixedLenByteArrayDictionary struct { + fixedLenByteArrayPage + hashmap map[string]int32 +} + +func newFixedLenByteArrayDictionary(typ Type, columnIndex int16, numValues int32, values encoding.Values) *fixedLenByteArrayDictionary { + data, size := values.FixedLenByteArray() + return &fixedLenByteArrayDictionary{ + fixedLenByteArrayPage: fixedLenByteArrayPage{ + typ: typ, + size: size, + data: memory.SliceBufferFrom(data[:int(numValues)*size]), + columnIndex: ^columnIndex, + }, + } +} + +func (d *fixedLenByteArrayDictionary) Type() Type { return newIndexedType(d.typ, d) } + +func (d *fixedLenByteArrayDictionary) Len() int { return d.data.Len() / d.size } + +func (d *fixedLenByteArrayDictionary) Size() int64 { return int64(d.data.Len()) } + +func (d *fixedLenByteArrayDictionary) Index(i int32) Value { + return d.makeValueBytes(d.index(i)) +} + +func (d *fixedLenByteArrayDictionary) index(i int32) []byte { + data := d.data.Slice() + j := (int(i) + 0) * d.size + k := (int(i) + 1) * d.size + return data[j:k:k] +} + +func (d *fixedLenByteArrayDictionary) Insert(indexes []int32, values []Value) { + d.insertValues(indexes, len(values), func(i int) *byte { + return values[i].ptr + }) +} + +func (d 
*fixedLenByteArrayDictionary) insert(indexes []int32, rows sparse.Array) { + d.insertValues(indexes, rows.Len(), func(i int) *byte { + return (*byte)(rows.Index(i)) + }) +} + +func (d *fixedLenByteArrayDictionary) insertValues(indexes []int32, count int, valueAt func(int) *byte) { + _ = indexes[:count] + + if d.hashmap == nil { + data := d.data.Slice() + d.hashmap = make(map[string]int32, d.data.Cap()/d.size) + for i, j := 0, int32(0); i < len(data); i += d.size { + d.hashmap[string(data[i:i+d.size])] = j + j++ + } + } + + for i := range count { + value := unsafe.Slice(valueAt(i), d.size) + + index, exists := d.hashmap[string(value)] + if !exists { + index = int32(d.Len()) + start := d.data.Len() + d.data.Append(value...) + data := d.data.Slice() + d.hashmap[string(data[start:])] = index + } + + indexes[i] = index + } +} + +func (d *fixedLenByteArrayDictionary) Lookup(indexes []int32, values []Value) { + model := d.makeValueString("") + memsetValues(values, model) + d.lookupString(indexes, makeArrayValue(values, offsetOfPtr)) +} + +func (d *fixedLenByteArrayDictionary) Bounds(indexes []int32) (min, max Value) { + if len(indexes) > 0 { + base := d.index(indexes[0]) + minValue := unsafecast.String(base) + maxValue := minValue + values := [64]string{} + + for i := 1; i < len(indexes); i += len(values) { + n := len(indexes) - i + if n > len(values) { + n = len(values) + } + j := i + n + d.lookupString(indexes[i:j:j], makeArrayFromSlice(values[:n:n])) + + for _, value := range values[:n:n] { + switch { + case value < minValue: + minValue = value + case value > maxValue: + maxValue = value + } + } + } + + min = d.makeValueString(minValue) + max = d.makeValueString(maxValue) + } + return min, max +} + +func (d *fixedLenByteArrayDictionary) Reset() { + d.data.Resize(0) + d.hashmap = nil +} + +func (d *fixedLenByteArrayDictionary) Page() Page { + return &d.fixedLenByteArrayPage +} + +func (d *fixedLenByteArrayDictionary) insertBoolean(value bool) int32 { + buf := 
make([]byte, d.size) + if value { + buf[d.size-1] = 1 + } + return d.insertByteArray(buf) +} + +func (d *fixedLenByteArrayDictionary) insertInt32(value int32) int32 { + if d.size < 4 { + panic(fmt.Sprintf("cannot write 4-byte int32 to fixed length byte array of size %d", d.size)) + } + buf := make([]byte, d.size) + binary.BigEndian.PutUint32(buf[d.size-4:], uint32(value)) + return d.insertByteArray(buf) +} + +func (d *fixedLenByteArrayDictionary) insertInt64(value int64) int32 { + if d.size < 8 { + panic(fmt.Sprintf("cannot write 8-byte int64 to fixed length byte array of size %d", d.size)) + } + buf := make([]byte, d.size) + binary.BigEndian.PutUint64(buf[d.size-8:], uint64(value)) + return d.insertByteArray(buf) +} + +func (d *fixedLenByteArrayDictionary) insertInt96(value deprecated.Int96) int32 { + if d.size < 12 { + panic(fmt.Sprintf("cannot write 12-byte int96 to fixed length byte array of size %d", d.size)) + } + buf := make([]byte, d.size) + binary.BigEndian.PutUint32(buf[d.size-12:d.size-8], value[2]) + binary.BigEndian.PutUint32(buf[d.size-8:d.size-4], value[1]) + binary.BigEndian.PutUint32(buf[d.size-4:], value[0]) + return d.insertByteArray(buf) +} + +func (d *fixedLenByteArrayDictionary) insertFloat(value float32) int32 { + if d.size < 4 { + panic(fmt.Sprintf("cannot write 4-byte float to fixed length byte array of size %d", d.size)) + } + buf := make([]byte, d.size) + binary.BigEndian.PutUint32(buf[d.size-4:], math.Float32bits(value)) + return d.insertByteArray(buf) +} + +func (d *fixedLenByteArrayDictionary) insertDouble(value float64) int32 { + if d.size < 8 { + panic(fmt.Sprintf("cannot write 8-byte double to fixed length byte array of size %d", d.size)) + } + buf := make([]byte, d.size) + binary.BigEndian.PutUint64(buf[d.size-8:], math.Float64bits(value)) + return d.insertByteArray(buf) +} + +func (d *fixedLenByteArrayDictionary) insertByteArray(value []byte) int32 { + if len(value) != d.size { + panic(fmt.Sprintf("byte array length %d does not 
match fixed length %d", len(value), d.size)) + } + indexes := [1]int32{0} + d.insertValues(indexes[:], 1, func(i int) *byte { return &value[0] }) + return indexes[0] +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_float.go b/vendor/github.com/parquet-go/parquet-go/dictionary_float.go new file mode 100644 index 00000000000..9c4a697ad3b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_float.go @@ -0,0 +1,137 @@ +package parquet + +import ( + "strconv" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/hashprobe" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type floatDictionary struct { + floatPage + table *hashprobe.Float32Table +} + +func newFloatDictionary(typ Type, columnIndex int16, numValues int32, data encoding.Values) *floatDictionary { + return &floatDictionary{ + floatPage: floatPage{ + typ: typ, + values: memory.SliceBufferFrom(data.Float()[:numValues]), + columnIndex: ^columnIndex, + }, + } +} + +func (d *floatDictionary) Type() Type { return newIndexedType(d.typ, d) } + +func (d *floatDictionary) Len() int { return d.values.Len() } + +func (d *floatDictionary) Size() int64 { return int64(d.values.Len() * 4) } + +func (d *floatDictionary) Index(i int32) Value { return d.makeValue(d.index(i)) } + +func (d *floatDictionary) index(i int32) float32 { return d.values.Slice()[i] } + +func (d *floatDictionary) Insert(indexes []int32, values []Value) { + d.insert(indexes, makeArrayValue(values, offsetOfU32)) +} + +func (d *floatDictionary) init(indexes []int32) { + values := d.values.Slice() + d.table = hashprobe.NewFloat32Table(len(values), hashprobeTableMaxLoad) + + n := min(len(values), len(indexes)) + + for i := 0; i < len(values); i += n { + j := min(i+n, len(values)) + d.table.Probe(values[i:j:j], indexes[:n:n]) + } +} + +func (d *floatDictionary) insert(indexes []int32, rows 
sparse.Array) { + const chunkSize = insertsTargetCacheFootprint / 4 + + if d.table == nil { + d.init(indexes) + } + + values := rows.Float32Array() + + for i := 0; i < values.Len(); i += chunkSize { + j := min(i+chunkSize, values.Len()) + + if d.table.ProbeArray(values.Slice(i, j), indexes[i:j:j]) > 0 { + for k, index := range indexes[i:j] { + if index == int32(d.values.Len()) { + d.values.Append(values.Index(i + k)) + } + } + } + } +} + +func (d *floatDictionary) Lookup(indexes []int32, values []Value) { + model := d.makeValue(0) + memsetValues(values, model) + d.lookup(indexes, makeArrayValue(values, offsetOfU32)) +} + +func (d *floatDictionary) Bounds(indexes []int32) (min, max Value) { + if len(indexes) > 0 { + minValue, maxValue := d.bounds(indexes) + min = d.makeValue(minValue) + max = d.makeValue(maxValue) + } + return min, max +} + +func (d *floatDictionary) Reset() { + d.values.Reset() + if d.table != nil { + d.table.Reset() + } +} + +func (d *floatDictionary) Page() Page { + return &d.floatPage +} + +func (d *floatDictionary) insertBoolean(value bool) int32 { + if value { + return d.insertFloat(1) + } + return d.insertFloat(0) +} + +func (d *floatDictionary) insertInt32(value int32) int32 { + return d.insertFloat(float32(value)) +} + +func (d *floatDictionary) insertInt64(value int64) int32 { + return d.insertFloat(float32(value)) +} + +func (d *floatDictionary) insertInt96(value deprecated.Int96) int32 { + return d.insertFloat(float32(value.Int32())) +} + +func (d *floatDictionary) insertFloat(value float32) int32 { + var indexes [1]int32 + d.insert(indexes[:], makeArrayFromPointer(&value)) + return indexes[0] +} + +func (d *floatDictionary) insertDouble(value float64) int32 { + return d.insertFloat(float32(value)) +} + +func (d *floatDictionary) insertByteArray(value []byte) int32 { + v, err := strconv.ParseUint(string(value), 10, 32) + if err != nil { + panic(err) + } + return d.insertFloat(float32(v)) +} diff --git 
a/vendor/github.com/parquet-go/parquet-go/dictionary_int32.go b/vendor/github.com/parquet-go/parquet-go/dictionary_int32.go new file mode 100644 index 00000000000..90ea3968f49 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_int32.go @@ -0,0 +1,150 @@ +package parquet + +import ( + "strconv" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/hashprobe" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type int32Dictionary struct { + int32Page + table *hashprobe.Int32Table +} + +func newInt32Dictionary(typ Type, columnIndex int16, numValues int32, data encoding.Values) *int32Dictionary { + return &int32Dictionary{ + int32Page: int32Page{ + typ: typ, + values: memory.SliceBufferFrom(data.Int32()[:numValues]), + columnIndex: ^columnIndex, + }, + } +} + +func (d *int32Dictionary) Type() Type { return newIndexedType(d.typ, d) } + +func (d *int32Dictionary) Len() int { return d.values.Len() } + +func (d *int32Dictionary) Size() int64 { return int64(d.values.Len() * 4) } + +func (d *int32Dictionary) Index(i int32) Value { return d.makeValue(d.index(i)) } + +func (d *int32Dictionary) index(i int32) int32 { return d.values.Slice()[i] } + +func (d *int32Dictionary) Insert(indexes []int32, values []Value) { + d.insert(indexes, makeArrayValue(values, offsetOfU32)) +} + +func (d *int32Dictionary) init(indexes []int32) { + values := d.values.Slice() + d.table = hashprobe.NewInt32Table(len(values), hashprobeTableMaxLoad) + + n := min(len(values), len(indexes)) + + for i := 0; i < len(values); i += n { + j := min(i+n, len(values)) + d.table.Probe(values[i:j:j], indexes[:n:n]) + } +} + +func (d *int32Dictionary) insert(indexes []int32, rows sparse.Array) { + // Iterating over the input in chunks helps keep relevant data in CPU + // caches when a large number of values are inserted into the dictionary with + // a single method 
call. + // + // Without this chunking, memory areas from the head of the indexes and + // values arrays end up being evicted from CPU caches as the probing + // operation iterates through the array. The subsequent scan of the indexes + // required to determine which values must be inserted into the page then + // stalls on retrieving data from main memory. + // + // We measured as much as ~37% drop in throughput when disabling the + // chunking, and did not observe any penalties from having it on smaller + // inserts. + const chunkSize = insertsTargetCacheFootprint / 4 + + if d.table == nil { + d.init(indexes) + } + + values := rows.Int32Array() + + for i := 0; i < values.Len(); i += chunkSize { + j := min(i+chunkSize, values.Len()) + + if d.table.ProbeArray(values.Slice(i, j), indexes[i:j:j]) > 0 { + for k, index := range indexes[i:j] { + if index == int32(d.values.Len()) { + d.values.Append(values.Index(i + k)) + } + } + } + } +} + +func (d *int32Dictionary) Lookup(indexes []int32, values []Value) { + model := d.makeValue(0) + memsetValues(values, model) + d.lookup(indexes, makeArrayValue(values, offsetOfU32)) +} + +func (d *int32Dictionary) Bounds(indexes []int32) (min, max Value) { + if len(indexes) > 0 { + minValue, maxValue := d.bounds(indexes) + min = d.makeValue(minValue) + max = d.makeValue(maxValue) + } + return min, max +} + +func (d *int32Dictionary) Reset() { + d.values.Reset() + if d.table != nil { + d.table.Reset() + } +} + +func (d *int32Dictionary) Page() Page { + return &d.int32Page +} + +func (d *int32Dictionary) insertBoolean(value bool) int32 { + if value { + return d.insertInt32(1) + } + return d.insertInt32(0) +} + +func (d *int32Dictionary) insertInt32(value int32) int32 { + var indexes [1]int32 + d.insert(indexes[:], makeArrayFromPointer(&value)) + return indexes[0] +} + +func (d *int32Dictionary) insertInt64(value int64) int32 { + return d.insertInt32(int32(value)) +} + +func (d *int32Dictionary) insertInt96(value deprecated.Int96) int32 { 
+ return d.insertInt32(int32(value[0])) +} + +func (d *int32Dictionary) insertFloat(value float32) int32 { + return d.insertInt32(int32(value)) +} + +func (d *int32Dictionary) insertDouble(value float64) int32 { + return d.insertInt32(int32(value)) +} + +func (d *int32Dictionary) insertByteArray(value []byte) int32 { + v, err := strconv.ParseInt(string(value), 10, 32) + if err != nil { + panic(err) + } + return d.insertInt32(int32(v)) +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_int64.go b/vendor/github.com/parquet-go/parquet-go/dictionary_int64.go new file mode 100644 index 00000000000..fd498ee9b95 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_int64.go @@ -0,0 +1,134 @@ +package parquet + +import ( + "strconv" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/hashprobe" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type int64Dictionary struct { + int64Page + table *hashprobe.Int64Table +} + +func newInt64Dictionary(typ Type, columnIndex int16, numValues int32, data encoding.Values) *int64Dictionary { + return &int64Dictionary{ + int64Page: int64Page{ + typ: typ, + values: memory.SliceBufferFrom(data.Int64()[:numValues]), + columnIndex: ^columnIndex, + }, + } +} + +func (d *int64Dictionary) Type() Type { return newIndexedType(d.typ, d) } + +func (d *int64Dictionary) Len() int { return d.values.Len() } + +func (d *int64Dictionary) Size() int64 { return int64(d.values.Len() * 8) } + +func (d *int64Dictionary) Index(i int32) Value { return d.makeValue(d.index(i)) } + +func (d *int64Dictionary) index(i int32) int64 { return d.values.Slice()[i] } + +func (d *int64Dictionary) Insert(indexes []int32, values []Value) { + d.insert(indexes, makeArrayValue(values, offsetOfU64)) +} + +func (d *int64Dictionary) init(indexes []int32) { + values := d.values.Slice() + d.table = 
hashprobe.NewInt64Table(len(values), hashprobeTableMaxLoad) + + n := min(len(values), len(indexes)) + + for i := 0; i < len(values); i += n { + j := min(i+n, len(values)) + d.table.Probe(values[i:j:j], indexes[:n:n]) + } +} + +func (d *int64Dictionary) insert(indexes []int32, rows sparse.Array) { + const chunkSize = insertsTargetCacheFootprint / 8 + + if d.table == nil { + d.init(indexes) + } + + values := rows.Int64Array() + + for i := 0; i < values.Len(); i += chunkSize { + j := min(i+chunkSize, values.Len()) + + if d.table.ProbeArray(values.Slice(i, j), indexes[i:j:j]) > 0 { + for k, index := range indexes[i:j] { + if index == int32(d.values.Len()) { + d.values.Append(values.Index(i + k)) + } + } + } + } +} + +func (d *int64Dictionary) Lookup(indexes []int32, values []Value) { + model := d.makeValue(0) + memsetValues(values, model) + d.lookup(indexes, makeArrayValue(values, offsetOfU64)) +} + +func (d *int64Dictionary) Bounds(indexes []int32) (min, max Value) { + if len(indexes) > 0 { + minValue, maxValue := d.bounds(indexes) + min = d.makeValue(minValue) + max = d.makeValue(maxValue) + } + return min, max +} + +func (d *int64Dictionary) Reset() { + d.values.Reset() + if d.table != nil { + d.table.Reset() + } +} + +func (d *int64Dictionary) Page() Page { + return &d.int64Page +} + +func (d *int64Dictionary) insertBoolean(value bool) int32 { + panic("cannot insert boolean value into int64 dictionary") +} + +func (d *int64Dictionary) insertInt32(value int32) int32 { + return d.insertInt64(int64(value)) +} + +func (d *int64Dictionary) insertInt64(value int64) int32 { + var indexes [1]int32 + d.insert(indexes[:], makeArrayFromPointer(&value)) + return indexes[0] +} + +func (d *int64Dictionary) insertInt96(value deprecated.Int96) int32 { + return d.insertInt64(value.Int64()) +} + +func (d *int64Dictionary) insertFloat(value float32) int32 { + return d.insertInt64(int64(value)) +} + +func (d *int64Dictionary) insertDouble(value float64) int32 { + return 
d.insertInt64(int64(value)) +} + +func (d *int64Dictionary) insertByteArray(value []byte) int32 { + v, err := strconv.ParseInt(string(value), 10, 64) + if err != nil { + panic(err) + } + return d.insertInt64(v) +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_int96.go b/vendor/github.com/parquet-go/parquet-go/dictionary_int96.go new file mode 100644 index 00000000000..0c0fdd69aac --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_int96.go @@ -0,0 +1,143 @@ +package parquet + +import ( + "math/big" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/sparse" +) + +type int96Dictionary struct { + int96Page + hashmap map[deprecated.Int96]int32 +} + +func newInt96Dictionary(typ Type, columnIndex int16, numValues int32, data encoding.Values) *int96Dictionary { + return &int96Dictionary{ + int96Page: int96Page{ + typ: typ, + values: data.Int96()[:numValues], + columnIndex: ^columnIndex, + }, + } +} + +func (d *int96Dictionary) Type() Type { return newIndexedType(d.typ, d) } + +func (d *int96Dictionary) Len() int { return len(d.values) } + +func (d *int96Dictionary) Size() int64 { return int64(len(d.values) * 12) } + +func (d *int96Dictionary) Index(i int32) Value { return d.makeValue(d.index(i)) } + +func (d *int96Dictionary) index(i int32) deprecated.Int96 { return d.values[i] } + +func (d *int96Dictionary) Insert(indexes []int32, values []Value) { + d.insertValues(indexes, len(values), func(i int) deprecated.Int96 { + return values[i].Int96() + }) +} + +func (d *int96Dictionary) insert(indexes []int32, rows sparse.Array) { + d.insertValues(indexes, rows.Len(), func(i int) deprecated.Int96 { + return *(*deprecated.Int96)(rows.Index(i)) + }) +} + +func (d *int96Dictionary) insertValues(indexes []int32, count int, valueAt func(int) deprecated.Int96) { + _ = indexes[:count] + + if d.hashmap == nil { + d.hashmap = make(map[deprecated.Int96]int32, 
len(d.values)) + for i, v := range d.values { + d.hashmap[v] = int32(i) + } + } + + for i := range count { + value := valueAt(i) + + index, exists := d.hashmap[value] + if !exists { + index = int32(len(d.values)) + d.values = append(d.values, value) + d.hashmap[value] = index + } + + indexes[i] = index + } +} + +func (d *int96Dictionary) Lookup(indexes []int32, values []Value) { + for i, j := range indexes { + values[i] = d.Index(j) + } +} + +func (d *int96Dictionary) Bounds(indexes []int32) (min, max Value) { + if len(indexes) > 0 { + minValue := d.index(indexes[0]) + maxValue := minValue + + for _, i := range indexes[1:] { + value := d.index(i) + switch { + case value.Less(minValue): + minValue = value + case maxValue.Less(value): + maxValue = value + } + } + + min = d.makeValue(minValue) + max = d.makeValue(maxValue) + } + return min, max +} + +func (d *int96Dictionary) Reset() { + d.values = d.values[:0] + d.hashmap = nil +} + +func (d *int96Dictionary) Page() Page { + return &d.int96Page +} + +func (d *int96Dictionary) insertBoolean(value bool) int32 { + if value { + return d.insertInt96(deprecated.Int96{1, 0, 0}) + } + return d.insertInt96(deprecated.Int96{0, 0, 0}) +} + +func (d *int96Dictionary) insertInt32(value int32) int32 { + return d.insertInt96(deprecated.Int96{uint32(value), 0, 0}) +} + +func (d *int96Dictionary) insertInt64(value int64) int32 { + return d.insertInt96(deprecated.Int64ToInt96(value)) +} + +func (d *int96Dictionary) insertInt96(value deprecated.Int96) int32 { + indexes := [1]int32{0} + d.insertValues(indexes[:], 1, func(i int) deprecated.Int96 { return value }) + return indexes[0] +} + +func (d *int96Dictionary) insertFloat(value float32) int32 { + return d.insertInt96(deprecated.Int64ToInt96(int64(value))) +} + +func (d *int96Dictionary) insertDouble(value float64) int32 { + return d.insertInt96(deprecated.Int64ToInt96(int64(value))) +} + +func (d *int96Dictionary) insertByteArray(value []byte) int32 { + v, ok := 
new(big.Int).SetString(string(value), 10) + if !ok || v == nil { + panic("invalid byte array for int96: cannot parse") + } + return d.insertInt96(deprecated.Int64ToInt96(v.Int64())) +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_null.go b/vendor/github.com/parquet-go/parquet-go/dictionary_null.go new file mode 100644 index 00000000000..58d7b4bdaed --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_null.go @@ -0,0 +1,75 @@ +package parquet + +import ( + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/sparse" +) + +// nullDictionary is a dictionary for NULL type columns where all operations are no-ops. +type nullDictionary struct { + nullPage +} + +func newNullDictionary(typ Type, columnIndex int16, numValues int32, _ encoding.Values) *nullDictionary { + return &nullDictionary{ + nullPage: *newNullPage(typ, columnIndex, numValues), + } +} + +func (d *nullDictionary) Type() Type { return d.nullPage.Type() } + +func (d *nullDictionary) Len() int { return int(d.nullPage.count) } + +func (d *nullDictionary) Size() int64 { return 0 } + +func (d *nullDictionary) Index(i int32) Value { return NullValue() } + +func (d *nullDictionary) Lookup(indexes []int32, values []Value) { + checkLookupIndexBounds(indexes, makeArrayValue(values, 0)) + for i := range indexes { + values[i] = NullValue() + } +} + +func (d *nullDictionary) Insert(indexes []int32, values []Value) {} + +func (d *nullDictionary) Bounds(indexes []int32) (min, max Value) { + return NullValue(), NullValue() +} + +func (d *nullDictionary) Reset() { + d.nullPage.count = 0 +} + +func (d *nullDictionary) Page() Page { return &d.nullPage } + +func (d *nullDictionary) insert(indexes []int32, rows sparse.Array) {} + +func (d *nullDictionary) insertBoolean(value bool) int32 { + panic("cannot insert boolean value into null dictionary") +} + +func (d *nullDictionary) insertInt32(value int32) int32 { + 
panic("cannot insert int32 value into null dictionary") +} + +func (d *nullDictionary) insertInt64(value int64) int32 { + panic("cannot insert int64 value into null dictionary") +} + +func (d *nullDictionary) insertInt96(value deprecated.Int96) int32 { + panic("cannot insert int96 value into null dictionary") +} + +func (d *nullDictionary) insertFloat(value float32) int32 { + panic("cannot insert float value into null dictionary") +} + +func (d *nullDictionary) insertDouble(value float64) int32 { + panic("cannot insert double value into null dictionary") +} + +func (d *nullDictionary) insertByteArray(value []byte) int32 { + panic("cannot insert byte array value into null dictionary") +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_purego.go b/vendor/github.com/parquet-go/parquet-go/dictionary_purego.go new file mode 100644 index 00000000000..8d71b982792 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_purego.go @@ -0,0 +1,210 @@ +//go:build purego || !amd64 + +package parquet + +import ( + "unsafe" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/sparse" +) + +func (d *int32Dictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(*int32)(rows.Index(i)) = d.index(j) + } +} + +func (d *int64Dictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(*int64)(rows.Index(i)) = d.index(j) + } +} + +func (d *floatDictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(*float32)(rows.Index(i)) = d.index(j) + } +} + +func (d *doubleDictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(*float64)(rows.Index(i)) = d.index(j) + } +} + +func (d *byteArrayDictionary) lookupString(indexes []int32, rows sparse.Array) 
{ + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(*string)(rows.Index(i)) = unsafecast.String(d.index(int(j))) + } +} + +func (d *fixedLenByteArrayDictionary) lookupString(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(*string)(rows.Index(i)) = unsafecast.String(d.index(j)) + } +} + +func (d *uint32Dictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(*uint32)(rows.Index(i)) = d.index(j) + } +} + +func (d *uint64Dictionary) lookup(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(*uint64)(rows.Index(i)) = d.index(j) + } +} + +func (d *be128Dictionary) lookupString(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + s := "0123456789ABCDEF" + for i, j := range indexes { + *(**[16]byte)(unsafe.Pointer(&s)) = d.index(j) + *(*string)(rows.Index(i)) = s + } +} + +func (d *be128Dictionary) lookupPointer(indexes []int32, rows sparse.Array) { + checkLookupIndexBounds(indexes, rows) + for i, j := range indexes { + *(**[16]byte)(rows.Index(i)) = d.index(j) + } +} + +func (d *int32Dictionary) bounds(indexes []int32) (min, max int32) { + min = d.index(indexes[0]) + max = min + + for _, i := range indexes[1:] { + value := d.index(i) + if value < min { + min = value + } + if value > max { + max = value + } + } + + return min, max +} + +func (d *int64Dictionary) bounds(indexes []int32) (min, max int64) { + min = d.index(indexes[0]) + max = min + + for _, i := range indexes[1:] { + value := d.index(i) + if value < min { + min = value + } + if value > max { + max = value + } + } + + return min, max +} + +func (d *floatDictionary) bounds(indexes []int32) (min, max float32) { + min = d.index(indexes[0]) + max = min + + for _, i := range indexes[1:] { + value := d.index(i) + if value < min { + min = value + } + if value > max { + 
max = value + } + } + + return min, max +} + +func (d *doubleDictionary) bounds(indexes []int32) (min, max float64) { + min = d.index(indexes[0]) + max = min + + for _, i := range indexes[1:] { + value := d.index(i) + if value < min { + min = value + } + if value > max { + max = value + } + } + + return min, max +} + +func (d *uint32Dictionary) bounds(indexes []int32) (min, max uint32) { + min = d.index(indexes[0]) + max = min + + for _, i := range indexes[1:] { + value := d.index(i) + if value < min { + min = value + } + if value > max { + max = value + } + } + + return min, max +} + +func (d *uint64Dictionary) bounds(indexes []int32) (min, max uint64) { + min = d.index(indexes[0]) + max = min + + for _, i := range indexes[1:] { + value := d.index(i) + if value < min { + min = value + } + if value > max { + max = value + } + } + + return min, max +} + +func (d *be128Dictionary) bounds(indexes []int32) (min, max *[16]byte) { + values := [64]*[16]byte{} + min = d.index(indexes[0]) + max = min + + for i := 1; i < len(indexes); i += len(values) { + n := len(indexes) - i + if n > len(values) { + n = len(values) + } + j := i + n + d.lookupPointer(indexes[i:j:j], makeArrayFromSlice(values[:n:n])) + + for _, value := range values[:n:n] { + switch { + case lessBE128(value, min): + min = value + case lessBE128(max, value): + max = value + } + } + } + + return min, max +} diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_uint32.go b/vendor/github.com/parquet-go/parquet-go/dictionary_uint32.go new file mode 100644 index 00000000000..7ae64be0059 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/dictionary_uint32.go @@ -0,0 +1,141 @@ +package parquet + +import ( + "strconv" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/hashprobe" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +type uint32Dictionary struct { + 
uint32Page
+	table *hashprobe.Uint32Table
+}
+
+func newUint32Dictionary(typ Type, columnIndex int16, numValues int32, data encoding.Values) *uint32Dictionary {
+	return &uint32Dictionary{
+		uint32Page: uint32Page{
+			typ:         typ,
+			values:      memory.SliceBufferFrom(data.Uint32()[:numValues]),
+			columnIndex: ^columnIndex,
+		},
+	}
+}
+
+func (d *uint32Dictionary) Type() Type { return newIndexedType(d.typ, d) }
+
+func (d *uint32Dictionary) Len() int { return d.values.Len() }
+
+func (d *uint32Dictionary) Size() int64 { return int64(d.values.Len() * 4) }
+
+func (d *uint32Dictionary) Index(i int32) Value { return d.makeValue(d.index(i)) }
+
+func (d *uint32Dictionary) index(i int32) uint32 { return d.values.Slice()[i] }
+
+func (d *uint32Dictionary) Insert(indexes []int32, values []Value) {
+	d.insert(indexes, makeArrayValue(values, offsetOfU32))
+}
+
+func (d *uint32Dictionary) init(indexes []int32) {
+	values := d.values.Slice()
+	d.table = hashprobe.NewUint32Table(len(values), hashprobeTableMaxLoad)
+
+	n := min(len(values), len(indexes))
+
+	for i := 0; i < len(values); i += n {
+		j := min(i+n, len(values))
+		d.table.Probe(values[i:j:j], indexes[:n:n])
+	}
+}
+
+func (d *uint32Dictionary) insert(indexes []int32, rows sparse.Array) {
+	const chunkSize = insertsTargetCacheFootprint / 4
+
+	if d.table == nil {
+		d.init(indexes)
+	}
+
+	values := rows.Uint32Array()
+
+	for i := 0; i < values.Len(); i += chunkSize {
+		j := min(i+chunkSize, values.Len())
+
+		if d.table.ProbeArray(values.Slice(i, j), indexes[i:j:j]) > 0 {
+			for k, index := range indexes[i:j] {
+				if index == int32(d.values.Len()) {
+					d.values.Append(values.Index(i + k))
+				}
+			}
+		}
+	}
+}
+
+func (d *uint32Dictionary) Lookup(indexes []int32, values []Value) {
+	model := d.makeValue(0)
+	memsetValues(values, model)
+	d.lookup(indexes, makeArrayValue(values, offsetOfU32))
+}
+
+func (d *uint32Dictionary) Bounds(indexes []int32) (min, max Value) {
+	if len(indexes) > 0 {
+		minValue, maxValue := d.bounds(indexes)
+		min = d.makeValue(minValue)
+		max = d.makeValue(maxValue)
+	}
+	return min, max
+}
+
+func (d *uint32Dictionary) Reset() {
+	d.values.Reset()
+	if d.table != nil {
+		d.table.Reset()
+	}
+}
+
+func (d *uint32Dictionary) Page() Page {
+	return &d.uint32Page
+}
+
+func (d *uint32Dictionary) insertBoolean(value bool) int32 {
+	if value {
+		return d.insertUint32(1)
+	}
+	return d.insertUint32(0)
+}
+
+func (d *uint32Dictionary) insertInt32(value int32) int32 {
+	return d.insertUint32(uint32(value))
+}
+
+func (d *uint32Dictionary) insertInt64(value int64) int32 {
+	return d.insertUint32(uint32(value))
+}
+
+func (d *uint32Dictionary) insertInt96(value deprecated.Int96) int32 {
+	return d.insertUint32(value[0])
+}
+
+func (d *uint32Dictionary) insertFloat(value float32) int32 {
+	return d.insertUint32(uint32(value))
+}
+
+func (d *uint32Dictionary) insertDouble(value float64) int32 {
+	return d.insertUint32(uint32(value))
+}
+
+func (d *uint32Dictionary) insertByteArray(value []byte) int32 {
+	v, err := strconv.ParseUint(string(value), 10, 32)
+	if err != nil {
+		panic(err)
+	}
+	return d.insertUint32(uint32(v))
+}
+
+func (d *uint32Dictionary) insertUint32(value uint32) int32 {
+	var indexes [1]int32
+	d.insert(indexes[:], makeArrayFromPointer(&value))
+	return indexes[0]
+}
diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary_uint64.go b/vendor/github.com/parquet-go/parquet-go/dictionary_uint64.go
new file mode 100644
index 00000000000..f462f1575c9
--- /dev/null
+++ b/vendor/github.com/parquet-go/parquet-go/dictionary_uint64.go
@@ -0,0 +1,141 @@
+package parquet
+
+import (
+	"strconv"
+
+	"github.com/parquet-go/parquet-go/deprecated"
+	"github.com/parquet-go/parquet-go/encoding"
+	"github.com/parquet-go/parquet-go/hashprobe"
+	"github.com/parquet-go/parquet-go/internal/memory"
+	"github.com/parquet-go/parquet-go/sparse"
+)
+
+type uint64Dictionary struct {
+	uint64Page
+	table *hashprobe.Uint64Table
+}
+
+func newUint64Dictionary(typ Type, columnIndex
int16, numValues int32, data encoding.Values) *uint64Dictionary {
+	return &uint64Dictionary{
+		uint64Page: uint64Page{
+			typ:         typ,
+			values:      memory.SliceBufferFrom(data.Uint64()[:numValues]),
+			columnIndex: ^columnIndex,
+		},
+	}
+}
+
+func (d *uint64Dictionary) Type() Type { return newIndexedType(d.typ, d) }
+
+func (d *uint64Dictionary) Len() int { return d.values.Len() }
+
+func (d *uint64Dictionary) Size() int64 { return int64(d.values.Len() * 8) }
+
+func (d *uint64Dictionary) Index(i int32) Value { return d.makeValue(d.index(i)) }
+
+func (d *uint64Dictionary) index(i int32) uint64 { return d.values.Slice()[i] }
+
+func (d *uint64Dictionary) Insert(indexes []int32, values []Value) {
+	d.insert(indexes, makeArrayValue(values, offsetOfU64))
+}
+
+func (d *uint64Dictionary) init(indexes []int32) {
+	values := d.values.Slice()
+	d.table = hashprobe.NewUint64Table(len(values), hashprobeTableMaxLoad)
+
+	n := min(len(values), len(indexes))
+
+	for i := 0; i < len(values); i += n {
+		j := min(i+n, len(values))
+		d.table.Probe(values[i:j:j], indexes[:n:n])
+	}
+}
+
+func (d *uint64Dictionary) insert(indexes []int32, rows sparse.Array) {
+	const chunkSize = insertsTargetCacheFootprint / 8
+
+	if d.table == nil {
+		d.init(indexes)
+	}
+
+	values := rows.Uint64Array()
+
+	for i := 0; i < values.Len(); i += chunkSize {
+		j := min(i+chunkSize, values.Len())
+
+		if d.table.ProbeArray(values.Slice(i, j), indexes[i:j:j]) > 0 {
+			for k, index := range indexes[i:j] {
+				if index == int32(d.values.Len()) {
+					d.values.Append(values.Index(i + k))
+				}
+			}
+		}
+	}
+}
+
+func (d *uint64Dictionary) Lookup(indexes []int32, values []Value) {
+	model := d.makeValue(0)
+	memsetValues(values, model)
+	d.lookup(indexes, makeArrayValue(values, offsetOfU64))
+}
+
+func (d *uint64Dictionary) Bounds(indexes []int32) (min, max Value) {
+	if len(indexes) > 0 {
+		minValue, maxValue := d.bounds(indexes)
+		min = d.makeValue(minValue)
+		max = d.makeValue(maxValue)
+	}
+	return min, max
+}
+
+func (d *uint64Dictionary) Reset() {
+	d.values.Reset()
+	if d.table != nil {
+		d.table.Reset()
+	}
+}
+
+func (d *uint64Dictionary) Page() Page {
+	return &d.uint64Page
+}
+
+func (d *uint64Dictionary) insertBoolean(value bool) int32 {
+	if value {
+		return d.insertUint64(1)
+	}
+	return d.insertUint64(0)
+}
+
+func (d *uint64Dictionary) insertInt32(value int32) int32 {
+	return d.insertUint64(uint64(value))
+}
+
+func (d *uint64Dictionary) insertInt64(value int64) int32 {
+	return d.insertUint64(uint64(value))
+}
+
+func (d *uint64Dictionary) insertInt96(value deprecated.Int96) int32 {
+	return d.insertUint64(uint64(value.Int64()))
+}
+
+func (d *uint64Dictionary) insertFloat(value float32) int32 {
+	return d.insertUint64(uint64(value))
+}
+
+func (d *uint64Dictionary) insertDouble(value float64) int32 {
+	return d.insertUint64(uint64(value))
+}
+
+func (d *uint64Dictionary) insertByteArray(value []byte) int32 {
+	v, err := strconv.ParseUint(string(value), 10, 64)
+	if err != nil {
+		panic(err)
+	}
+	return d.insertUint64(uint64(v))
+}
+
+func (d *uint64Dictionary) insertUint64(value uint64) int32 {
+	var indexes [1]int32
+	d.insert(indexes[:], makeArrayFromPointer(&value))
+	return indexes[0]
+}
diff --git a/vendor/github.com/parquet-go/parquet-go/encoding.go b/vendor/github.com/parquet-go/parquet-go/encoding.go
new file mode 100644
index 00000000000..65b04ca7a93
--- /dev/null
+++ b/vendor/github.com/parquet-go/parquet-go/encoding.go
@@ -0,0 +1,160 @@
+package parquet
+
+import (
+	"math/bits"
+	"sync"
+
+	"github.com/parquet-go/parquet-go/encoding"
+	"github.com/parquet-go/parquet-go/encoding/bitpacked"
+	"github.com/parquet-go/parquet-go/encoding/bytestreamsplit"
+	"github.com/parquet-go/parquet-go/encoding/delta"
+	"github.com/parquet-go/parquet-go/encoding/plain"
+	"github.com/parquet-go/parquet-go/encoding/rle"
+	"github.com/parquet-go/parquet-go/format"
+)
+
+var (
+	// Plain is the default parquet encoding.
+ Plain plain.Encoding + + // RLE is the hybrid bit-pack/run-length parquet encoding. + RLE rle.Encoding + + // BitPacked is the deprecated bit-packed encoding for repetition and + // definition levels. + BitPacked bitpacked.Encoding + + // PlainDictionary is the plain dictionary parquet encoding. + // + // This encoding should not be used anymore in parquet 2.0 and later, + // it is implemented for backwards compatibility to support reading + // files that were encoded with older parquet libraries. + PlainDictionary plain.DictionaryEncoding + + // RLEDictionary is the RLE dictionary parquet encoding. + RLEDictionary rle.DictionaryEncoding + + // DeltaBinaryPacked is the delta binary packed parquet encoding. + DeltaBinaryPacked delta.BinaryPackedEncoding + + // DeltaLengthByteArray is the delta length byte array parquet encoding. + DeltaLengthByteArray delta.LengthByteArrayEncoding + + // DeltaByteArray is the delta byte array parquet encoding. + DeltaByteArray delta.ByteArrayEncoding + + // ByteStreamSplit is an encoding for floating-point data. + ByteStreamSplit bytestreamsplit.Encoding + + // Table indexing the encodings supported by this package. + encodings = [...]encoding.Encoding{ + format.Plain: &Plain, + format.PlainDictionary: &PlainDictionary, + format.BitPacked: &BitPacked, + format.RLE: &RLE, + format.RLEDictionary: &RLEDictionary, + format.DeltaBinaryPacked: &DeltaBinaryPacked, + format.DeltaLengthByteArray: &DeltaLengthByteArray, + format.DeltaByteArray: &DeltaByteArray, + format.ByteStreamSplit: &ByteStreamSplit, + } + + // Table indexing RLE encodings for repetition and definition levels of + // all supported bit widths. 
+ levelEncodingsRLE = [...]rle.Encoding{ + 0: {BitWidth: 1}, + 1: {BitWidth: 2}, + 2: {BitWidth: 3}, + 3: {BitWidth: 4}, + 4: {BitWidth: 5}, + 5: {BitWidth: 6}, + 6: {BitWidth: 7}, + 7: {BitWidth: 8}, + } + + levelEncodingsBitPacked = [...]bitpacked.Encoding{ + 0: {BitWidth: 1}, + 1: {BitWidth: 2}, + 2: {BitWidth: 3}, + 3: {BitWidth: 4}, + 4: {BitWidth: 5}, + 5: {BitWidth: 6}, + 6: {BitWidth: 7}, + 7: {BitWidth: 8}, + } +) + +var extraEncodings sync.Map + +func isDictionaryEncoding(encoding encoding.Encoding) bool { + return isDictionaryFormat(encoding.Encoding()) +} + +func isDictionaryFormat(encoding format.Encoding) bool { + return encoding == format.PlainDictionary || encoding == format.RLEDictionary +} + +func RegisterEncoding(enc encoding.Encoding) { + ns := encoding.NotSupported{} + if enc == ns { + panic("cannot register parquet encoding as not-supported") + } + if LookupEncoding(enc.Encoding()) != ns { + panic("cannot register parquet encoding that overrides the standard specification") + } + extraEncodings.Store(enc.Encoding(), enc) +} + +// LookupEncoding returns the parquet encoding associated with the given code. +// +// The function never returns nil. If the encoding is not supported, +// encoding.NotSupported is returned. 
+func LookupEncoding(enc format.Encoding) encoding.Encoding { + if enc >= 0 && int(enc) < len(encodings) { + if e := encodings[enc]; e != nil { + return e + } + } + if enc, ok := extraEncodings.Load(enc); ok { + return enc.(encoding.Encoding) + } + return encoding.NotSupported{} +} + +func lookupLevelEncoding(enc format.Encoding, max byte) encoding.Encoding { + i := bits.Len8(max) - 1 + switch enc { + case format.RLE: + return &levelEncodingsRLE[i] + case format.BitPacked: + return &levelEncodingsBitPacked[i] + default: + return encoding.NotSupported{} + } +} + +func canEncode(e encoding.Encoding, k Kind) bool { + if isDictionaryEncoding(e) { + return true + } + switch k { + case Boolean: + return encoding.CanEncodeBoolean(e) + case Int32: + return encoding.CanEncodeInt32(e) + case Int64: + return encoding.CanEncodeInt64(e) + case Int96: + return encoding.CanEncodeInt96(e) + case Float: + return encoding.CanEncodeFloat(e) + case Double: + return encoding.CanEncodeDouble(e) + case ByteArray: + return encoding.CanEncodeByteArray(e) + case FixedLenByteArray: + return encoding.CanEncodeFixedLenByteArray(e) + default: + return false + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/bitpacked/bitpacked.go b/vendor/github.com/parquet-go/parquet-go/encoding/bitpacked/bitpacked.go new file mode 100644 index 00000000000..47c3dd09065 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/bitpacked/bitpacked.go @@ -0,0 +1,119 @@ +package bitpacked + +import ( + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type Encoding struct { + encoding.NotSupported + BitWidth int +} + +func (e *Encoding) String() string { + return "BIT_PACKED" +} + +func (e *Encoding) Encoding() format.Encoding { + return format.BitPacked +} + +func (e *Encoding) EncodeLevels(dst []byte, src []uint8) ([]byte, error) { + dst, err := encodeLevels(dst[:0], src, uint(e.BitWidth)) + return dst, e.wrap(err) +} + +func (e *Encoding) 
DecodeLevels(dst []uint8, src []byte) ([]uint8, error) { + dst, err := decodeLevels(dst[:0], src, uint(e.BitWidth)) + return dst, e.wrap(err) +} + +func (e *Encoding) wrap(err error) error { + if err != nil { + err = encoding.Error(e, err) + } + return err +} + +func encodeLevels(dst, src []byte, bitWidth uint) ([]byte, error) { + if bitWidth == 0 || len(src) == 0 { + return append(dst[:0], 0), nil + } + + n := ((int(bitWidth) * len(src)) + 7) / 8 + c := n + 1 + + if cap(dst) < c { + dst = make([]byte, c, 2*c) + } else { + dst = dst[:c] + for i := range dst { + dst[i] = 0 + } + } + + bitMask := byte(1<> bitShift + i := bitOffset / 8 + j := bitOffset % 8 + dst[i+0] |= (v & bitMask) << j + dst[i+1] |= (v >> (8 - j)) + bitOffset += bitWidth + } + + return dst[:n], nil +} + +func decodeLevels(dst, src []byte, bitWidth uint) ([]byte, error) { + if bitWidth == 0 || len(src) == 0 { + return append(dst[:0], 0), nil + } + + numBits := 8 * uint(len(src)) + numValues := int(numBits / bitWidth) + if (numBits % bitWidth) != 0 { + numValues++ + } + + if cap(dst) < numValues { + dst = make([]byte, numValues, 2*numValues) + } else { + dst = dst[:numValues] + for i := range dst { + dst[i] = 0 + } + } + + bitMask := byte(1<> j) + if int(i+1) < len(src) { + v |= (src[i+1] << (8 - j)) + } + v &= bitMask + dst[k] = bitFlip(v) >> bitShift + bitOffset += bitWidth + } + + return dst, nil +} + +func bitFlip(b byte) byte { + return (((b >> 0) & 1) << 7) | + (((b >> 1) & 1) << 6) | + (((b >> 2) & 1) << 5) | + (((b >> 3) & 1) << 4) | + (((b >> 4) & 1) << 3) | + (((b >> 5) & 1) << 2) | + (((b >> 6) & 1) << 1) | + (((b >> 7) & 1) << 0) +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit.go b/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit.go new file mode 100644 index 00000000000..de78dfe2e34 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit.go @@ -0,0 +1,60 @@ 
+package bytestreamsplit + +import ( + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// This encoder implements a version of the Byte Stream Split encoding as described +// in https://github.com/apache/parquet-format/blob/master/Encodings.md#byte-stream-split-byte_stream_split--9 +type Encoding struct { + encoding.NotSupported +} + +func (e *Encoding) String() string { + return "BYTE_STREAM_SPLIT" +} + +func (e *Encoding) Encoding() format.Encoding { + return format.ByteStreamSplit +} + +func (e *Encoding) EncodeFloat(dst []byte, src []float32) ([]byte, error) { + dst = resize(dst, 4*len(src)) + encodeFloat(dst, unsafecast.Slice[byte](src)) + return dst, nil +} + +func (e *Encoding) EncodeDouble(dst []byte, src []float64) ([]byte, error) { + dst = resize(dst, 8*len(src)) + encodeDouble(dst, unsafecast.Slice[byte](src)) + return dst, nil +} + +func (e *Encoding) DecodeFloat(dst []float32, src []byte) ([]float32, error) { + if (len(src) % 4) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "FLOAT", len(src)) + } + buf := resize(unsafecast.Slice[byte](dst), len(src)) + decodeFloat(buf, src) + return unsafecast.Slice[float32](buf), nil +} + +func (e *Encoding) DecodeDouble(dst []float64, src []byte) ([]float64, error) { + if (len(src) % 8) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "DOUBLE", len(src)) + } + buf := resize(unsafecast.Slice[byte](dst), len(src)) + decodeDouble(buf, src) + return unsafecast.Slice[float64](buf), nil +} + +func resize(buf []byte, size int) []byte { + if cap(buf) < size { + buf = make([]byte, size, 2*size) + } else { + buf = buf[:size] + } + return buf +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_amd64.go b/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_amd64.go new file mode 100644 index 00000000000..1798c8916c0 --- /dev/null +++ 
b/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_amd64.go @@ -0,0 +1,35 @@ +//go:build !purego + +package bytestreamsplit + +import ( + "golang.org/x/sys/cpu" +) + +var encodeFloatHasAVX512 = cpu.X86.HasAVX512 && + cpu.X86.HasAVX512F && + cpu.X86.HasAVX512VL + +var encodeDoubleHasAVX512 = cpu.X86.HasAVX512 && + cpu.X86.HasAVX512F && + cpu.X86.HasAVX512VL && + cpu.X86.HasAVX512VBMI // VPERMB + +var decodeFloatHasAVX2 = cpu.X86.HasAVX2 + +var decodeDoubleHasAVX512 = cpu.X86.HasAVX512 && + cpu.X86.HasAVX512F && + cpu.X86.HasAVX512VL && + cpu.X86.HasAVX512VBMI // VPERMB + +//go:noescape +func encodeFloat(dst, src []byte) + +//go:noescape +func encodeDouble(dst, src []byte) + +//go:noescape +func decodeFloat(dst, src []byte) + +//go:noescape +func decodeDouble(dst, src []byte) diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_amd64.s b/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_amd64.s new file mode 100644 index 00000000000..b0c7622dbc4 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_amd64.s @@ -0,0 +1,426 @@ + //go:build !purego + +#include "textflag.h" + +// This file contains optimizations of the BYTE_STREAM_SPLIT encoding using AVX2 +// and AVX512 (when available). +// +// The AVX2/512 instruction set comes with instructions to load memory from, or +// store memory at sparse locations called VPGATHER and VPSCATTER. VPGATHER was +// available in the AVX2 instruction set, VPSCATTER was introduced in AVX512 +// (when the AVX512_VBMI extension is supported). Gathering bytes are sparse +// memory locations is useful during the decoding process since we are +// recomposing 32 or 64 bit floating point values from 4 or 8 bytes dispatched +// in the input byte array. +// +// To either deconstruct or reconstruct floating point values, we need to +// reorder the bytes of each value. 
If we have 4 32 bit floats, we can permute +// their bytes so that the first one contains all the first bytes, the second +// contains all the second bytes, etc... The VPSHUFB instruction is used to +// perform the byte permutation, or the VPERMB instruction for 64 bit floats. +// +// We use different instructions because the VPSHUFB instruction works on two +// lanes of 16 bytes when used on YMM registers. 4 32 bit floats take 16 bytes, +// so a a YMM register can hold two lanes of 4 32 bit floats and the VPSHUFB +// can permute the two sets of values in a single invocation. For 64 bit floats +// we need to permute 8 values, which take 64 bytes and therefore need to be +// held in a ZMM register and apply permutations across the entire register, +// which is only possible using VPERMB. +// +// Technically we could use ZMM registers when working on 32 bit floats to work +// on 16 values per iteration. However, measurements indicated that the latency +// of VPGATHERDD/VPSCATTERDD on ZMM registers did not provide any improvements +// to the throughput of the algorithms, but working on more values increased the +// code complexity. Using YMM registers offered the best balance between +// performance and maintainability. +// +// At a high level the vectorized algorithms are the following: +// +// encoding +// -------- +// * Load a vector of data from the input buffer +// * Permute bytes, grouping bytes by index +// * Scatter bytes of the register to the output buffer +// +// decoding +// -------- +// * Gather sparse bytes from the input buffer +// * Permute bytes, reconstructing the original values +// * Store the vector in the output buffer +// +// When AVX instructions are not available, the functions fallback to scalar +// implementations of the algorithms. These yield much lower throughput, but +// performed 20-30% better than the code generated by the Go compiler. 
+ +// func encodeFloat(dst, src []byte) +TEXT ·encodeFloat(SB), NOSPLIT, $0-48 + MOVQ src_base+24(FP), AX + MOVQ src_len+32(FP), BX + MOVQ dst_base+0(FP), DX + + MOVQ AX, CX + ADDQ BX, CX // end + SHRQ $2, BX // len + + CMPQ BX, $0 + JE done + + CMPB ·encodeFloatHasAVX512(SB), $0 + JE loop1x4 + + CMPQ BX, $8 + JB loop1x4 + + MOVQ CX, DI + SUBQ AX, DI + SHRQ $5, DI + SHLQ $5, DI + ADDQ AX, DI + + VMOVDQU32 shuffle8x4<>(SB), Y0 + VPBROADCASTD BX, Y2 + VPMULLD scale8x4<>(SB), Y2, Y2 + VPADDD offset8x4<>(SB), Y2, Y2 +loop8x4: + KXORQ K1, K1, K1 + KNOTQ K1, K1 + + VMOVDQU32 (AX), Y1 + VPSHUFB Y0, Y1, Y1 + VPSCATTERDD Y1, K1, (DX)(Y2*1) + + ADDQ $32, AX + ADDQ $8, DX + CMPQ AX, DI + JNE loop8x4 + VZEROUPPER + + CMPQ AX, CX + JE done +loop1x4: + MOVL (AX), SI + MOVQ DX, DI + + MOVB SI, (DI) + SHRL $8, SI + ADDQ BX, DI + + MOVB SI, (DI) + SHRL $8, SI + ADDQ BX, DI + + MOVB SI, (DI) + SHRL $8, SI + ADDQ BX, DI + + MOVB SI, (DI) + + ADDQ $4, AX + INCQ DX + CMPQ AX, CX + JB loop1x4 +done: + RET + +// func encodeDouble(dst, src []byte) +TEXT ·encodeDouble(SB), NOSPLIT, $0-48 + MOVQ src_base+24(FP), AX + MOVQ src_len+32(FP), BX + MOVQ dst_base+0(FP), DX + + MOVQ AX, CX + ADDQ BX, CX + SHRQ $3, BX + + CMPQ BX, $0 + JE done + + CMPB ·encodeDoubleHasAVX512(SB), $0 + JE loop1x8 + + CMPQ BX, $8 + JB loop1x8 + + MOVQ CX, DI + SUBQ AX, DI + SHRQ $6, DI + SHLQ $6, DI + ADDQ AX, DI + + VMOVDQU64 shuffle8x8<>(SB), Z0 + VPBROADCASTQ BX, Z2 + VPMULLQ scale8x8<>(SB), Z2, Z2 +loop8x8: + KXORQ K1, K1, K1 + KNOTQ K1, K1 + + VMOVDQU64 (AX), Z1 + VPERMB Z1, Z0, Z1 + VPSCATTERQQ Z1, K1, (DX)(Z2*1) + + ADDQ $64, AX + ADDQ $8, DX + CMPQ AX, DI + JNE loop8x8 + VZEROUPPER + + CMPQ AX, CX + JE done +loop1x8: + MOVQ (AX), SI + MOVQ DX, DI + + MOVB SI, (DI) + SHRQ $8, SI + ADDQ BX, DI + + MOVB SI, (DI) + SHRQ $8, SI + ADDQ BX, DI + + MOVB SI, (DI) + SHRQ $8, SI + ADDQ BX, DI + + MOVB SI, (DI) + SHRQ $8, SI + ADDQ BX, DI + + MOVB SI, (DI) + SHRQ $8, SI + ADDQ BX, DI + + MOVB SI, (DI) + SHRQ $8, SI + ADDQ 
BX, DI + + MOVB SI, (DI) + SHRQ $8, SI + ADDQ BX, DI + + MOVB SI, (DI) + + ADDQ $8, AX + INCQ DX + CMPQ AX, CX + JB loop1x8 +done: + RET + +// func decodeFloat(dst, src []byte) +TEXT ·decodeFloat(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), BX + MOVQ src_base+24(FP), DX + + MOVQ AX, CX + ADDQ BX, CX // end + SHRQ $2, BX // len + + CMPQ BX, $0 + JE done + + CMPB ·decodeFloatHasAVX2(SB), $0 + JE loop1x4 + + CMPQ BX, $8 + JB loop1x4 + + MOVQ CX, DI + SUBQ AX, DI + SHRQ $5, DI + SHLQ $5, DI + ADDQ AX, DI + + MOVQ $0xFFFFFFFF, SI + MOVQ BX, X5 + MOVQ SI, X6 + VMOVDQU shuffle8x4<>(SB), Y0 + VPBROADCASTD X5, Y2 + VPBROADCASTD X6, Y3 + VPMULLD scale8x4<>(SB), Y2, Y2 + VPADDD offset8x4<>(SB), Y2, Y2 + VMOVDQU Y3, Y4 +loop8x4: + VPGATHERDD Y4, (DX)(Y2*1), Y1 + VPSHUFB Y0, Y1, Y1 + VMOVDQU Y1, (AX) + VMOVDQU Y3, Y4 + + ADDQ $32, AX + ADDQ $8, DX + CMPQ AX, DI + JNE loop8x4 + VZEROUPPER + + CMPQ AX, CX + JE done +loop1x4: + MOVQ DX, DI + MOVBLZX (DI), R8 + ADDQ BX, DI + MOVBLZX (DI), R9 + ADDQ BX, DI + MOVBLZX (DI), R10 + ADDQ BX, DI + MOVBLZX (DI), R11 + + SHLL $8, R9 + SHLL $16, R10 + SHLL $24, R11 + + ORL R9, R8 + ORL R10, R8 + ORL R11, R8 + + MOVL R8, (AX) + + ADDQ $4, AX + INCQ DX + CMPQ AX, CX + JB loop1x4 +done: + RET + +// func decodeDouble(dst, src []byte) +TEXT ·decodeDouble(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), BX + MOVQ src_base+24(FP), DX + + MOVQ AX, CX + ADDQ BX, CX + SHRQ $3, BX + + CMPQ BX, $0 + JE done + + CMPB ·decodeDoubleHasAVX512(SB), $0 + JE loop1x8 + + CMPQ BX, $8 + JB loop1x8 + + MOVQ CX, DI + SUBQ AX, DI + SHRQ $6, DI + SHLQ $6, DI + ADDQ AX, DI + + VMOVDQU64 shuffle8x8<>(SB), Z0 + VPBROADCASTQ BX, Z2 + VPMULLQ scale8x8<>(SB), Z2, Z2 +loop8x8: + KXORQ K1, K1, K1 + KNOTQ K1, K1 + + VPGATHERQQ (DX)(Z2*1), K1, Z1 + VPERMB Z1, Z0, Z1 + VMOVDQU64 Z1, (AX) + + ADDQ $64, AX + ADDQ $8, DX + CMPQ AX, DI + JNE loop8x8 + VZEROUPPER + + CMPQ AX, CX + JE done +loop1x8: + MOVQ DX, DI + XORQ R12, R12 + + MOVBQZX 
(DI), R8 + ADDQ BX, DI + MOVBQZX (DI), R9 + ADDQ BX, DI + MOVBQZX (DI), R10 + ADDQ BX, DI + MOVBQZX (DI), R11 + ADDQ BX, DI + + SHLQ $8, R9 + SHLQ $16, R10 + SHLQ $24, R11 + + ORQ R8, R12 + ORQ R9, R12 + ORQ R10, R12 + ORQ R11, R12 + + MOVBQZX (DI), R8 + ADDQ BX, DI + MOVBQZX (DI), R9 + ADDQ BX, DI + MOVBQZX (DI), R10 + ADDQ BX, DI + MOVBQZX (DI), R11 + + SHLQ $32, R8 + SHLQ $40, R9 + SHLQ $48, R10 + SHLQ $56, R11 + + ORQ R8, R12 + ORQ R9, R12 + ORQ R10, R12 + ORQ R11, R12 + + MOVQ R12, (AX) + + ADDQ $8, AX + INCQ DX + CMPQ AX, CX + JB loop1x8 +done: + RET + +GLOBL scale8x4<>(SB), RODATA|NOPTR, $32 +DATA scale8x4<>+0(SB)/4, $0 +DATA scale8x4<>+4(SB)/4, $1 +DATA scale8x4<>+8(SB)/4, $2 +DATA scale8x4<>+12(SB)/4, $3 +DATA scale8x4<>+16(SB)/4, $0 +DATA scale8x4<>+20(SB)/4, $1 +DATA scale8x4<>+24(SB)/4, $2 +DATA scale8x4<>+28(SB)/4, $3 + +GLOBL offset8x4<>(SB), RODATA|NOPTR, $32 +DATA offset8x4<>+0(SB)/4, $0 +DATA offset8x4<>+4(SB)/4, $0 +DATA offset8x4<>+8(SB)/4, $0 +DATA offset8x4<>+12(SB)/4, $0 +DATA offset8x4<>+16(SB)/4, $4 +DATA offset8x4<>+20(SB)/4, $4 +DATA offset8x4<>+24(SB)/4, $4 +DATA offset8x4<>+28(SB)/4, $4 + +GLOBL shuffle8x4<>(SB), RODATA|NOPTR, $32 +DATA shuffle8x4<>+0(SB)/4, $0x0C080400 +DATA shuffle8x4<>+4(SB)/4, $0x0D090501 +DATA shuffle8x4<>+8(SB)/4, $0x0E0A0602 +DATA shuffle8x4<>+12(SB)/4, $0x0F0B0703 +DATA shuffle8x4<>+16(SB)/4, $0x0C080400 +DATA shuffle8x4<>+20(SB)/4, $0x0D090501 +DATA shuffle8x4<>+24(SB)/4, $0x0E0A0602 +DATA shuffle8x4<>+28(SB)/4, $0x0F0B0703 + +GLOBL scale8x8<>(SB), RODATA|NOPTR, $64 +DATA scale8x8<>+0(SB)/8, $0 +DATA scale8x8<>+8(SB)/8, $1 +DATA scale8x8<>+16(SB)/8, $2 +DATA scale8x8<>+24(SB)/8, $3 +DATA scale8x8<>+32(SB)/8, $4 +DATA scale8x8<>+40(SB)/8, $5 +DATA scale8x8<>+48(SB)/8, $6 +DATA scale8x8<>+56(SB)/8, $7 + +GLOBL shuffle8x8<>(SB), RODATA|NOPTR, $64 +DATA shuffle8x8<>+0(SB)/8, $0x3830282018100800 +DATA shuffle8x8<>+8(SB)/8, $0x3931292119110901 +DATA shuffle8x8<>+16(SB)/8, $0x3A322A221A120A02 +DATA 
shuffle8x8<>+24(SB)/8, $0x3B332B231B130B03 +DATA shuffle8x8<>+32(SB)/8, $0x3C342C241C140C04 +DATA shuffle8x8<>+40(SB)/8, $0x3D352D251D150D05 +DATA shuffle8x8<>+48(SB)/8, $0x3E362E261E160E06 +DATA shuffle8x8<>+56(SB)/8, $0x3F372F271F170F07 diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_purego.go b/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_purego.go new file mode 100644 index 00000000000..1007e3c7683 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/bytestreamsplit/bytestreamsplit_purego.go @@ -0,0 +1,83 @@ +//go:build purego || !amd64 + +package bytestreamsplit + +import "github.com/parquet-go/bitpack/unsafecast" + +func encodeFloat(dst, src []byte) { + n := len(src) / 4 + b0 := dst[0*n : 1*n] + b1 := dst[1*n : 2*n] + b2 := dst[2*n : 3*n] + b3 := dst[3*n : 4*n] + + for i, v := range unsafecast.Slice[uint32](src) { + b0[i] = byte(v >> 0) + b1[i] = byte(v >> 8) + b2[i] = byte(v >> 16) + b3[i] = byte(v >> 24) + } +} + +func encodeDouble(dst, src []byte) { + n := len(src) / 8 + b0 := dst[0*n : 1*n] + b1 := dst[1*n : 2*n] + b2 := dst[2*n : 3*n] + b3 := dst[3*n : 4*n] + b4 := dst[4*n : 5*n] + b5 := dst[5*n : 6*n] + b6 := dst[6*n : 7*n] + b7 := dst[7*n : 8*n] + + for i, v := range unsafecast.Slice[uint64](src) { + b0[i] = byte(v >> 0) + b1[i] = byte(v >> 8) + b2[i] = byte(v >> 16) + b3[i] = byte(v >> 24) + b4[i] = byte(v >> 32) + b5[i] = byte(v >> 40) + b6[i] = byte(v >> 48) + b7[i] = byte(v >> 56) + } +} + +func decodeFloat(dst, src []byte) { + n := len(src) / 4 + b0 := src[0*n : 1*n] + b1 := src[1*n : 2*n] + b2 := src[2*n : 3*n] + b3 := src[3*n : 4*n] + + dst32 := unsafecast.Slice[uint32](dst) + for i := range dst32 { + dst32[i] = uint32(b0[i]) | + uint32(b1[i])<<8 | + uint32(b2[i])<<16 | + uint32(b3[i])<<24 + } +} + +func decodeDouble(dst, src []byte) { + n := len(src) / 8 + b0 := src[0*n : 1*n] + b1 := src[1*n : 2*n] + b2 := src[2*n : 3*n] + b3 := src[3*n : 4*n] + 
b4 := src[4*n : 5*n] + b5 := src[5*n : 6*n] + b6 := src[6*n : 7*n] + b7 := src[7*n : 8*n] + + dst64 := unsafecast.Slice[uint64](dst) + for i := range dst64 { + dst64[i] = uint64(b0[i]) | + uint64(b1[i])<<8 | + uint64(b2[i])<<16 | + uint64(b3[i])<<24 | + uint64(b4[i])<<32 | + uint64(b5[i])<<40 | + uint64(b6[i])<<48 | + uint64(b7[i])<<56 + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed.go new file mode 100644 index 00000000000..9e6f8c9b289 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed.go @@ -0,0 +1,488 @@ +package delta + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" + + "github.com/parquet-go/bitpack" + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type BinaryPackedEncoding struct { + encoding.NotSupported +} + +func (e *BinaryPackedEncoding) String() string { + return "DELTA_BINARY_PACKED" +} + +func (e *BinaryPackedEncoding) Encoding() format.Encoding { + return format.DeltaBinaryPacked +} + +func (e *BinaryPackedEncoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + return encodeInt32(dst[:0], src), nil +} + +func (e *BinaryPackedEncoding) EncodeInt64(dst []byte, src []int64) ([]byte, error) { + return encodeInt64(dst[:0], src), nil +} + +func (e *BinaryPackedEncoding) DecodeInt32(dst []int32, src []byte) ([]int32, error) { + buf := unsafecast.Slice[byte](dst) + buf, _, err := decodeInt32(buf[:0], src) + return unsafecast.Slice[int32](buf), e.wrap(err) +} + +func (e *BinaryPackedEncoding) DecodeInt64(dst []int64, src []byte) ([]int64, error) { + buf := unsafecast.Slice[byte](dst) + buf, _, err := decodeInt64(buf[:0], src) + return unsafecast.Slice[int64](buf), e.wrap(err) +} + +func (e *BinaryPackedEncoding) wrap(err error) error { + if err != nil { + err = encoding.Error(e, err) + } 
+ return err +} + +const ( + blockSize = 128 + numMiniBlocks = 4 + miniBlockSize = blockSize / numMiniBlocks + // The parquet spec does not enforce a limit to the block size, but we need + // one otherwise invalid inputs may result in unbounded memory allocations. + // + // 65K+ values should be enough for any valid use case. + maxSupportedBlockSize = 65536 + + maxHeaderLength32 = 4 * binary.MaxVarintLen64 + maxMiniBlockLength32 = binary.MaxVarintLen64 + numMiniBlocks + (4 * blockSize) + + maxHeaderLength64 = 8 * binary.MaxVarintLen64 + maxMiniBlockLength64 = binary.MaxVarintLen64 + numMiniBlocks + (8 * blockSize) +) + +var ( + encodeInt32 = encodeInt32Default + encodeInt64 = encodeInt64Default +) + +func encodeInt32Default(dst []byte, src []int32) []byte { + totalValues := len(src) + firstValue := int32(0) + if totalValues > 0 { + firstValue = src[0] + } + + n := len(dst) + dst = resize(dst, n+maxHeaderLength32) + dst = dst[:n+encodeBinaryPackedHeader(dst[n:], blockSize, numMiniBlocks, totalValues, int64(firstValue))] + + if totalValues < 2 { + return dst + } + + lastValue := firstValue + for i := 1; i < len(src); i += blockSize { + block := [blockSize]int32{} + blockLength := copy(block[:], src[i:]) + + lastValue = blockDeltaInt32(&block, lastValue) + minDelta := blockMinInt32(&block) + blockSubInt32(&block, minDelta) + blockClearInt32(&block, blockLength) + + bitWidths := [numMiniBlocks]byte{} + blockBitWidthsInt32(&bitWidths, &block) + + n := len(dst) + dst = resize(dst, n+maxMiniBlockLength32+4) + n += encodeBlockHeader(dst[n:], int64(minDelta), bitWidths) + + for i, bitWidth := range bitWidths { + if bitWidth != 0 { + miniBlock := (*[miniBlockSize]int32)(block[i*miniBlockSize:]) + encodeMiniBlockInt32(dst[n:], miniBlock, uint(bitWidth)) + n += (miniBlockSize * int(bitWidth)) / 8 + } + } + + dst = dst[:n] + } + + return dst +} + +func encodeInt64Default(dst []byte, src []int64) []byte { + totalValues := len(src) + firstValue := int64(0) + if totalValues > 0 { 
+ firstValue = src[0] + } + + n := len(dst) + dst = resize(dst, n+maxHeaderLength64) + dst = dst[:n+encodeBinaryPackedHeader(dst[n:], blockSize, numMiniBlocks, totalValues, firstValue)] + + if totalValues < 2 { + return dst + } + + lastValue := firstValue + for i := 1; i < len(src); i += blockSize { + block := [blockSize]int64{} + blockLength := copy(block[:], src[i:]) + + lastValue = blockDeltaInt64(&block, lastValue) + minDelta := blockMinInt64(&block) + blockSubInt64(&block, minDelta) + blockClearInt64(&block, blockLength) + + bitWidths := [numMiniBlocks]byte{} + blockBitWidthsInt64(&bitWidths, &block) + + n := len(dst) + dst = resize(dst, n+maxMiniBlockLength64+8) + n += encodeBlockHeader(dst[n:], minDelta, bitWidths) + + for i, bitWidth := range bitWidths { + if bitWidth != 0 { + miniBlock := (*[miniBlockSize]int64)(block[i*miniBlockSize:]) + encodeMiniBlockInt64(dst[n:], miniBlock, uint(bitWidth)) + n += (miniBlockSize * int(bitWidth)) / 8 + } + } + + dst = dst[:n] + } + + return dst +} + +func encodeBinaryPackedHeader(dst []byte, blockSize, numMiniBlocks, totalValues int, firstValue int64) (n int) { + n += binary.PutUvarint(dst[n:], uint64(blockSize)) + n += binary.PutUvarint(dst[n:], uint64(numMiniBlocks)) + n += binary.PutUvarint(dst[n:], uint64(totalValues)) + n += binary.PutVarint(dst[n:], firstValue) + return n +} + +func encodeBlockHeader(dst []byte, minDelta int64, bitWidths [numMiniBlocks]byte) (n int) { + n += binary.PutVarint(dst, int64(minDelta)) + n += copy(dst[n:], bitWidths[:]) + return n +} + +func blockClearInt32(block *[blockSize]int32, blockLength int) { + if blockLength < blockSize { + clear := block[blockLength:] + for i := range clear { + clear[i] = 0 + } + } +} + +func blockDeltaInt32(block *[blockSize]int32, lastValue int32) int32 { + for i, v := range block { + block[i], lastValue = v-lastValue, v + } + return lastValue +} + +func blockMinInt32(block *[blockSize]int32) int32 { + min := block[0] + for _, v := range block[1:] { + if v < 
min { + min = v + } + } + return min +} + +func blockSubInt32(block *[blockSize]int32, value int32) { + for i := range block { + block[i] -= value + } +} + +func blockBitWidthsInt32(bitWidths *[numMiniBlocks]byte, block *[blockSize]int32) { + for i := range bitWidths { + j := (i + 0) * miniBlockSize + k := (i + 1) * miniBlockSize + bitWidth := 0 + + for _, v := range block[j:k] { + if n := bits.Len32(uint32(v)); n > bitWidth { + bitWidth = n + } + } + + bitWidths[i] = byte(bitWidth) + } +} + +func blockClearInt64(block *[blockSize]int64, blockLength int) { + if blockLength < blockSize { + clear := block[blockLength:] + for i := range clear { + clear[i] = 0 + } + } +} + +func blockDeltaInt64(block *[blockSize]int64, lastValue int64) int64 { + for i, v := range block { + block[i], lastValue = v-lastValue, v + } + return lastValue +} + +func blockMinInt64(block *[blockSize]int64) int64 { + min := block[0] + for _, v := range block[1:] { + if v < min { + min = v + } + } + return min +} + +func blockSubInt64(block *[blockSize]int64, value int64) { + for i := range block { + block[i] -= value + } +} + +func blockBitWidthsInt64(bitWidths *[numMiniBlocks]byte, block *[blockSize]int64) { + for i := range bitWidths { + j := (i + 0) * miniBlockSize + k := (i + 1) * miniBlockSize + bitWidth := 0 + + for _, v := range block[j:k] { + if n := bits.Len64(uint64(v)); n > bitWidth { + bitWidth = n + } + } + + bitWidths[i] = byte(bitWidth) + } +} + +func decodeInt32(dst, src []byte) ([]byte, []byte, error) { + blockSize, numMiniBlocks, totalValues, firstValue, src, err := decodeBinaryPackedHeader(src) + if err != nil { + return dst, src, err + } + if totalValues == 0 { + return dst, src, nil + } + if firstValue < math.MinInt32 || firstValue > math.MaxInt32 { + return dst, src, fmt.Errorf("first value out of range: %d", firstValue) + } + + writeOffset := len(dst) + dst = resize(dst, len(dst)+4*totalValues) + out := unsafecast.Slice[int32](dst) + out[writeOffset] = int32(firstValue) + 
writeOffset++ + totalValues-- + lastValue := int32(firstValue) + numValuesInMiniBlock := blockSize / numMiniBlocks + + const padding = 16 + miniBlockTemp := make([]byte, 256+padding) + + for totalValues > 0 && len(src) > 0 { + var minDelta int64 + var bitWidths []byte + minDelta, bitWidths, src, err = decodeBinaryPackedBlock(src, numMiniBlocks) + if err != nil { + return dst, src, err + } + + blockOffset := writeOffset + + for _, bitWidth := range bitWidths { + n := min(numValuesInMiniBlock, totalValues) + if bitWidth != 0 { + miniBlockSize := (numValuesInMiniBlock * int(bitWidth)) / 8 + miniBlockData := src + if miniBlockSize <= len(src) { + miniBlockData = miniBlockData[:miniBlockSize] + } + src = src[len(miniBlockData):] + if cap(miniBlockData) < miniBlockSize+bitpack.PaddingInt32 { + miniBlockTemp = resize(miniBlockTemp[:0], miniBlockSize+bitpack.PaddingInt32) + miniBlockData = miniBlockTemp[:copy(miniBlockTemp, miniBlockData)] + } + miniBlockData = miniBlockData[:miniBlockSize] + bitpack.Unpack(out[writeOffset:writeOffset+n], miniBlockData, uint(bitWidth)) + } + writeOffset += n + totalValues -= n + if totalValues == 0 { + break + } + } + + lastValue = decodeBlockInt32(out[blockOffset:writeOffset], int32(minDelta), lastValue) + } + + if totalValues > 0 { + return dst, src, fmt.Errorf("%d missing values: %w", totalValues, io.ErrUnexpectedEOF) + } + + return dst, src, nil +} + +func decodeInt64(dst, src []byte) ([]byte, []byte, error) { + blockSize, numMiniBlocks, totalValues, firstValue, src, err := decodeBinaryPackedHeader(src) + if err != nil { + return dst, src, err + } + if totalValues == 0 { + return dst, src, nil + } + + writeOffset := len(dst) + dst = resize(dst, len(dst)+8*totalValues) + out := unsafecast.Slice[int64](dst) + out[writeOffset] = firstValue + writeOffset++ + totalValues-- + lastValue := firstValue + numValuesInMiniBlock := blockSize / numMiniBlocks + + const padding = 16 + miniBlockTemp := make([]byte, 512+padding) + + for totalValues > 0 
&& len(src) > 0 { + var minDelta int64 + var bitWidths []byte + minDelta, bitWidths, src, err = decodeBinaryPackedBlock(src, numMiniBlocks) + if err != nil { + return dst, src, err + } + blockOffset := writeOffset + + for _, bitWidth := range bitWidths { + n := min(numValuesInMiniBlock, totalValues) + if bitWidth != 0 { + miniBlockSize := (numValuesInMiniBlock * int(bitWidth)) / 8 + miniBlockData := src + if miniBlockSize <= len(src) { + miniBlockData = src[:miniBlockSize] + } + src = src[len(miniBlockData):] + if len(miniBlockData) < miniBlockSize+bitpack.PaddingInt64 { + miniBlockTemp = resize(miniBlockTemp[:0], miniBlockSize+bitpack.PaddingInt64) + miniBlockData = miniBlockTemp[:copy(miniBlockTemp, miniBlockData)] + } + miniBlockData = miniBlockData[:miniBlockSize] + bitpack.Unpack(out[writeOffset:writeOffset+n], miniBlockData, uint(bitWidth)) + } + writeOffset += n + totalValues -= n + if totalValues == 0 { + break + } + } + + lastValue = decodeBlockInt64(out[blockOffset:writeOffset], minDelta, lastValue) + } + + if totalValues > 0 { + return dst, src, fmt.Errorf("%d missing values: %w", totalValues, io.ErrUnexpectedEOF) + } + + return dst, src, nil +} + +func decodeBinaryPackedHeader(src []byte) (blockSize, numMiniBlocks, totalValues int, firstValue int64, next []byte, err error) { + u := uint64(0) + n := 0 + i := 0 + + if u, n, err = decodeUvarint(src[i:], "block size"); err != nil { + return + } + i += n + blockSize = int(u) + + if u, n, err = decodeUvarint(src[i:], "number of mini-blocks"); err != nil { + return + } + i += n + numMiniBlocks = int(u) + + if u, n, err = decodeUvarint(src[i:], "total values"); err != nil { + return + } + i += n + totalValues = int(u) + + if firstValue, n, err = decodeVarint(src[i:], "first value"); err != nil { + return + } + i += n + + if numMiniBlocks == 0 { + err = fmt.Errorf("invalid number of mini block (%d)", numMiniBlocks) + } else if (blockSize <= 0) || (blockSize%128) != 0 { + err = fmt.Errorf("invalid block size is 
not a multiple of 128 (%d)", blockSize) + } else if blockSize > maxSupportedBlockSize { + err = fmt.Errorf("invalid block size is too large (%d)", blockSize) + } else if miniBlockSize := blockSize / numMiniBlocks; (numMiniBlocks <= 0) || (miniBlockSize%32) != 0 { + err = fmt.Errorf("invalid mini block size is not a multiple of 32 (%d)", miniBlockSize) + } else if totalValues < 0 { + err = fmt.Errorf("invalid total number of values is negative (%d)", totalValues) + } else if totalValues > math.MaxInt32 { + err = fmt.Errorf("too many values: %d", totalValues) + } + + return blockSize, numMiniBlocks, totalValues, firstValue, src[i:], err +} + +func decodeBinaryPackedBlock(src []byte, numMiniBlocks int) (minDelta int64, bitWidths, next []byte, err error) { + minDelta, n, err := decodeVarint(src, "min delta") + if err != nil { + return 0, nil, src, err + } + src = src[n:] + if len(src) < numMiniBlocks { + bitWidths, next = src, nil + } else { + bitWidths, next = src[:numMiniBlocks], src[numMiniBlocks:] + } + return minDelta, bitWidths, next, nil +} + +func decodeUvarint(buf []byte, what string) (u uint64, n int, err error) { + u, n = binary.Uvarint(buf) + if n == 0 { + return 0, 0, fmt.Errorf("decoding %s: %w", what, io.ErrUnexpectedEOF) + } + if n < 0 { + return 0, 0, fmt.Errorf("overflow decoding %s (read %d/%d bytes)", what, -n, len(buf)) + } + return u, n, nil +} + +func decodeVarint(buf []byte, what string) (v int64, n int, err error) { + v, n = binary.Varint(buf) + if n == 0 { + return 0, 0, fmt.Errorf("decoding %s: %w", what, io.ErrUnexpectedEOF) + } + if n < 0 { + return 0, 0, fmt.Errorf("overflow decoding %s (read %d/%d bytes)", what, -n, len(buf)) + } + return v, n, nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_amd64.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_amd64.go new file mode 100644 index 00000000000..a466e5b9ae2 --- /dev/null +++ 
b/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_amd64.go @@ -0,0 +1,256 @@ +//go:build !purego + +package delta + +import ( + "github.com/parquet-go/bitpack/unsafecast" + "golang.org/x/sys/cpu" +) + +func init() { + if cpu.X86.HasAVX2 { + encodeInt32 = encodeInt32AVX2 + encodeInt64 = encodeInt64AVX2 + } +} + +//go:noescape +func blockDeltaInt32AVX2(block *[blockSize]int32, lastValue int32) int32 + +//go:noescape +func blockMinInt32AVX2(block *[blockSize]int32) int32 + +//go:noescape +func blockSubInt32AVX2(block *[blockSize]int32, value int32) + +//go:noescape +func blockBitWidthsInt32AVX2(bitWidths *[numMiniBlocks]byte, block *[blockSize]int32) + +//go:noescape +func encodeMiniBlockInt32Default(dst *byte, src *[miniBlockSize]int32, bitWidth uint) + +//go:noescape +func encodeMiniBlockInt32x1bitAVX2(dst *byte, src *[miniBlockSize]int32) + +//go:noescape +func encodeMiniBlockInt32x2bitsAVX2(dst *byte, src *[miniBlockSize]int32) + +//go:noescape +func encodeMiniBlockInt32x3to16bitsAVX2(dst *byte, src *[miniBlockSize]int32, bitWidth uint) + +//go:noescape +func encodeMiniBlockInt32x32bitsAVX2(dst *byte, src *[miniBlockSize]int32) + +func encodeMiniBlockInt32(dst []byte, src *[miniBlockSize]int32, bitWidth uint) { + encodeMiniBlockInt32Default(&dst[0], src, bitWidth) +} + +func encodeMiniBlockInt32AVX2(dst *byte, src *[miniBlockSize]int32, bitWidth uint) { + switch { + case bitWidth == 1: + encodeMiniBlockInt32x1bitAVX2(dst, src) + case bitWidth == 2: + encodeMiniBlockInt32x2bitsAVX2(dst, src) + case bitWidth == 32: + encodeMiniBlockInt32x32bitsAVX2(dst, src) + case bitWidth <= 16: + encodeMiniBlockInt32x3to16bitsAVX2(dst, src, bitWidth) + default: + encodeMiniBlockInt32Default(dst, src, bitWidth) + } +} + +func encodeInt32AVX2(dst []byte, src []int32) []byte { + totalValues := len(src) + firstValue := int32(0) + if totalValues > 0 { + firstValue = src[0] + } + + n := len(dst) + dst = resize(dst, n+maxHeaderLength32) + dst = 
dst[:n+encodeBinaryPackedHeader(dst[n:], blockSize, numMiniBlocks, totalValues, int64(firstValue))] + + if totalValues < 2 { + return dst + } + + lastValue := firstValue + for i := 1; i < len(src); i += blockSize { + block := [blockSize]int32{} + blockLength := copy(block[:], src[i:]) + + lastValue = blockDeltaInt32AVX2(&block, lastValue) + minDelta := blockMinInt32AVX2(&block) + blockSubInt32AVX2(&block, minDelta) + blockClearInt32(&block, blockLength) + + bitWidths := [numMiniBlocks]byte{} + blockBitWidthsInt32AVX2(&bitWidths, &block) + + n := len(dst) + dst = resize(dst, n+maxMiniBlockLength32+16) + n += encodeBlockHeader(dst[n:], int64(minDelta), bitWidths) + + for i, bitWidth := range bitWidths { + if bitWidth != 0 { + miniBlock := (*[miniBlockSize]int32)(block[i*miniBlockSize:]) + encodeMiniBlockInt32AVX2(&dst[n], miniBlock, uint(bitWidth)) + n += (miniBlockSize * int(bitWidth)) / 8 + } + } + + dst = dst[:n] + } + + return dst +} + +//go:noescape +func blockDeltaInt64AVX2(block *[blockSize]int64, lastValue int64) int64 + +//go:noescape +func blockMinInt64AVX2(block *[blockSize]int64) int64 + +//go:noescape +func blockSubInt64AVX2(block *[blockSize]int64, value int64) + +//go:noescape +func blockBitWidthsInt64AVX2(bitWidths *[numMiniBlocks]byte, block *[blockSize]int64) + +//go:noescape +func encodeMiniBlockInt64Default(dst *byte, src *[miniBlockSize]int64, bitWidth uint) + +//go:noescape +func encodeMiniBlockInt64x1bitAVX2(dst *byte, src *[miniBlockSize]int64) + +//go:noescape +func encodeMiniBlockInt64x2bitsAVX2(dst *byte, src *[miniBlockSize]int64) + +//go:noescape +func encodeMiniBlockInt64x64bitsAVX2(dst *byte, src *[miniBlockSize]int64) + +func encodeMiniBlockInt64(dst []byte, src *[miniBlockSize]int64, bitWidth uint) { + encodeMiniBlockInt64Default(&dst[0], src, bitWidth) +} + +func encodeMiniBlockInt64AVX2(dst *byte, src *[miniBlockSize]int64, bitWidth uint) { + switch { + case bitWidth == 1: + encodeMiniBlockInt64x1bitAVX2(dst, src) + case bitWidth == 
2: + encodeMiniBlockInt64x2bitsAVX2(dst, src) + case bitWidth == 64: + encodeMiniBlockInt64x64bitsAVX2(dst, src) + default: + encodeMiniBlockInt64Default(dst, src, bitWidth) + } +} + +func encodeInt64AVX2(dst []byte, src []int64) []byte { + totalValues := len(src) + firstValue := int64(0) + if totalValues > 0 { + firstValue = src[0] + } + + n := len(dst) + dst = resize(dst, n+maxHeaderLength64) + dst = dst[:n+encodeBinaryPackedHeader(dst[n:], blockSize, numMiniBlocks, totalValues, int64(firstValue))] + + if totalValues < 2 { + return dst + } + + lastValue := firstValue + for i := 1; i < len(src); i += blockSize { + block := [blockSize]int64{} + blockLength := copy(block[:], src[i:]) + + lastValue = blockDeltaInt64AVX2(&block, lastValue) + minDelta := blockMinInt64AVX2(&block) + blockSubInt64AVX2(&block, minDelta) + blockClearInt64(&block, blockLength) + + bitWidths := [numMiniBlocks]byte{} + blockBitWidthsInt64AVX2(&bitWidths, &block) + + n := len(dst) + dst = resize(dst, n+maxMiniBlockLength64+16) + n += encodeBlockHeader(dst[n:], int64(minDelta), bitWidths) + + for i, bitWidth := range bitWidths { + if bitWidth != 0 { + miniBlock := (*[miniBlockSize]int64)(block[i*miniBlockSize:]) + encodeMiniBlockInt64AVX2(&dst[n], miniBlock, uint(bitWidth)) + n += (miniBlockSize * int(bitWidth)) / 8 + } + } + + dst = dst[:n] + } + + return dst +} + +//go:noescape +func decodeBlockInt32Default(dst []int32, minDelta, lastValue int32) int32 + +//go:noescape +func decodeBlockInt32AVX2(dst []int32, minDelta, lastValue int32) int32 + +func decodeBlockInt32(dst []int32, minDelta, lastValue int32) int32 { + switch { + case cpu.X86.HasAVX2: + return decodeBlockInt32AVX2(dst, minDelta, lastValue) + default: + return decodeBlockInt32Default(dst, minDelta, lastValue) + } +} + +//go:noescape +func decodeMiniBlockInt32Default(dst []int32, src []uint32, bitWidth uint) + +//go:noescape +func decodeMiniBlockInt32x1to16bitsAVX2(dst []int32, src []uint32, bitWidth uint) + +//go:noescape +func 
decodeMiniBlockInt32x17to26bitsAVX2(dst []int32, src []uint32, bitWidth uint) + +//go:noescape +func decodeMiniBlockInt32x27to31bitsAVX2(dst []int32, src []uint32, bitWidth uint) + +func decodeMiniBlockInt32(dst []int32, src []uint32, bitWidth uint) { + hasAVX2 := cpu.X86.HasAVX2 + switch { + case hasAVX2 && bitWidth <= 16: + decodeMiniBlockInt32x1to16bitsAVX2(dst, src, bitWidth) + case hasAVX2 && bitWidth <= 26: + decodeMiniBlockInt32x17to26bitsAVX2(dst, src, bitWidth) + case hasAVX2 && bitWidth <= 31: + decodeMiniBlockInt32x27to31bitsAVX2(dst, src, bitWidth) + case bitWidth == 32: + copy(dst, unsafecast.Slice[int32](src)) + default: + decodeMiniBlockInt32Default(dst, src, bitWidth) + } +} + +//go:noescape +func decodeBlockInt64Default(dst []int64, minDelta, lastValue int64) int64 + +func decodeBlockInt64(dst []int64, minDelta, lastValue int64) int64 { + return decodeBlockInt64Default(dst, minDelta, lastValue) +} + +//go:noescape +func decodeMiniBlockInt64Default(dst []int64, src []uint32, bitWidth uint) + +func decodeMiniBlockInt64(dst []int64, src []uint32, bitWidth uint) { + switch { + case bitWidth == 64: + copy(dst, unsafecast.Slice[int64](src)) + default: + decodeMiniBlockInt64Default(dst, src, bitWidth) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_amd64.s b/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_amd64.s new file mode 100644 index 00000000000..08f80f09005 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_amd64.s @@ -0,0 +1,920 @@ +//go:build !purego + +#include "textflag.h" + +#define blockSize 128 +#define numMiniBlocks 4 +#define miniBlockSize 32 + +// ----------------------------------------------------------------------------- +// 32 bits +// ----------------------------------------------------------------------------- + +#define deltaInt32AVX2x8(baseAddr) \ + VMOVDQU baseAddr, Y1 \ // [0,1,2,3,4,5,6,7] + VPERMD Y1, Y3, Y2 \ // 
[7,0,1,2,3,4,5,6] + VPBLENDD $1, Y0, Y2, Y2 \ // [x,0,1,2,3,4,5,6] + VPSUBD Y2, Y1, Y2 \ // [0,1,2,...] - [x,0,1,...] + VMOVDQU Y2, baseAddr \ + VPERMD Y1, Y3, Y0 + +// func blockDeltaInt32AVX2(block *[blockSize]int32, lastValue int32) int32 +TEXT ·blockDeltaInt32AVX2(SB), NOSPLIT, $0-20 + MOVQ block+0(FP), AX + MOVL 4*blockSize-4(AX), CX + MOVL CX, ret+16(FP) + + VPBROADCASTD lastValue+8(FP), Y0 + VMOVDQU ·rotateLeft32(SB), Y3 + + XORQ SI, SI +loop: + deltaInt32AVX2x8(0(AX)(SI*4)) + deltaInt32AVX2x8(32(AX)(SI*4)) + deltaInt32AVX2x8(64(AX)(SI*4)) + deltaInt32AVX2x8(96(AX)(SI*4)) + ADDQ $32, SI + CMPQ SI, $blockSize + JNE loop + VZEROUPPER + RET + +// func blockMinInt32AVX2(block *[blockSize]int32) int32 +TEXT ·blockMinInt32AVX2(SB), NOSPLIT, $0-12 + MOVQ block+0(FP), AX + VPBROADCASTD (AX), Y15 + + VPMINSD 0(AX), Y15, Y0 + VPMINSD 32(AX), Y15, Y1 + VPMINSD 64(AX), Y15, Y2 + VPMINSD 96(AX), Y15, Y3 + VPMINSD 128(AX), Y15, Y4 + VPMINSD 160(AX), Y15, Y5 + VPMINSD 192(AX), Y15, Y6 + VPMINSD 224(AX), Y15, Y7 + VPMINSD 256(AX), Y15, Y8 + VPMINSD 288(AX), Y15, Y9 + VPMINSD 320(AX), Y15, Y10 + VPMINSD 352(AX), Y15, Y11 + VPMINSD 384(AX), Y15, Y12 + VPMINSD 416(AX), Y15, Y13 + VPMINSD 448(AX), Y15, Y14 + VPMINSD 480(AX), Y15, Y15 + + VPMINSD Y1, Y0, Y0 + VPMINSD Y3, Y2, Y2 + VPMINSD Y5, Y4, Y4 + VPMINSD Y7, Y6, Y6 + VPMINSD Y9, Y8, Y8 + VPMINSD Y11, Y10, Y10 + VPMINSD Y13, Y12, Y12 + VPMINSD Y15, Y14, Y14 + + VPMINSD Y2, Y0, Y0 + VPMINSD Y6, Y4, Y4 + VPMINSD Y10, Y8, Y8 + VPMINSD Y14, Y12, Y12 + + VPMINSD Y4, Y0, Y0 + VPMINSD Y12, Y8, Y8 + + VPMINSD Y8, Y0, Y0 + + VPERM2I128 $1, Y0, Y0, Y1 + VPMINSD Y1, Y0, Y0 + + VPSHUFD $0b00011011, Y0, Y1 + VPMINSD Y1, Y0, Y0 + VZEROUPPER + + MOVQ X0, CX + MOVL CX, BX + SHRQ $32, CX + CMPL CX, BX + CMOVLLT CX, BX + MOVL BX, ret+8(FP) + RET + +#define subInt32AVX2x32(baseAddr, offset) \ + VMOVDQU offset+0(baseAddr), Y1 \ + VMOVDQU offset+32(baseAddr), Y2 \ + VMOVDQU offset+64(baseAddr), Y3 \ + VMOVDQU offset+96(baseAddr), Y4 \ + VPSUBD 
Y0, Y1, Y1 \ + VPSUBD Y0, Y2, Y2 \ + VPSUBD Y0, Y3, Y3 \ + VPSUBD Y0, Y4, Y4 \ + VMOVDQU Y1, offset+0(baseAddr) \ + VMOVDQU Y2, offset+32(baseAddr) \ + VMOVDQU Y3, offset+64(baseAddr) \ + VMOVDQU Y4, offset+96(baseAddr) + +// func blockSubInt32AVX2(block *[blockSize]int32, value int32) +TEXT ·blockSubInt32AVX2(SB), NOSPLIT, $0-12 + MOVQ block+0(FP), AX + VPBROADCASTD value+8(FP), Y0 + subInt32AVX2x32(AX, 0) + subInt32AVX2x32(AX, 128) + subInt32AVX2x32(AX, 256) + subInt32AVX2x32(AX, 384) + VZEROUPPER + RET + +// func blockBitWidthsInt32AVX2(bitWidths *[numMiniBlocks]byte, block *[blockSize]int32) +TEXT ·blockBitWidthsInt32AVX2(SB), NOSPLIT, $0-16 + MOVQ bitWidths+0(FP), AX + MOVQ block+8(FP), BX + + // AVX2 only has signed comparisons (and min/max), we emulate working on + // unsigned values by adding -2^31 to the values. Y5 is a vector of -2^31 + // used to offset 8 packed 32 bits integers in other YMM registers where + // the block data are loaded. + VPCMPEQD Y5, Y5, Y5 + VPSLLD $31, Y5, Y5 + + XORQ DI, DI +loop: + VPBROADCASTD (BX), Y0 // max + VPADDD Y5, Y0, Y0 + + VMOVDQU (BX), Y1 + VMOVDQU 32(BX), Y2 + VMOVDQU 64(BX), Y3 + VMOVDQU 96(BX), Y4 + + VPADDD Y5, Y1, Y1 + VPADDD Y5, Y2, Y2 + VPADDD Y5, Y3, Y3 + VPADDD Y5, Y4, Y4 + + VPMAXSD Y2, Y1, Y1 + VPMAXSD Y4, Y3, Y3 + VPMAXSD Y3, Y1, Y1 + VPMAXSD Y1, Y0, Y0 + + VPERM2I128 $1, Y0, Y0, Y1 + VPMAXSD Y1, Y0, Y0 + + VPSHUFD $0b00011011, Y0, Y1 + VPMAXSD Y1, Y0, Y0 + VPSUBD Y5, Y0, Y0 + + MOVQ X0, CX + MOVL CX, DX + SHRQ $32, CX + CMPL CX, DX + CMOVLHI CX, DX + + LZCNTL DX, DX + NEGL DX + ADDL $32, DX + MOVB DX, (AX)(DI*1) + + ADDQ $128, BX + INCQ DI + CMPQ DI, $numMiniBlocks + JNE loop + VZEROUPPER + RET + +// encodeMiniBlockInt32Default is the generic implementation of the algorithm to +// pack 32 bit integers into values of a given bit width (<=32). 
+// +// This algorithm is much slower than the vectorized versions, but is useful +// as a reference implementation to run the tests against, and as fallback when +// the code runs on a CPU which does not support the AVX2 instruction set. +// +// func encodeMiniBlockInt32Default(dst *byte, src *[miniBlockSize]int32, bitWidth uint) +TEXT ·encodeMiniBlockInt32Default(SB), NOSPLIT, $0-24 + MOVQ dst+0(FP), AX + MOVQ src+8(FP), BX + MOVQ bitWidth+16(FP), R9 + + XORQ DI, DI // bitOffset + XORQ SI, SI +loop: + MOVQ DI, CX + MOVQ DI, DX + + ANDQ $0b11111, CX // bitOffset % 32 + SHRQ $5, DX // bitOffset / 32 + + MOVLQZX (BX)(SI*4), R8 + SHLQ CX, R8 + ORQ R8, (AX)(DX*4) + + ADDQ R9, DI + INCQ SI + CMPQ SI, $miniBlockSize + JNE loop + RET + +// encodeMiniBlockInt32x1bitAVX2 packs 32 bit integers into 1 bit values in the +// the output buffer. +// +// The algorithm uses MOVMSKPS to extract the 8 relevant bits from the 8 values +// packed in YMM registers, then combines 4 of these into a 32 bit word which +// then gets written to the output. The result is 32 bits because each mini +// block has 32 values (the block size is 128 and there are 4 mini blocks per +// block). +// +// func encodeMiniBlockInt32x1bitAVX2(dst *byte, src *[miniBlockSize]int32) +TEXT ·encodeMiniBlockInt32x1bitAVX2(SB), NOSPLIT, $0-16 + MOVQ dst+0(FP), AX + MOVQ src+8(FP), BX + + VMOVDQU 0(BX), Y0 + VMOVDQU 32(BX), Y1 + VMOVDQU 64(BX), Y2 + VMOVDQU 96(BX), Y3 + + VPSLLD $31, Y0, Y0 + VPSLLD $31, Y1, Y1 + VPSLLD $31, Y2, Y2 + VPSLLD $31, Y3, Y3 + + VMOVMSKPS Y0, R8 + VMOVMSKPS Y1, R9 + VMOVMSKPS Y2, R10 + VMOVMSKPS Y3, R11 + + SHLL $8, R9 + SHLL $16, R10 + SHLL $24, R11 + + ORL R9, R8 + ORL R10, R8 + ORL R11, R8 + MOVL R8, (AX) + VZEROUPPER + RET + +// encodeMiniBlockInt32x2bitsAVX2 implements an algorithm for packing 32 bit +// integers into 2 bit values. 
+// +// The algorithm is derived from the one employed in encodeMiniBlockInt32x1bitAVX2 +// but needs to perform a bit extra work since MOVMSKPS can only extract one bit +// per packed integer of each YMM vector. We run two passes to extract the two +// bits needed to compose each item of the result, and merge the values by +// interleaving the first and second bits with PDEP. +// +// func encodeMiniBlockInt32x2bitsAVX2(dst *byte, src *[miniBlockSize]int32) +TEXT ·encodeMiniBlockInt32x2bitsAVX2(SB), NOSPLIT, $0-16 + MOVQ dst+0(FP), AX + MOVQ src+8(FP), BX + + VMOVDQU 0(BX), Y0 + VMOVDQU 32(BX), Y1 + VMOVDQU 64(BX), Y2 + VMOVDQU 96(BX), Y3 + + VPSLLD $31, Y0, Y4 + VPSLLD $31, Y1, Y5 + VPSLLD $31, Y2, Y6 + VPSLLD $31, Y3, Y7 + + VMOVMSKPS Y4, R8 + VMOVMSKPS Y5, R9 + VMOVMSKPS Y6, R10 + VMOVMSKPS Y7, R11 + + SHLQ $8, R9 + SHLQ $16, R10 + SHLQ $24, R11 + ORQ R9, R8 + ORQ R10, R8 + ORQ R11, R8 + + MOVQ $0x5555555555555555, DX // 0b010101... + PDEPQ DX, R8, R8 + + VPSLLD $30, Y0, Y8 + VPSLLD $30, Y1, Y9 + VPSLLD $30, Y2, Y10 + VPSLLD $30, Y3, Y11 + + VMOVMSKPS Y8, R12 + VMOVMSKPS Y9, R13 + VMOVMSKPS Y10, R14 + VMOVMSKPS Y11, R15 + + SHLQ $8, R13 + SHLQ $16, R14 + SHLQ $24, R15 + ORQ R13, R12 + ORQ R14, R12 + ORQ R15, R12 + + MOVQ $0xAAAAAAAAAAAAAAAA, DI // 0b101010... + PDEPQ DI, R12, R12 + + ORQ R12, R8 + MOVQ R8, (AX) + VZEROUPPER + RET + +// encodeMiniBlockInt32x32bitsAVX2 is a specialization of the bit packing logic +// for 32 bit integers when the output bit width is also 32, in which case a +// simple copy of the mini block to the output buffer produces the result. 
+// +// func encodeMiniBlockInt32x32bitsAVX2(dst *byte, src *[miniBlockSize]int32) +TEXT ·encodeMiniBlockInt32x32bitsAVX2(SB), NOSPLIT, $0-16 + MOVQ dst+0(FP), AX + MOVQ src+8(FP), BX + VMOVDQU 0(BX), Y0 + VMOVDQU 32(BX), Y1 + VMOVDQU 64(BX), Y2 + VMOVDQU 96(BX), Y3 + VMOVDQU Y0, 0(AX) + VMOVDQU Y1, 32(AX) + VMOVDQU Y2, 64(AX) + VMOVDQU Y3, 96(AX) + VZEROUPPER + RET + +// encodeMiniBlockInt32x3to16bitsAVX2 is the algorithm used to bit-pack 32 bit +// integers into values of width 3 to 16 bits. +// +// This function is a small overhead due to having to initialize registers with +// values that depend on the bit width. We measured this cost at ~10% throughput +// in synthetic benchmarks compared to generating constant shifts and offsets +// using a macro. Using a single function rather than generating one for each +// bit width has the benefit of reducing the code size, which in practice can +// also yield benefits like reducing CPU cache misses. Not using a macro also +// has other advantages like providing accurate line number of stack traces and +// enabling the use of breakpoints when debugging. Overall, this approach seemed +// to be the right trade off between performance and maintainability. +// +// The algorithm treats chunks of 8 values in 4 iterations to process all 32 +// values of the mini block. Writes to the output buffer are aligned on 128 bits +// since we may write up to 128 bits (8 x 16 bits). Padding is therefore +// required in the output buffer to avoid triggering a segfault. +// The encodeInt32AVX2 method adds enough padding when sizing the output buffer +// to account for this requirement. +// +// We leverage the two lanes of YMM registers to work on two sets of 4 values +// (in the sequence of VMOVDQU/VPSHUFD, VPAND, VPSLLQ, VPOR), resulting in having +// two sets of bit-packed values in the lower 64 bits of each YMM lane. 
+// The upper lane is then permuted into a lower lane to merge the two results, +// which may not be aligned on byte boundaries so we shift the lower and upper +// bits and compose two sets of 128 bits sequences (VPSLLQ, VPSRLQ, VBLENDPD), +// merge them and write the 16 bytes result to the output buffer. +TEXT ·encodeMiniBlockInt32x3to16bitsAVX2(SB), NOSPLIT, $0-24 + MOVQ dst+0(FP), AX + MOVQ src+8(FP), BX + MOVQ bitWidth+16(FP), CX + + VPBROADCASTQ bitWidth+16(FP), Y6 // [1*bitWidth...] + VPSLLQ $1, Y6, Y7 // [2*bitWidth...] + VPADDQ Y6, Y7, Y8 // [3*bitWidth...] + VPSLLQ $2, Y6, Y9 // [4*bitWidth...] + + VPBROADCASTQ sixtyfour<>(SB), Y10 + VPSUBQ Y6, Y10, Y11 // [64-1*bitWidth...] + VPSUBQ Y9, Y10, Y12 // [64-4*bitWidth...] + VPCMPEQQ Y4, Y4, Y4 + VPSRLVQ Y11, Y4, Y4 + + VPXOR Y5, Y5, Y5 + XORQ SI, SI +loop: + VMOVDQU (BX)(SI*4), Y0 + VPSHUFD $0b01010101, Y0, Y1 + VPSHUFD $0b10101010, Y0, Y2 + VPSHUFD $0b11111111, Y0, Y3 + + VPAND Y4, Y0, Y0 + VPAND Y4, Y1, Y1 + VPAND Y4, Y2, Y2 + VPAND Y4, Y3, Y3 + + VPSLLVQ Y6, Y1, Y1 + VPSLLVQ Y7, Y2, Y2 + VPSLLVQ Y8, Y3, Y3 + + VPOR Y1, Y0, Y0 + VPOR Y3, Y2, Y2 + VPOR Y2, Y0, Y0 + + VPERMQ $0b00001010, Y0, Y1 + + VPSLLVQ X9, X1, X2 + VPSRLQ X12, X1, X3 + VBLENDPD $0b10, X3, X2, X1 + VBLENDPD $0b10, X5, X0, X0 + VPOR X1, X0, X0 + + VMOVDQU X0, (AX) + + ADDQ CX, AX + ADDQ $8, SI + CMPQ SI, $miniBlockSize + JNE loop + VZEROUPPER + RET + +GLOBL sixtyfour<>(SB), RODATA|NOPTR, $32 +DATA sixtyfour<>+0(SB)/8, $64 +DATA sixtyfour<>+8(SB)/8, $64 +DATA sixtyfour<>+16(SB)/8, $64 +DATA sixtyfour<>+24(SB)/8, $64 + +// func decodeBlockInt32Default(dst []int32, minDelta, lastValue int32) int32 +TEXT ·decodeBlockInt32Default(SB), NOSPLIT, $0-36 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), BX + MOVLQZX minDelta+24(FP), CX + MOVLQZX lastValue+28(FP), DX + XORQ SI, SI + JMP test +loop: + MOVL (AX)(SI*4), DI + ADDL CX, DI + ADDL DI, DX + MOVL DX, (AX)(SI*4) + INCQ SI +test: + CMPQ SI, BX + JNE loop +done: + MOVL DX, ret+32(FP) + RET + +// 
func decodeBlockInt32AVX2(dst []int32, minDelta, lastValue int32) int32 +TEXT ·decodeBlockInt32AVX2(SB), NOSPLIT, $0-36 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), BX + MOVLQZX minDelta+24(FP), CX + MOVLQZX lastValue+28(FP), DX + XORQ SI, SI + + CMPQ BX, $8 + JB test + + MOVQ BX, DI + SHRQ $3, DI + SHLQ $3, DI + + VPXOR X1, X1, X1 + MOVQ CX, X0 + MOVQ DX, X1 + VPBROADCASTD X0, Y0 +loopAVX2: + VMOVDQU (AX)(SI*4), Y2 + VPADDD Y0, Y2, Y2 // Y2[:] += minDelta + VPADDD Y1, Y2, Y2 // Y2[0] += lastValue + + VPSLLDQ $4, Y2, Y3 + VPADDD Y3, Y2, Y2 + + VPSLLDQ $8, Y2, Y3 + VPADDD Y3, Y2, Y2 + + VPSHUFD $0xFF, X2, X1 + VPERM2I128 $1, Y2, Y2, Y3 + VPADDD X1, X3, X3 + + VMOVDQU X2, (AX)(SI*4) + VMOVDQU X3, 16(AX)(SI*4) + VPSRLDQ $12, X3, X1 // lastValue + + ADDQ $8, SI + CMPQ SI, DI + JNE loopAVX2 + VZEROUPPER + MOVQ X1, DX + JMP test +loop: + MOVL (AX)(SI*4), DI + ADDL CX, DI + ADDL DI, DX + MOVL DX, (AX)(SI*4) + INCQ SI +test: + CMPQ SI, BX + JNE loop +done: + MOVL DX, ret+32(FP) + RET + +// ----------------------------------------------------------------------------- +// 64 bits +// ----------------------------------------------------------------------------- + +#define deltaInt64AVX2x4(baseAddr) \ + VMOVDQU baseAddr, Y1 \ // [0,1,2,3] + VPERMQ $0b10010011, Y1, Y2 \ // [3,0,1,2] + VPBLENDD $3, Y0, Y2, Y2 \ // [x,0,1,2] + VPSUBQ Y2, Y1, Y2 \ // [0,1,2,3] - [x,0,1,2] + VMOVDQU Y2, baseAddr \ + VPERMQ $0b10010011, Y1, Y0 + +// func blockDeltaInt64AVX2(block *[blockSize]int64, lastValue int64) int64 +TEXT ·blockDeltaInt64AVX2(SB), NOSPLIT, $0-24 + MOVQ block+0(FP), AX + MOVQ 8*blockSize-8(AX), CX + MOVQ CX, ret+16(FP) + + VPBROADCASTQ lastValue+8(FP), Y0 + XORQ SI, SI +loop: + deltaInt64AVX2x4((AX)(SI*8)) + deltaInt64AVX2x4(32(AX)(SI*8)) + deltaInt64AVX2x4(64(AX)(SI*8)) + deltaInt64AVX2x4(96(AX)(SI*8)) + ADDQ $16, SI + CMPQ SI, $blockSize + JNE loop + VZEROUPPER + RET + +// vpminsq is an emulation of the AVX-512 VPMINSQ instruction with AVX2. 
+#define vpminsq(ones, tmp, arg2, arg1, ret) \ + VPCMPGTQ arg1, arg2, tmp \ + VPBLENDVB tmp, arg1, arg2, ret + +// func blockMinInt64AVX2(block *[blockSize]int64) int64 +TEXT ·blockMinInt64AVX2(SB), NOSPLIT, $0-16 + MOVQ block+0(FP), AX + XORQ SI, SI + VPCMPEQQ Y9, Y9, Y9 // ones + VPBROADCASTQ (AX), Y0 +loop: + VMOVDQU 0(AX)(SI*8), Y1 + VMOVDQU 32(AX)(SI*8), Y2 + VMOVDQU 64(AX)(SI*8), Y3 + VMOVDQU 96(AX)(SI*8), Y4 + VMOVDQU 128(AX)(SI*8), Y5 + VMOVDQU 160(AX)(SI*8), Y6 + VMOVDQU 192(AX)(SI*8), Y7 + VMOVDQU 224(AX)(SI*8), Y8 + + vpminsq(Y9, Y10, Y0, Y1, Y1) + vpminsq(Y9, Y11, Y0, Y2, Y2) + vpminsq(Y9, Y12, Y0, Y3, Y3) + vpminsq(Y9, Y13, Y0, Y4, Y4) + vpminsq(Y9, Y14, Y0, Y5, Y5) + vpminsq(Y9, Y15, Y0, Y6, Y6) + vpminsq(Y9, Y10, Y0, Y7, Y7) + vpminsq(Y9, Y11, Y0, Y8, Y8) + + vpminsq(Y9, Y12, Y2, Y1, Y1) + vpminsq(Y9, Y13, Y4, Y3, Y3) + vpminsq(Y9, Y14, Y6, Y5, Y5) + vpminsq(Y9, Y15, Y8, Y7, Y7) + + vpminsq(Y9, Y10, Y3, Y1, Y1) + vpminsq(Y9, Y11, Y7, Y5, Y5) + vpminsq(Y9, Y12, Y5, Y1, Y0) + + ADDQ $32, SI + CMPQ SI, $blockSize + JNE loop + + VPERM2I128 $1, Y0, Y0, Y1 + vpminsq(Y9, Y10, Y1, Y0, Y0) + + MOVQ X0, CX + VPEXTRQ $1, X0, BX + CMPQ CX, BX + CMOVQLT CX, BX + MOVQ BX, ret+8(FP) + VZEROUPPER + RET + +#define subInt64AVX2x32(baseAddr, offset) \ + VMOVDQU offset+0(baseAddr), Y1 \ + VMOVDQU offset+32(baseAddr), Y2 \ + VMOVDQU offset+64(baseAddr), Y3 \ + VMOVDQU offset+96(baseAddr), Y4 \ + VMOVDQU offset+128(baseAddr), Y5 \ + VMOVDQU offset+160(baseAddr), Y6 \ + VMOVDQU offset+192(baseAddr), Y7 \ + VMOVDQU offset+224(baseAddr), Y8 \ + VPSUBQ Y0, Y1, Y1 \ + VPSUBQ Y0, Y2, Y2 \ + VPSUBQ Y0, Y3, Y3 \ + VPSUBQ Y0, Y4, Y4 \ + VPSUBQ Y0, Y5, Y5 \ + VPSUBQ Y0, Y6, Y6 \ + VPSUBQ Y0, Y7, Y7 \ + VPSUBQ Y0, Y8, Y8 \ + VMOVDQU Y1, offset+0(baseAddr) \ + VMOVDQU Y2, offset+32(baseAddr) \ + VMOVDQU Y3, offset+64(baseAddr) \ + VMOVDQU Y4, offset+96(baseAddr) \ + VMOVDQU Y5, offset+128(baseAddr) \ + VMOVDQU Y6, offset+160(baseAddr) \ + VMOVDQU Y7, offset+192(baseAddr) \ + VMOVDQU 
Y8, offset+224(baseAddr) + +// func blockSubInt64AVX2(block *[blockSize]int64, value int64) +TEXT ·blockSubInt64AVX2(SB), NOSPLIT, $0-16 + MOVQ block+0(FP), AX + VPBROADCASTQ value+8(FP), Y0 + subInt64AVX2x32(AX, 0) + subInt64AVX2x32(AX, 256) + subInt64AVX2x32(AX, 512) + subInt64AVX2x32(AX, 768) + VZEROUPPER + RET + +// vpmaxsq is an emulation of the AVX-512 VPMAXSQ instruction with AVX2. +#define vpmaxsq(tmp, arg2, arg1, ret) \ + VPCMPGTQ arg2, arg1, tmp \ + VPBLENDVB tmp, arg1, arg2, ret + +// func blockBitWidthsInt64AVX2(bitWidths *[numMiniBlocks]byte, block *[blockSize]int64) +TEXT ·blockBitWidthsInt64AVX2(SB), NOSPLIT, $0-16 + MOVQ bitWidths+0(FP), AX + MOVQ block+8(FP), BX + + // AVX2 only has signed comparisons (and min/max), we emulate working on + // unsigned values by adding -2^64 to the values. Y9 is a vector of -2^64 + // used to offset 4 packed 64 bits integers in other YMM registers where + // the block data are loaded. + VPCMPEQQ Y9, Y9, Y9 + VPSLLQ $63, Y9, Y9 + + XORQ DI, DI +loop: + VPBROADCASTQ (BX), Y0 // max + VPADDQ Y9, Y0, Y0 + + VMOVDQU (BX), Y1 + VMOVDQU 32(BX), Y2 + VMOVDQU 64(BX), Y3 + VMOVDQU 96(BX), Y4 + VMOVDQU 128(BX), Y5 + VMOVDQU 160(BX), Y6 + VMOVDQU 192(BX), Y7 + VMOVDQU 224(BX), Y8 + + VPADDQ Y9, Y1, Y1 + VPADDQ Y9, Y2, Y2 + VPADDQ Y9, Y3, Y3 + VPADDQ Y9, Y4, Y4 + VPADDQ Y9, Y5, Y5 + VPADDQ Y9, Y6, Y6 + VPADDQ Y9, Y7, Y7 + VPADDQ Y9, Y8, Y8 + + vpmaxsq(Y10, Y2, Y1, Y1) + vpmaxsq(Y11, Y4, Y3, Y3) + vpmaxsq(Y12, Y6, Y5, Y5) + vpmaxsq(Y13, Y8, Y7, Y7) + + vpmaxsq(Y10, Y3, Y1, Y1) + vpmaxsq(Y11, Y7, Y5, Y5) + vpmaxsq(Y12, Y5, Y1, Y1) + vpmaxsq(Y13, Y1, Y0, Y0) + + VPERM2I128 $1, Y0, Y0, Y1 + vpmaxsq(Y10, Y1, Y0, Y0) + VPSUBQ Y9, Y0, Y0 + + MOVQ X0, CX + VPEXTRQ $1, X0, DX + CMPQ CX, DX + CMOVQHI CX, DX + + LZCNTQ DX, DX + NEGQ DX + ADDQ $64, DX + MOVB DX, (AX)(DI*1) + + ADDQ $256, BX + INCQ DI + CMPQ DI, $numMiniBlocks + JNE loop + VZEROUPPER + RET + +// encodeMiniBlockInt64Default is the generic implementation of the algorithm to 
+// pack 64 bit integers into values of a given bit width (<=64). +// +// This algorithm is much slower than the vectorized versions, but is useful +// as a reference implementation to run the tests against, and as fallback when +// the code runs on a CPU which does not support the AVX2 instruction set. +// +// func encodeMiniBlockInt64Default(dst *byte, src *[miniBlockSize]int64, bitWidth uint) +TEXT ·encodeMiniBlockInt64Default(SB), NOSPLIT, $0-24 + MOVQ dst+0(FP), AX + MOVQ src+8(FP), BX + MOVQ bitWidth+16(FP), R10 + + XORQ R11, R11 // zero + XORQ DI, DI // bitOffset + XORQ SI, SI +loop: + MOVQ DI, CX + MOVQ DI, DX + + ANDQ $0b111111, CX // bitOffset % 64 + SHRQ $6, DX // bitOffset / 64 + + MOVQ (BX)(SI*8), R8 + MOVQ R8, R9 + SHLQ CX, R8 + NEGQ CX + ADDQ $64, CX + SHRQ CX, R9 + CMPQ CX, $64 + CMOVQEQ R11, R9 // needed because shifting by more than 63 is undefined + + ORQ R8, 0(AX)(DX*8) + ORQ R9, 8(AX)(DX*8) + + ADDQ R10, DI + INCQ SI + CMPQ SI, $miniBlockSize + JNE loop + RET + +// func encodeMiniBlockInt64x1bitAVX2(dst *byte, src *[miniBlockSize]int64) +TEXT ·encodeMiniBlockInt64x1bitAVX2(SB), NOSPLIT, $0-16 + MOVQ dst+0(FP), AX + MOVQ src+8(FP), BX + + VMOVDQU 0(BX), Y0 + VMOVDQU 32(BX), Y1 + VMOVDQU 64(BX), Y2 + VMOVDQU 96(BX), Y3 + VMOVDQU 128(BX), Y4 + VMOVDQU 160(BX), Y5 + VMOVDQU 192(BX), Y6 + VMOVDQU 224(BX), Y7 + + VPSLLQ $63, Y0, Y0 + VPSLLQ $63, Y1, Y1 + VPSLLQ $63, Y2, Y2 + VPSLLQ $63, Y3, Y3 + VPSLLQ $63, Y4, Y4 + VPSLLQ $63, Y5, Y5 + VPSLLQ $63, Y6, Y6 + VPSLLQ $63, Y7, Y7 + + VMOVMSKPD Y0, R8 + VMOVMSKPD Y1, R9 + VMOVMSKPD Y2, R10 + VMOVMSKPD Y3, R11 + VMOVMSKPD Y4, R12 + VMOVMSKPD Y5, R13 + VMOVMSKPD Y6, R14 + VMOVMSKPD Y7, R15 + + SHLL $4, R9 + SHLL $8, R10 + SHLL $12, R11 + SHLL $16, R12 + SHLL $20, R13 + SHLL $24, R14 + SHLL $28, R15 + + ORL R9, R8 + ORL R11, R10 + ORL R13, R12 + ORL R15, R14 + ORL R10, R8 + ORL R14, R12 + ORL R12, R8 + + MOVL R8, (AX) + VZEROUPPER + RET + +// func encodeMiniBlockInt64x2bitsAVX2(dst *byte, src 
*[miniBlockSize]int64) +TEXT ·encodeMiniBlockInt64x2bitsAVX2(SB), NOSPLIT, $0-16 + MOVQ dst+0(FP), AX + MOVQ src+8(FP), BX + + VMOVDQU 0(BX), Y8 + VMOVDQU 32(BX), Y9 + VMOVDQU 64(BX), Y10 + VMOVDQU 96(BX), Y11 + VMOVDQU 128(BX), Y12 + VMOVDQU 160(BX), Y13 + VMOVDQU 192(BX), Y14 + VMOVDQU 224(BX), Y15 + + VPSLLQ $63, Y8, Y0 + VPSLLQ $63, Y9, Y1 + VPSLLQ $63, Y10, Y2 + VPSLLQ $63, Y11, Y3 + VPSLLQ $63, Y12, Y4 + VPSLLQ $63, Y13, Y5 + VPSLLQ $63, Y14, Y6 + VPSLLQ $63, Y15, Y7 + + VMOVMSKPD Y0, R8 + VMOVMSKPD Y1, R9 + VMOVMSKPD Y2, R10 + VMOVMSKPD Y3, R11 + VMOVMSKPD Y4, R12 + VMOVMSKPD Y5, R13 + VMOVMSKPD Y6, R14 + VMOVMSKPD Y7, R15 + + SHLQ $4, R9 + SHLQ $8, R10 + SHLQ $12, R11 + SHLQ $16, R12 + SHLQ $20, R13 + SHLQ $24, R14 + SHLQ $28, R15 + + ORQ R9, R8 + ORQ R11, R10 + ORQ R13, R12 + ORQ R15, R14 + ORQ R10, R8 + ORQ R14, R12 + ORQ R12, R8 + + MOVQ $0x5555555555555555, CX // 0b010101... + PDEPQ CX, R8, CX + + VPSLLQ $62, Y8, Y8 + VPSLLQ $62, Y9, Y9 + VPSLLQ $62, Y10, Y10 + VPSLLQ $62, Y11, Y11 + VPSLLQ $62, Y12, Y12 + VPSLLQ $62, Y13, Y13 + VPSLLQ $62, Y14, Y14 + VPSLLQ $62, Y15, Y15 + + VMOVMSKPD Y8, R8 + VMOVMSKPD Y9, R9 + VMOVMSKPD Y10, R10 + VMOVMSKPD Y11, R11 + VMOVMSKPD Y12, R12 + VMOVMSKPD Y13, R13 + VMOVMSKPD Y14, R14 + VMOVMSKPD Y15, R15 + + SHLQ $4, R9 + SHLQ $8, R10 + SHLQ $12, R11 + SHLQ $16, R12 + SHLQ $20, R13 + SHLQ $24, R14 + SHLQ $28, R15 + + ORQ R9, R8 + ORQ R11, R10 + ORQ R13, R12 + ORQ R15, R14 + ORQ R10, R8 + ORQ R14, R12 + ORQ R12, R8 + + MOVQ $0xAAAAAAAAAAAAAAAA, DX // 0b101010... 
+ PDEPQ DX, R8, DX + ORQ DX, CX + MOVQ CX, (AX) + VZEROUPPER + RET + +// func encodeMiniBlockInt64x64bitsAVX2(dst *byte, src *[miniBlockSize]int64) +TEXT ·encodeMiniBlockInt64x64bitsAVX2(SB), NOSPLIT, $0-16 + MOVQ dst+0(FP), AX + MOVQ src+8(FP), BX + VMOVDQU 0(BX), Y0 + VMOVDQU 32(BX), Y1 + VMOVDQU 64(BX), Y2 + VMOVDQU 96(BX), Y3 + VMOVDQU 128(BX), Y4 + VMOVDQU 160(BX), Y5 + VMOVDQU 192(BX), Y6 + VMOVDQU 224(BX), Y7 + VMOVDQU Y0, 0(AX) + VMOVDQU Y1, 32(AX) + VMOVDQU Y2, 64(AX) + VMOVDQU Y3, 96(AX) + VMOVDQU Y4, 128(AX) + VMOVDQU Y5, 160(AX) + VMOVDQU Y6, 192(AX) + VMOVDQU Y7, 224(AX) + VZEROUPPER + RET + +// func decodeBlockInt64Default(dst []int64, minDelta, lastValue int64) int64 +TEXT ·decodeBlockInt64Default(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), BX + MOVQ minDelta+24(FP), CX + MOVQ lastValue+32(FP), DX + XORQ SI, SI + JMP test +loop: + MOVQ (AX)(SI*8), DI + ADDQ CX, DI + ADDQ DI, DX + MOVQ DX, (AX)(SI*8) + INCQ SI +test: + CMPQ SI, BX + JNE loop +done: + MOVQ DX, ret+40(FP) + RET diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_purego.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_purego.go new file mode 100644 index 00000000000..aa1f62367bd --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/binary_packed_purego.go @@ -0,0 +1,105 @@ +//go:build purego || !amd64 + +package delta + +import ( + "encoding/binary" +) + +func encodeMiniBlockInt32(dst []byte, src *[miniBlockSize]int32, bitWidth uint) { + bitMask := uint32(1<> (32 - j)) + + binary.LittleEndian.PutUint32(dst[(i+0)*4:], lo) + binary.LittleEndian.PutUint32(dst[(i+1)*4:], hi) + + bitOffset += bitWidth + } +} + +func encodeMiniBlockInt64(dst []byte, src *[miniBlockSize]int64, bitWidth uint) { + bitMask := uint64(1<> (64 - j)) + + binary.LittleEndian.PutUint64(dst[(i+0)*8:], lo) + binary.LittleEndian.PutUint64(dst[(i+1)*8:], hi) + + bitOffset += bitWidth + } +} + +func decodeBlockInt32(block 
[]int32, minDelta, lastValue int32) int32 { + for i := range block { + block[i] += minDelta + block[i] += lastValue + lastValue = block[i] + } + return lastValue +} + +func decodeBlockInt64(block []int64, minDelta, lastValue int64) int64 { + for i := range block { + block[i] += minDelta + block[i] += lastValue + lastValue = block[i] + } + return lastValue +} + +func decodeMiniBlockInt32(dst []int32, src []uint32, bitWidth uint) { + bitMask := uint32(1<> j + if j+bitWidth > 32 { + k := 32 - j + d |= (src[i+1] & (bitMask >> k)) << k + } + dst[n] = int32(d) + bitOffset += bitWidth + } +} + +func decodeMiniBlockInt64(dst []int64, src []uint32, bitWidth uint) { + bitMask := uint64(1<> j + if j+bitWidth > 32 { + k := 32 - j + d |= (uint64(src[i+1]) & (bitMask >> k)) << k + if j+bitWidth > 64 { + k := 64 - j + d |= (uint64(src[i+2]) & (bitMask >> k)) << k + } + } + dst[n] = int64(d) + bitOffset += bitWidth + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array.go new file mode 100644 index 00000000000..7033dc95b3e --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array.go @@ -0,0 +1,212 @@ +package delta + +import ( + "bytes" + "sort" + + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +const ( + maxLinearSearchPrefixLength = 64 // arbitrary +) + +type ByteArrayEncoding struct { + encoding.NotSupported +} + +func (e *ByteArrayEncoding) String() string { + return "DELTA_BYTE_ARRAY" +} + +func (e *ByteArrayEncoding) Encoding() format.Encoding { + return format.DeltaByteArray +} + +func (e *ByteArrayEncoding) EncodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, error) { + prefix := getInt32Buffer() + defer putInt32Buffer(prefix) + + length := getInt32Buffer() + defer putInt32Buffer(length) + + totalSize := 0 + if len(offsets) > 0 { + lastValue := ([]byte)(nil) + baseOffset := offsets[0] 
+ + for _, endOffset := range offsets[1:] { + v := src[baseOffset:endOffset:endOffset] + n := int(endOffset - baseOffset) + p := 0 + baseOffset = endOffset + + if len(v) <= maxLinearSearchPrefixLength { + p = linearSearchPrefixLength(lastValue, v) + } else { + p = binarySearchPrefixLength(lastValue, v) + } + + prefix.values = append(prefix.values, int32(p)) + length.values = append(length.values, int32(n-p)) + lastValue = v + totalSize += n - p + } + } + + dst = dst[:0] + dst = encodeInt32(dst, prefix.values) + dst = encodeInt32(dst, length.values) + dst = resize(dst, len(dst)+totalSize) + + if len(offsets) > 0 { + b := dst[len(dst)-totalSize:] + i := int(offsets[0]) + j := 0 + + _ = length.values[:len(prefix.values)] + + for k, p := range prefix.values { + n := p + length.values[k] + j += copy(b[j:], src[i+int(p):i+int(n)]) + i += int(n) + } + } + + return dst, nil +} + +func (e *ByteArrayEncoding) EncodeFixedLenByteArray(dst []byte, src []byte, size int) ([]byte, error) { + // The parquet specs say that this encoding is only supported for BYTE_ARRAY + // values, but the reference Java implementation appears to support + // FIXED_LEN_BYTE_ARRAY as well: + // https://github.com/apache/parquet-java/blob/5608695f5777de1eb0899d9075ec9411cfdf31d3/parquet-column/src/main/java/org/apache/parquet/column/Encoding.java#L211 + if size < 0 || size > encoding.MaxFixedLenByteArraySize { + return dst[:0], encoding.Error(e, encoding.ErrInvalidArgument) + } + if (len(src) % size) != 0 { + return dst[:0], encoding.ErrEncodeInvalidInputSize(e, "FIXED_LEN_BYTE_ARRAY", len(src)) + } + + prefix := getInt32Buffer() + defer putInt32Buffer(prefix) + + length := getInt32Buffer() + defer putInt32Buffer(length) + + totalSize := 0 + lastValue := ([]byte)(nil) + + for i := size; i <= len(src); i += size { + v := src[i-size : i : i] + p := linearSearchPrefixLength(lastValue, v) + n := size - p + prefix.values = append(prefix.values, int32(p)) + length.values = append(length.values, int32(n)) + 
lastValue = v + totalSize += n + } + + dst = dst[:0] + dst = encodeInt32(dst, prefix.values) + dst = encodeInt32(dst, length.values) + dst = resize(dst, len(dst)+totalSize) + + b := dst[len(dst)-totalSize:] + i := 0 + j := 0 + + for _, p := range prefix.values { + j += copy(b[j:], src[i+int(p):i+size]) + i += size + } + + return dst, nil +} + +func (e *ByteArrayEncoding) DecodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, []uint32, error) { + dst, offsets = dst[:0], offsets[:0] + + prefix := getInt32Buffer() + defer putInt32Buffer(prefix) + + suffix := getInt32Buffer() + defer putInt32Buffer(suffix) + + var err error + src, err = prefix.decode(src) + if err != nil { + return dst, offsets, e.wrapf("decoding prefix lengths: %w", err) + } + src, err = suffix.decode(src) + if err != nil { + return dst, offsets, e.wrapf("decoding suffix lengths: %w", err) + } + if len(prefix.values) != len(suffix.values) { + return dst, offsets, e.wrap(errPrefixAndSuffixLengthMismatch(len(prefix.values), len(suffix.values))) + } + return decodeByteArray(dst, src, prefix.values, suffix.values, offsets) +} + +func (e *ByteArrayEncoding) DecodeFixedLenByteArray(dst []byte, src []byte, size int) ([]byte, error) { + dst = dst[:0] + + if size < 0 || size > encoding.MaxFixedLenByteArraySize { + return dst, e.wrap(encoding.ErrInvalidArgument) + } + + prefix := getInt32Buffer() + defer putInt32Buffer(prefix) + + suffix := getInt32Buffer() + defer putInt32Buffer(suffix) + + var err error + src, err = prefix.decode(src) + if err != nil { + return dst, e.wrapf("decoding prefix lengths: %w", err) + } + src, err = suffix.decode(src) + if err != nil { + return dst, e.wrapf("decoding suffix lengths: %w", err) + } + if len(prefix.values) != len(suffix.values) { + return dst, e.wrap(errPrefixAndSuffixLengthMismatch(len(prefix.values), len(suffix.values))) + } + return decodeFixedLenByteArray(dst[:0], src, size, prefix.values, suffix.values) +} + +func (e *ByteArrayEncoding) 
EstimateDecodeByteArraySize(src []byte) int { + length := getInt32Buffer() + defer putInt32Buffer(length) + src, _ = length.decode(src) + sum := int(length.sum()) + length.decode(src) + return sum + int(length.sum()) +} + +func (e *ByteArrayEncoding) wrap(err error) error { + if err != nil { + err = encoding.Error(e, err) + } + return err +} + +func (e *ByteArrayEncoding) wrapf(msg string, args ...any) error { + return encoding.Errorf(e, msg, args...) +} + +func linearSearchPrefixLength(base, data []byte) (n int) { + for n < len(base) && n < len(data) && base[n] == data[n] { + n++ + } + return n +} + +func binarySearchPrefixLength(base, data []byte) int { + n := min(len(base), len(data)) + return sort.Search(n, func(i int) bool { + return !bytes.Equal(base[:i+1], data[:i+1]) + }) +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_amd64.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_amd64.go new file mode 100644 index 00000000000..71a077ee881 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_amd64.go @@ -0,0 +1,164 @@ +//go:build !purego + +package delta + +import ( + "golang.org/x/sys/cpu" +) + +//go:noescape +func validatePrefixAndSuffixLengthValuesAVX2(prefix, suffix []int32, maxLength int) (totalPrefixLength, totalSuffixLength int, ok bool) + +func validatePrefixAndSuffixLengthValues(prefix, suffix []int32, maxLength int) (totalPrefixLength, totalSuffixLength int, err error) { + if cpu.X86.HasAVX2 { + totalPrefixLength, totalSuffixLength, ok := validatePrefixAndSuffixLengthValuesAVX2(prefix, suffix, maxLength) + if ok { + return totalPrefixLength, totalSuffixLength, nil + } + } + + lastValueLength := 0 + + for i := range prefix { + p := int(prefix[i]) + n := int(suffix[i]) + if p < 0 { + err = errInvalidNegativePrefixLength(p) + return + } + if n < 0 { + err = errInvalidNegativeValueLength(n) + return + } + if p > lastValueLength { + err = errPrefixLengthOutOfBounds(p, 
lastValueLength) + return + } + totalPrefixLength += p + totalSuffixLength += n + lastValueLength = p + n + } + + if totalSuffixLength > maxLength { + err = errValueLengthOutOfBounds(totalSuffixLength, maxLength) + return + } + + return totalPrefixLength, totalSuffixLength, nil +} + +//go:noescape +func decodeByteArrayOffsets(offsets []uint32, prefix, suffix []int32) + +//go:noescape +func decodeByteArrayAVX2(dst, src []byte, prefix, suffix []int32) int + +func decodeByteArray(dst, src []byte, prefix, suffix []int32, offsets []uint32) ([]byte, []uint32, error) { + totalPrefixLength, totalSuffixLength, err := validatePrefixAndSuffixLengthValues(prefix, suffix, len(src)) + if err != nil { + return dst, offsets, err + } + + totalLength := totalPrefixLength + totalSuffixLength + dst = resizeNoMemclr(dst, totalLength+padding) + + if size := len(prefix) + 1; cap(offsets) < size { + offsets = make([]uint32, size) + } else { + offsets = offsets[:size] + } + + _ = prefix[:len(suffix)] + _ = suffix[:len(prefix)] + decodeByteArrayOffsets(offsets, prefix, suffix) + + var lastValue []byte + var i int + var j int + + if cpu.X86.HasAVX2 && len(src) > padding { + k := len(suffix) + n := 0 + + for k > 0 && n < padding { + k-- + n += int(suffix[k]) + } + + if k > 0 && n >= padding { + i = decodeByteArrayAVX2(dst, src, prefix[:k], suffix[:k]) + j = len(src) - n + lastValue = dst[i-(int(prefix[k-1])+int(suffix[k-1])):] + prefix = prefix[k:] + suffix = suffix[k:] + } + } + + for k := range prefix { + p := int(prefix[k]) + n := int(suffix[k]) + lastValueOffset := i + i += copy(dst[i:], lastValue[:p]) + i += copy(dst[i:], src[j:j+n]) + j += n + lastValue = dst[lastValueOffset:] + } + + return dst[:totalLength], offsets, nil +} + +//go:noescape +func decodeByteArrayAVX2x128bits(dst, src []byte, prefix, suffix []int32) int + +func decodeFixedLenByteArray(dst, src []byte, size int, prefix, suffix []int32) ([]byte, error) { + totalPrefixLength, totalSuffixLength, err := 
validatePrefixAndSuffixLengthValues(prefix, suffix, len(src)) + if err != nil { + return dst, err + } + + totalLength := totalPrefixLength + totalSuffixLength + dst = resizeNoMemclr(dst, totalLength+padding) + + _ = prefix[:len(suffix)] + _ = suffix[:len(prefix)] + + var lastValue []byte + var i int + var j int + + if cpu.X86.HasAVX2 && len(src) > padding { + k := len(suffix) + n := 0 + + for k > 0 && n < padding { + k-- + n += int(suffix[k]) + } + + if k > 0 && n >= padding { + if size == 16 { + i = decodeByteArrayAVX2x128bits(dst, src, prefix[:k], suffix[:k]) + } else { + i = decodeByteArrayAVX2(dst, src, prefix[:k], suffix[:k]) + } + j = len(src) - n + prefix = prefix[k:] + suffix = suffix[k:] + if i >= size { + lastValue = dst[i-size:] + } + } + } + + for k := range prefix { + p := int(prefix[k]) + n := int(suffix[k]) + k := i + i += copy(dst[i:], lastValue[:p]) + i += copy(dst[i:], src[j:j+n]) + j += n + lastValue = dst[k:] + } + + return dst[:totalLength], nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_amd64.s b/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_amd64.s new file mode 100644 index 00000000000..b8b70983210 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_amd64.s @@ -0,0 +1,243 @@ +//go:build !purego + +#include "funcdata.h" +#include "textflag.h" + +// func validatePrefixAndSuffixLengthValuesAVX2(prefix, suffix []int32, maxLength int) (totalPrefixLength, totalSuffixLength int, ok bool) +TEXT ·validatePrefixAndSuffixLengthValuesAVX2(SB), NOSPLIT, $0-73 + MOVQ prefix_base+0(FP), AX + MOVQ suffix_base+24(FP), BX + MOVQ suffix_len+32(FP), CX + MOVQ maxLength+48(FP), DX + + XORQ SI, SI + XORQ DI, DI // lastValueLength + XORQ R8, R8 + XORQ R9, R9 + XORQ R10, R10 // totalPrefixLength + XORQ R11, R11 // totalSuffixLength + XORQ R12, R12 // ok + + CMPQ CX, $8 + JB test + + MOVQ CX, R13 + SHRQ $3, R13 + SHLQ $3, R13 + + VPXOR X0, X0, X0 // lastValueLengths + VPXOR 
X1, X1, X1 // totalPrefixLengths + VPXOR X2, X2, X2 // totalSuffixLengths + VPXOR X3, X3, X3 // negative prefix length sentinels + VPXOR X4, X4, X4 // negative suffix length sentinels + VPXOR X5, X5, X5 // prefix length overflow sentinels + VMOVDQU ·rotateLeft32(SB), Y6 + +loopAVX2: + VMOVDQU (AX)(SI*4), Y7 // p + VMOVDQU (BX)(SI*4), Y8 // n + + VPADDD Y7, Y1, Y1 + VPADDD Y8, Y2, Y2 + + VPOR Y7, Y3, Y3 + VPOR Y8, Y4, Y4 + + VPADDD Y7, Y8, Y9 // p + n + VPERMD Y0, Y6, Y10 + VPBLENDD $1, Y10, Y9, Y10 + VPCMPGTD Y10, Y7, Y10 + VPOR Y10, Y5, Y5 + + VMOVDQU Y9, Y0 + ADDQ $8, SI + CMPQ SI, R13 + JNE loopAVX2 + + // If any of the sentinel values has its most significant bit set then one + // of the values was negative or one of the prefixes was greater than the + // length of the previous value, return false. + VPOR Y4, Y3, Y3 + VPOR Y5, Y3, Y3 + VMOVMSKPS Y3, R13 + CMPQ R13, $0 + JNE done + + // We computed 8 sums in parallel for the prefix and suffix arrays, they + // need to be accumulated into single values, which is what these reduction + // steps do. + VPSRLDQ $4, Y1, Y5 + VPSRLDQ $8, Y1, Y6 + VPSRLDQ $12, Y1, Y7 + VPADDD Y5, Y1, Y1 + VPADDD Y6, Y1, Y1 + VPADDD Y7, Y1, Y1 + VPERM2I128 $1, Y1, Y1, Y0 + VPADDD Y0, Y1, Y1 + MOVQ X1, R10 + ANDQ $0x7FFFFFFF, R10 + + VPSRLDQ $4, Y2, Y5 + VPSRLDQ $8, Y2, Y6 + VPSRLDQ $12, Y2, Y7 + VPADDD Y5, Y2, Y2 + VPADDD Y6, Y2, Y2 + VPADDD Y7, Y2, Y2 + VPERM2I128 $1, Y2, Y2, Y0 + VPADDD Y0, Y2, Y2 + MOVQ X2, R11 + ANDQ $0x7FFFFFFF, R11 + + JMP test +loop: + MOVLQSX (AX)(SI*4), R8 + MOVLQSX (BX)(SI*4), R9 + + CMPQ R8, $0 // p < 0 ? + JL done + + CMPQ R9, $0 // n < 0 ? + JL done + + CMPQ R8, DI // p > lastValueLength ? + JG done + + ADDQ R8, R10 + ADDQ R9, R11 + ADDQ R8, DI + ADDQ R9, DI + + INCQ SI +test: + CMPQ SI, CX + JNE loop + + CMPQ R11, DX // totalSuffixLength > maxLength ? 
+ JG done + + MOVB $1, R12 +done: + MOVQ R10, totalPrefixLength+56(FP) + MOVQ R11, totalSuffixLength+64(FP) + MOVB R12, ok+72(FP) + RET + +// func decodeByteArrayOffsets(offsets []uint32, prefix, suffix []int32) +TEXT ·decodeByteArrayOffsets(SB), NOSPLIT, $0-72 + MOVQ offsets_base+0(FP), AX + MOVQ prefix_base+24(FP), BX + MOVQ suffix_base+48(FP), CX + MOVQ suffix_len+56(FP), DX + + XORQ SI, SI + XORQ R10, R10 + JMP test +loop: + MOVL (BX)(SI*4), R8 + MOVL (CX)(SI*4), R9 + MOVL R10, (AX)(SI*4) + ADDL R8, R10 + ADDL R9, R10 + INCQ SI +test: + CMPQ SI, DX + JNE loop + MOVL R10, (AX)(SI*4) + RET + +// func decodeByteArrayAVX2(dst, src []byte, prefix, suffix []int32) int +TEXT ·decodeByteArrayAVX2(SB), NOSPLIT, $0-104 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), BX + MOVQ prefix_base+48(FP), CX + MOVQ suffix_base+72(FP), DX + MOVQ suffix_len+80(FP), DI + + XORQ SI, SI + XORQ R8, R8 + XORQ R9, R9 + MOVQ AX, R10 // last value + + JMP test +loop: + MOVLQZX (CX)(SI*4), R8 // prefix length + MOVLQZX (DX)(SI*4), R9 // suffix length +prefix: + VMOVDQU (R10), Y0 + VMOVDQU Y0, (AX) + CMPQ R8, $32 + JA copyPrefix +suffix: + VMOVDQU (BX), Y1 + VMOVDQU Y1, (AX)(R8*1) + CMPQ R9, $32 + JA copySuffix +next: + MOVQ AX, R10 + ADDQ R9, R8 + LEAQ (AX)(R8*1), AX + LEAQ (BX)(R9*1), BX + INCQ SI +test: + CMPQ SI, DI + JNE loop + MOVQ dst_base+0(FP), BX + SUBQ BX, AX + MOVQ AX, ret+96(FP) + VZEROUPPER + RET +copyPrefix: + MOVQ $32, R12 +copyPrefixLoop: + VMOVDQU (R10)(R12*1), Y0 + VMOVDQU Y0, (AX)(R12*1) + ADDQ $32, R12 + CMPQ R12, R8 + JB copyPrefixLoop + JMP suffix +copySuffix: + MOVQ $32, R12 + LEAQ (AX)(R8*1), R13 +copySuffixLoop: + VMOVDQU (BX)(R12*1), Y1 + VMOVDQU Y1, (R13)(R12*1) + ADDQ $32, R12 + CMPQ R12, R9 + JB copySuffixLoop + JMP next + +// func decodeByteArrayAVX2x128bits(dst, src []byte, prefix, suffix []int32) int +TEXT ·decodeByteArrayAVX2x128bits(SB), NOSPLIT, $0-104 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), BX + MOVQ prefix_base+48(FP), CX + MOVQ 
suffix_base+72(FP), DX + MOVQ suffix_len+80(FP), DI + + XORQ SI, SI + XORQ R8, R8 + XORQ R9, R9 + VPXOR X0, X0, X0 + + JMP test +loop: + MOVLQZX (CX)(SI*4), R8 // prefix length + MOVLQZX (DX)(SI*4), R9 // suffix length + + VMOVDQU (BX), X1 + VMOVDQU X0, (AX) + VMOVDQU X1, (AX)(R8*1) + VMOVDQU (AX), X0 + + ADDQ R9, R8 + LEAQ (AX)(R8*1), AX + LEAQ (BX)(R9*1), BX + INCQ SI +test: + CMPQ SI, DI + JNE loop + MOVQ dst_base+0(FP), BX + SUBQ BX, AX + MOVQ AX, ret+96(FP) + VZEROUPPER + RET diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_purego.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_purego.go new file mode 100644 index 00000000000..972c1feca21 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/byte_array_purego.go @@ -0,0 +1,63 @@ +//go:build purego || !amd64 + +package delta + +func decodeByteArray(dst, src []byte, prefix, suffix []int32, offsets []uint32) ([]byte, []uint32, error) { + _ = prefix[:len(suffix)] + _ = suffix[:len(prefix)] + + var lastValue []byte + for i := range suffix { + n := int(suffix[i]) + p := int(prefix[i]) + if n < 0 { + return dst, offsets, errInvalidNegativeValueLength(n) + } + if n > len(src) { + return dst, offsets, errValueLengthOutOfBounds(n, len(src)) + } + if p < 0 { + return dst, offsets, errInvalidNegativePrefixLength(p) + } + if p > len(lastValue) { + return dst, offsets, errPrefixLengthOutOfBounds(p, len(lastValue)) + } + j := len(dst) + offsets = append(offsets, uint32(j)) + dst = append(dst, lastValue[:p]...) + dst = append(dst, src[:n]...) 
+ lastValue = dst[j:] + src = src[n:] + } + + return dst, append(offsets, uint32(len(dst))), nil +} + +func decodeFixedLenByteArray(dst, src []byte, size int, prefix, suffix []int32) ([]byte, error) { + _ = prefix[:len(suffix)] + _ = suffix[:len(prefix)] + + var lastValue []byte + for i := range suffix { + n := int(suffix[i]) + p := int(prefix[i]) + if n < 0 { + return dst, errInvalidNegativeValueLength(n) + } + if n > len(src) { + return dst, errValueLengthOutOfBounds(n, len(src)) + } + if p < 0 { + return dst, errInvalidNegativePrefixLength(p) + } + if p > len(lastValue) { + return dst, errPrefixLengthOutOfBounds(p, len(lastValue)) + } + j := len(dst) + dst = append(dst, lastValue[:p]...) + dst = append(dst, src[:n]...) + lastValue = dst[j:] + src = src[n:] + } + return dst, nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/delta.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/delta.go new file mode 100644 index 00000000000..75a20c8914a --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/delta.go @@ -0,0 +1,99 @@ +package delta + +import ( + "fmt" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type int32Buffer struct { + values []int32 +} + +func (buf *int32Buffer) resize(size int) { + if cap(buf.values) < size { + buf.values = make([]int32, size, 2*size) + } else { + buf.values = buf.values[:size] + } +} + +func (buf *int32Buffer) decode(src []byte) ([]byte, error) { + values, remain, err := decodeInt32(unsafecast.Slice[byte](buf.values[:0]), src) + buf.values = unsafecast.Slice[int32](values) + return remain, err +} + +func (buf *int32Buffer) sum() (sum int32) { + for _, v := range buf.values { + sum += v + } + return sum +} + +var ( + int32BufferPool memory.Pool[int32Buffer] +) + +func getInt32Buffer() *int32Buffer { + return int32BufferPool.Get( + func() *int32Buffer { + return &int32Buffer{ + values: make([]int32, 0, 1024), + } + }, + func(b 
*int32Buffer) { b.values = b.values[:0] }, + ) +} + +func putInt32Buffer(b *int32Buffer) { + int32BufferPool.Put(b) +} + +func resizeNoMemclr(buf []byte, size int) []byte { + if cap(buf) < size { + return grow(buf, size) + } + return buf[:size] +} + +func resize(buf []byte, size int) []byte { + if cap(buf) < size { + return grow(buf, size) + } + if size > len(buf) { + clear := buf[len(buf):size] + for i := range clear { + clear[i] = 0 + } + } + return buf[:size] +} + +func grow(buf []byte, size int) []byte { + newCap := max(2*cap(buf), size) + newBuf := make([]byte, size, newCap) + copy(newBuf, buf) + return newBuf +} + +func errPrefixAndSuffixLengthMismatch(prefixLength, suffixLength int) error { + return fmt.Errorf("length of prefix and suffix mismatch: %d != %d", prefixLength, suffixLength) +} + +func errInvalidNegativeValueLength(length int) error { + return fmt.Errorf("invalid negative value length: %d", length) +} + +func errInvalidNegativePrefixLength(length int) error { + return fmt.Errorf("invalid negative prefix length: %d", length) +} + +func errValueLengthOutOfBounds(length, maxLength int) error { + return fmt.Errorf("value length is larger than the input size: %d > %d", length, maxLength) +} + +func errPrefixLengthOutOfBounds(length, maxLength int) error { + return fmt.Errorf("prefix length %d is larger than the last value of size %d", length, maxLength) +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/delta_amd64.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/delta_amd64.go new file mode 100644 index 00000000000..864aeac136f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/delta_amd64.go @@ -0,0 +1,16 @@ +//go:build !purego + +package delta + +const ( + padding = 64 +) + +func findNegativeLength(lengths []int32) int { + for _, n := range lengths { + if n < 0 { + return int(n) + } + } + return -1 +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/delta_amd64.s 
b/vendor/github.com/parquet-go/parquet-go/encoding/delta/delta_amd64.s new file mode 100644 index 00000000000..e8748a263dc --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/delta_amd64.s @@ -0,0 +1,13 @@ +//go:build !purego + +#include "textflag.h" + +GLOBL ·rotateLeft32(SB), RODATA|NOPTR, $32 +DATA ·rotateLeft32+0(SB)/4, $7 +DATA ·rotateLeft32+4(SB)/4, $0 +DATA ·rotateLeft32+8(SB)/4, $1 +DATA ·rotateLeft32+12(SB)/4, $2 +DATA ·rotateLeft32+16(SB)/4, $3 +DATA ·rotateLeft32+20(SB)/4, $4 +DATA ·rotateLeft32+24(SB)/4, $5 +DATA ·rotateLeft32+28(SB)/4, $6 diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array.go new file mode 100644 index 00000000000..65ed6f79be0 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array.go @@ -0,0 +1,81 @@ +package delta + +import ( + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type LengthByteArrayEncoding struct { + encoding.NotSupported +} + +func (e *LengthByteArrayEncoding) String() string { + return "DELTA_LENGTH_BYTE_ARRAY" +} + +func (e *LengthByteArrayEncoding) Encoding() format.Encoding { + return format.DeltaLengthByteArray +} + +func (e *LengthByteArrayEncoding) EncodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, error) { + if len(offsets) == 0 { + return dst[:0], nil + } + + length := getInt32Buffer() + defer putInt32Buffer(length) + + length.resize(len(offsets) - 1) + encodeByteArrayLengths(length.values, offsets) + + dst = dst[:0] + dst = encodeInt32(dst, length.values) + dst = append(dst, src...) 
+ return dst, nil +} + +func (e *LengthByteArrayEncoding) DecodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, []uint32, error) { + dst, offsets = dst[:0], offsets[:0] + + length := getInt32Buffer() + defer putInt32Buffer(length) + + src, err := length.decode(src) + if err != nil { + return dst, offsets, e.wrap(err) + } + + if size := len(length.values) + 1; cap(offsets) < size { + offsets = make([]uint32, size, 2*size) + } else { + offsets = offsets[:size] + } + + lastOffset, invalidLength := decodeByteArrayLengths(offsets, length.values) + if invalidLength != 0 { + return dst, offsets, e.wrap(errInvalidNegativeValueLength(int(invalidLength))) + } + if int(lastOffset) > len(src) { + return dst, offsets, e.wrap(errValueLengthOutOfBounds(int(lastOffset), len(src))) + } + + return append(dst, src[:lastOffset]...), offsets, nil +} + +func (e *LengthByteArrayEncoding) EstimateDecodeByteArraySize(src []byte) int { + length := getInt32Buffer() + defer putInt32Buffer(length) + length.decode(src) + return int(length.sum()) +} + +func (e *LengthByteArrayEncoding) CanDecodeInPlace() bool { + return true +} + +func (e *LengthByteArrayEncoding) wrap(err error) error { + if err != nil { + err = encoding.Error(e, err) + } + return err +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_amd64.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_amd64.go new file mode 100644 index 00000000000..905e8516ee9 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_amd64.go @@ -0,0 +1,9 @@ +//go:build !purego + +package delta + +//go:noescape +func encodeByteArrayLengths(lengths []int32, offsets []uint32) + +//go:noescape +func decodeByteArrayLengths(offsets []uint32, lengths []int32) (lastOffset uint32, invalidLength int32) diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_amd64.s 
b/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_amd64.s new file mode 100644 index 00000000000..bc6292e2a14 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_amd64.s @@ -0,0 +1,122 @@ +//go:build !purego + +#include "textflag.h" + +// func encodeByteArrayLengths(lengths []int32, offsets []uint32) +TEXT ·encodeByteArrayLengths(SB), NOSPLIT, $0-48 + MOVQ lengths_base+0(FP), AX + MOVQ lengths_len+8(FP), CX + MOVQ offsets_base+24(FP), BX + XORQ SI, SI + + CMPQ CX, $4 + JB test + + MOVQ CX, DX + SHRQ $2, DX + SHLQ $2, DX + +loopSSE2: + MOVOU 0(BX)(SI*4), X0 + MOVOU 4(BX)(SI*4), X1 + PSUBL X0, X1 + MOVOU X1, (AX)(SI*4) + ADDQ $4, SI + CMPQ SI, DX + JNE loopSSE2 + JMP test +loop: + MOVL 0(BX)(SI*4), R8 + MOVL 4(BX)(SI*4), R9 + SUBL R8, R9 + MOVL R9, (AX)(SI*4) + INCQ SI +test: + CMPQ SI, CX + JNE loop + RET + +// func decodeByteArrayLengths(offsets []uint32, length []int32) (lastOffset uint32, invalidLength int32) +TEXT ·decodeByteArrayLengths(SB), NOSPLIT, $0-56 + MOVQ offsets_base+0(FP), AX + MOVQ lengths_base+24(FP), BX + MOVQ lengths_len+32(FP), CX + + XORQ DX, DX // lastOffset + XORQ DI, DI // invalidLength + XORQ SI, SI + + CMPQ CX, $4 + JL test + + MOVQ CX, R8 + SHRQ $2, R8 + SHLQ $2, R8 + + MOVL $0, (AX) + PXOR X0, X0 + PXOR X3, X3 + // This loop computes the prefix sum of the lengths array in order to + // generate values of the offsets array. + // + // We stick to SSE2 to keep the code simple (the Go compiler appears to + // assume that SSE2 must be supported on AMD64) which already yields most + // of the performance that we could get on this subroutine if we were using + // AVX2. + // + // The X3 register also accumulates a mask of all length values, which is + // checked after the loop to determine whether any of the lengths were + // negative. 
+ // + // The following article contains a description of the prefix sum algorithm + // used in this function: https://en.algorithmica.org/hpc/algorithms/prefix/ +loopSSE2: + MOVOU (BX)(SI*4), X1 + POR X1, X3 + + MOVOA X1, X2 + PSLLDQ $4, X2 + PADDD X2, X1 + + MOVOA X1, X2 + PSLLDQ $8, X2 + PADDD X2, X1 + + PADDD X1, X0 + MOVOU X0, 4(AX)(SI*4) + + PSHUFD $0b11111111, X0, X0 + + ADDQ $4, SI + CMPQ SI, R8 + JNE loopSSE2 + + // If any of the most significant bits of double words in the X3 register + // are set to 1, it indicates that one of the lengths was negative and + // therefore the prefix sum is invalid. + // + // TODO: we report the invalid length as -1, effectively losing the original + // value due to the aggregation within X3. This is something that we might + // want to address in the future to provide better error reporting. + MOVMSKPS X3, R8 + MOVL $-1, R9 + CMPL R8, $0 + CMOVLNE R9, DI + + MOVQ X0, DX + JMP test +loop: + MOVL (BX)(SI*4), R8 + MOVL DX, (AX)(SI*4) + ADDL R8, DX + CMPL R8, $0 + CMOVLLT R8, DI + INCQ SI +test: + CMPQ SI, CX + JNE loop + + MOVL DX, (AX)(SI*4) + MOVL DX, lastOffset+48(FP) + MOVL DI, invalidLength+52(FP) + RET diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_purego.go b/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_purego.go new file mode 100644 index 00000000000..0c0fb6baeb9 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/delta/length_byte_array_purego.go @@ -0,0 +1,24 @@ +//go:build purego || !amd64 + +package delta + +func encodeByteArrayLengths(lengths []int32, offsets []uint32) { + for i := range lengths { + lengths[i] = int32(offsets[i+1] - offsets[i]) + } +} + +func decodeByteArrayLengths(offsets []uint32, lengths []int32) (uint32, int32) { + lastOffset := uint32(0) + + for i, n := range lengths { + if n < 0 { + return lastOffset, n + } + offsets[i] = lastOffset + lastOffset += uint32(n) + } + + offsets[len(lengths)] = lastOffset + 
return lastOffset, 0 +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/encoding.go b/vendor/github.com/parquet-go/parquet-go/encoding/encoding.go new file mode 100644 index 00000000000..a919f591729 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/encoding.go @@ -0,0 +1,72 @@ +// Package encoding provides the generic APIs implemented by parquet encodings +// in its sub-packages. +package encoding + +import ( + "math" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/format" +) + +const ( + MaxFixedLenByteArraySize = math.MaxInt16 +) + +// The Encoding interface is implemented by types representing parquet column +// encodings. +// +// Encoding instances must be safe to use concurrently from multiple goroutines. +type Encoding interface { + // Returns a human-readable name for the encoding. + String() string + + // Returns the parquet code representing the encoding. + Encoding() format.Encoding + + // Encode methods serialize the source sequence of values into the + // destination buffer, potentially reallocating it if it was too short to + // contain the output. + // + // The methods panic if the type of src values differ from the type of + // values being encoded. + EncodeLevels(dst []byte, src []uint8) ([]byte, error) + EncodeBoolean(dst []byte, src []byte) ([]byte, error) + EncodeInt32(dst []byte, src []int32) ([]byte, error) + EncodeInt64(dst []byte, src []int64) ([]byte, error) + EncodeInt96(dst []byte, src []deprecated.Int96) ([]byte, error) + EncodeFloat(dst []byte, src []float32) ([]byte, error) + EncodeDouble(dst []byte, src []float64) ([]byte, error) + EncodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, error) + EncodeFixedLenByteArray(dst []byte, src []byte, size int) ([]byte, error) + + // Decode methods deserialize from the source buffer into the destination + // slice, potentially growing it if it was too short to contain the result. 
+ // + // The methods panic if the type of dst values differ from the type of + // values being decoded. + DecodeLevels(dst []uint8, src []byte) ([]uint8, error) + DecodeBoolean(dst []byte, src []byte) ([]byte, error) + DecodeInt32(dst []int32, src []byte) ([]int32, error) + DecodeInt64(dst []int64, src []byte) ([]int64, error) + DecodeInt96(dst []deprecated.Int96, src []byte) ([]deprecated.Int96, error) + DecodeFloat(dst []float32, src []byte) ([]float32, error) + DecodeDouble(dst []float64, src []byte) ([]float64, error) + DecodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, []uint32, error) + DecodeFixedLenByteArray(dst []byte, src []byte, size int) ([]byte, error) + + // Computes an estimation of the output size of decoding the encoded page + // of values passed as argument. + // + // Note that this is an estimate, it is useful to preallocate the output + // buffer that will be passed to the decode method, but the actual output + // size may be different. + // + // The estimate never errors since it is not intended to be used as an + // input validation method. + EstimateDecodeByteArraySize(src []byte) int + + // When this method returns true, the encoding supports receiving the same + // buffer as source and destination. + CanDecodeInPlace() bool +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/notsupported.go b/vendor/github.com/parquet-go/parquet-go/encoding/notsupported.go new file mode 100644 index 00000000000..59215330d1f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/notsupported.go @@ -0,0 +1,213 @@ +package encoding + +import ( + "errors" + "fmt" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/format" +) + +var ( + // ErrNotSupported is an error returned when the underlying encoding does + // not support the type of values being encoded or decoded. 
+ // + // This error may be wrapped with type information, applications must use + // errors.Is rather than equality comparisons to test the error values + // returned by encoders and decoders. + ErrNotSupported = errors.New("encoding not supported") + + // ErrInvalidArgument is an error returned one or more arguments passed to + // the encoding functions are incorrect. + // + // As with ErrNotSupported, this error may be wrapped with specific + // information about the problem and applications are expected to use + // errors.Is for comparisons. + ErrInvalidArgument = errors.New("invalid argument") +) + +// Error constructs an error which wraps err and indicates that it originated +// from the given encoding. +func Error(e Encoding, err error) error { + return fmt.Errorf("%s: %w", e, err) +} + +// Errorf is like Error but constructs the error message from the given format +// and arguments. +func Errorf(e Encoding, msg string, args ...any) error { + return Error(e, fmt.Errorf(msg, args...)) +} + +// ErrEncodeInvalidInputSize constructs an error indicating that encoding failed +// due to the size of the input. +func ErrEncodeInvalidInputSize(e Encoding, typ string, size int) error { + return errInvalidInputSize(e, "encode", typ, size) +} + +// ErrDecodeInvalidInputSize constructs an error indicating that decoding failed +// due to the size of the input. +func ErrDecodeInvalidInputSize(e Encoding, typ string, size int) error { + return errInvalidInputSize(e, "decode", typ, size) +} + +func errInvalidInputSize(e Encoding, op, typ string, size int) error { + return Errorf(e, "cannot %s %s from input of size %d: %w", op, typ, size, ErrInvalidArgument) +} + +// CanEncodeInt8 reports whether e can encode LEVELS values. +func CanEncodeLevels(e Encoding) bool { + _, err := e.EncodeLevels(nil, nil) + return !errors.Is(err, ErrNotSupported) +} + +// CanEncodeBoolean reports whether e can encode BOOLEAN values. 
+func CanEncodeBoolean(e Encoding) bool { + _, err := e.EncodeBoolean(nil, nil) + return !errors.Is(err, ErrNotSupported) +} + +// CanEncodeInt32 reports whether e can encode INT32 values. +func CanEncodeInt32(e Encoding) bool { + _, err := e.EncodeInt32(nil, nil) + return !errors.Is(err, ErrNotSupported) +} + +// CanEncodeInt64 reports whether e can encode INT64 values. +func CanEncodeInt64(e Encoding) bool { + _, err := e.EncodeInt64(nil, nil) + return !errors.Is(err, ErrNotSupported) +} + +// CanEncodeInt96 reports whether e can encode INT96 values. +func CanEncodeInt96(e Encoding) bool { + _, err := e.EncodeInt96(nil, nil) + return !errors.Is(err, ErrNotSupported) +} + +// CanEncodeFloat reports whether e can encode FLOAT values. +func CanEncodeFloat(e Encoding) bool { + _, err := e.EncodeFloat(nil, nil) + return !errors.Is(err, ErrNotSupported) +} + +// CanEncodeDouble reports whether e can encode DOUBLE values. +func CanEncodeDouble(e Encoding) bool { + _, err := e.EncodeDouble(nil, nil) + return !errors.Is(err, ErrNotSupported) +} + +// CanEncodeByteArray reports whether e can encode BYTE_ARRAY values. +func CanEncodeByteArray(e Encoding) bool { + _, err := e.EncodeByteArray(nil, nil, zeroOffsets[:]) + return !errors.Is(err, ErrNotSupported) +} + +// CanEncodeFixedLenByteArray reports whether e can encode +// FIXED_LEN_BYTE_ARRAY values. +func CanEncodeFixedLenByteArray(e Encoding) bool { + _, err := e.EncodeFixedLenByteArray(nil, nil, 1) + return !errors.Is(err, ErrNotSupported) +} + +var zeroOffsets [1]uint32 + +// NotSupported is a type satisfying the Encoding interface which does not +// support encoding nor decoding any value types. 
+type NotSupported struct { +} + +func (NotSupported) String() string { + return "NOT_SUPPORTED" +} + +func (NotSupported) Encoding() format.Encoding { + return -1 +} + +func (NotSupported) EncodeLevels(dst []byte, src []uint8) ([]byte, error) { + return dst[:0], errNotSupported("LEVELS") +} + +func (NotSupported) EncodeBoolean(dst []byte, src []byte) ([]byte, error) { + return dst[:0], errNotSupported("BOOLEAN") +} + +func (NotSupported) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + return dst[:0], errNotSupported("INT32") +} + +func (NotSupported) EncodeInt64(dst []byte, src []int64) ([]byte, error) { + return dst[:0], errNotSupported("INT64") +} + +func (NotSupported) EncodeInt96(dst []byte, src []deprecated.Int96) ([]byte, error) { + return dst[:0], errNotSupported("INT96") +} + +func (NotSupported) EncodeFloat(dst []byte, src []float32) ([]byte, error) { + return dst[:0], errNotSupported("FLOAT") +} + +func (NotSupported) EncodeDouble(dst []byte, src []float64) ([]byte, error) { + return dst[:0], errNotSupported("DOUBLE") +} + +func (NotSupported) EncodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, error) { + return dst[:0], errNotSupported("BYTE_ARRAY") +} + +func (NotSupported) EncodeFixedLenByteArray(dst []byte, src []byte, size int) ([]byte, error) { + return dst[:0], errNotSupported("FIXED_LEN_BYTE_ARRAY") +} + +func (NotSupported) DecodeLevels(dst []uint8, src []byte) ([]uint8, error) { + return dst, errNotSupported("LEVELS") +} + +func (NotSupported) DecodeBoolean(dst []byte, src []byte) ([]byte, error) { + return dst, errNotSupported("BOOLEAN") +} + +func (NotSupported) DecodeInt32(dst []int32, src []byte) ([]int32, error) { + return dst, errNotSupported("INT32") +} + +func (NotSupported) DecodeInt64(dst []int64, src []byte) ([]int64, error) { + return dst, errNotSupported("INT64") +} + +func (NotSupported) DecodeInt96(dst []deprecated.Int96, src []byte) ([]deprecated.Int96, error) { + return dst, errNotSupported("INT96") +} + 
+func (NotSupported) DecodeFloat(dst []float32, src []byte) ([]float32, error) { + return dst, errNotSupported("FLOAT") +} + +func (NotSupported) DecodeDouble(dst []float64, src []byte) ([]float64, error) { + return dst, errNotSupported("DOUBLE") +} + +func (NotSupported) DecodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, []uint32, error) { + return dst, offsets, errNotSupported("BYTE_ARRAY") +} + +func (NotSupported) DecodeFixedLenByteArray(dst []byte, src []byte, size int) ([]byte, error) { + return dst, errNotSupported("FIXED_LEN_BYTE_ARRAY") +} + +func (NotSupported) EstimateDecodeByteArraySize(src []byte) int { + return 0 +} + +func (NotSupported) CanDecodeInPlace() bool { + return false +} + +func errNotSupported(typ string) error { + return fmt.Errorf("%w for type %s", ErrNotSupported, typ) +} + +var ( + _ Encoding = NotSupported{} +) diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/plain/dictionary.go b/vendor/github.com/parquet-go/parquet-go/encoding/plain/dictionary.go new file mode 100644 index 00000000000..4946a79293d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/plain/dictionary.go @@ -0,0 +1,27 @@ +package plain + +import ( + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type DictionaryEncoding struct { + encoding.NotSupported + plain Encoding +} + +func (e *DictionaryEncoding) String() string { + return "PLAIN_DICTIONARY" +} + +func (e *DictionaryEncoding) Encoding() format.Encoding { + return format.PlainDictionary +} + +func (e *DictionaryEncoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + return e.plain.EncodeInt32(dst, src) +} + +func (e *DictionaryEncoding) DecodeInt32(dst []int32, src []byte) ([]int32, error) { + return e.plain.DecodeInt32(dst, src) +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain.go b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain.go new file mode 100644 index 
00000000000..c16aa2b7af5 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain.go @@ -0,0 +1,246 @@ +// Package plain implements the PLAIN parquet encoding. +// +// https://github.com/apache/parquet-format/blob/master/Encodings.md#plain-plain--0 +package plain + +import ( + "encoding/binary" + "fmt" + "io" + "math" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +const ( + ByteArrayLengthSize = 4 + MaxByteArrayLength = math.MaxInt32 +) + +type Encoding struct { + encoding.NotSupported +} + +func (e *Encoding) String() string { + return "PLAIN" +} + +func (e *Encoding) Encoding() format.Encoding { + return format.Plain +} + +func (e *Encoding) EncodeBoolean(dst []byte, src []byte) ([]byte, error) { + return append(dst[:0], src...), nil +} + +func (e *Encoding) EncodeInt96(dst []byte, src []deprecated.Int96) ([]byte, error) { + return append(dst[:0], unsafecast.Slice[byte](src)...), nil +} + +func (e *Encoding) EncodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, error) { + dst = dst[:0] + + if len(offsets) > 0 { + baseOffset := offsets[0] + + for _, endOffset := range offsets[1:] { + dst = AppendByteArray(dst, src[baseOffset:endOffset:endOffset]) + baseOffset = endOffset + } + } + + return dst, nil +} + +func (e *Encoding) EncodeFixedLenByteArray(dst []byte, src []byte, size int) ([]byte, error) { + if size < 0 || size > encoding.MaxFixedLenByteArraySize { + return dst[:0], encoding.Error(e, encoding.ErrInvalidArgument) + } + return append(dst[:0], src...), nil +} + +func (e *Encoding) DecodeBoolean(dst []byte, src []byte) ([]byte, error) { + return append(dst[:0], src...), nil +} + +func (e *Encoding) DecodeInt96(dst []deprecated.Int96, src []byte) ([]deprecated.Int96, error) { + if (len(src) % 12) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "INT96", len(src)) + } + 
return append(dst[:0], unsafecast.Slice[deprecated.Int96](src)...), nil +} + +func (e *Encoding) DecodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, []uint32, error) { + dst, offsets = dst[:0], offsets[:0] + + for i := 0; i < len(src); { + if (len(src) - i) < ByteArrayLengthSize { + return dst, offsets, ErrTooShort(len(src)) + } + n := ByteArrayLength(src[i:]) + if n > (len(src) - ByteArrayLengthSize) { + return dst, offsets, ErrTooShort(len(src)) + } + i += ByteArrayLengthSize + offsets = append(offsets, uint32(len(dst))) + dst = append(dst, src[i:i+n]...) + i += n + } + + return dst, append(offsets, uint32(len(dst))), nil +} + +func (e *Encoding) DecodeFixedLenByteArray(dst []byte, src []byte, size int) ([]byte, error) { + if size < 0 || size > encoding.MaxFixedLenByteArraySize { + return dst, encoding.Error(e, encoding.ErrInvalidArgument) + } + if (len(src) % size) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "FIXED_LEN_BYTE_ARRAY", len(src)) + } + return append(dst[:0], src...), nil +} + +func (e *Encoding) EstimateDecodeByteArraySize(src []byte) int { + return len(src) +} + +func (e *Encoding) CanDecodeInPlace() bool { + return true +} + +func Boolean(v bool) []byte { return AppendBoolean(nil, 0, v) } + +func Int32(v int32) []byte { return AppendInt32(nil, v) } + +func Int64(v int64) []byte { return AppendInt64(nil, v) } + +func Int96(v deprecated.Int96) []byte { return AppendInt96(nil, v) } + +func Float(v float32) []byte { return AppendFloat(nil, v) } + +func Double(v float64) []byte { return AppendDouble(nil, v) } + +func ByteArray(v []byte) []byte { return AppendByteArray(nil, v) } + +func AppendBoolean(b []byte, n int, v bool) []byte { + i := n / 8 + j := n % 8 + + if cap(b) > i { + b = b[:i+1] + } else { + tmp := make([]byte, i+1, 2*(i+1)) + copy(tmp, b) + b = tmp + } + + k := uint(j) + x := byte(0) + if v { + x = 1 + } + + b[i] = (b[i] & ^(1 << k)) | (x << k) + return b +} + +func AppendInt32(b []byte, v int32) []byte { + x 
:= [4]byte{} + binary.LittleEndian.PutUint32(x[:], uint32(v)) + return append(b, x[:]...) +} + +func AppendInt64(b []byte, v int64) []byte { + x := [8]byte{} + binary.LittleEndian.PutUint64(x[:], uint64(v)) + return append(b, x[:]...) +} + +func AppendInt96(b []byte, v deprecated.Int96) []byte { + x := [12]byte{} + binary.LittleEndian.PutUint32(x[0:4], v[0]) + binary.LittleEndian.PutUint32(x[4:8], v[1]) + binary.LittleEndian.PutUint32(x[8:12], v[2]) + return append(b, x[:]...) +} + +func AppendFloat(b []byte, v float32) []byte { + x := [4]byte{} + binary.LittleEndian.PutUint32(x[:], math.Float32bits(v)) + return append(b, x[:]...) +} + +func AppendDouble(b []byte, v float64) []byte { + x := [8]byte{} + binary.LittleEndian.PutUint64(x[:], math.Float64bits(v)) + return append(b, x[:]...) +} + +func AppendByteArray(b, v []byte) []byte { + length := [ByteArrayLengthSize]byte{} + PutByteArrayLength(length[:], len(v)) + b = append(b, length[:]...) + b = append(b, v...) + return b +} + +func AppendByteArrayString(b []byte, v string) []byte { + length := [ByteArrayLengthSize]byte{} + PutByteArrayLength(length[:], len(v)) + b = append(b, length[:]...) + b = append(b, v...) + return b +} + +func AppendByteArrayLength(b []byte, n int) []byte { + length := [ByteArrayLengthSize]byte{} + PutByteArrayLength(length[:], n) + return append(b, length[:]...) 
+} + +func ByteArrayLength(b []byte) int { + return int(binary.LittleEndian.Uint32(b)) +} + +func PutByteArrayLength(b []byte, n int) { + binary.LittleEndian.PutUint32(b, uint32(n)) +} + +func RangeByteArray(b []byte, do func([]byte) error) (err error) { + for len(b) > 0 { + var v []byte + if v, b, err = NextByteArray(b); err != nil { + return err + } + if err = do(v); err != nil { + return err + } + } + return nil +} + +func NextByteArray(b []byte) (v, r []byte, err error) { + if len(b) < ByteArrayLengthSize { + return nil, b, ErrTooShort(len(b)) + } + n := ByteArrayLength(b) + if n > (len(b) - ByteArrayLengthSize) { + return nil, b, ErrTooShort(len(b)) + } + if n > MaxByteArrayLength { + return nil, b, ErrTooLarge(n) + } + n += ByteArrayLengthSize + return b[ByteArrayLengthSize:n:n], b[n:len(b):len(b)], nil +} + +func ErrTooShort(length int) error { + return fmt.Errorf("input of length %d is too short to contain a PLAIN encoded byte array value: %w", length, io.ErrUnexpectedEOF) +} + +func ErrTooLarge(length int) error { + return fmt.Errorf("byte array of length %d is too large to be encoded", length) +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_be.go b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_be.go new file mode 100644 index 00000000000..6c8c9000b52 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_be.go @@ -0,0 +1,113 @@ +//go:build s390x + +package plain + +import ( + "encoding/binary" + "math" + + "github.com/parquet-go/parquet-go/encoding" +) + +// TODO: optimize by doing the byte swap in the output slice instead of +// allocating a temporay buffer. 
+ +func (e *Encoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + srcLen := len(src) + byteEnc := make([]byte, (srcLen * 4)) + idx := 0 + for k := range srcLen { + binary.LittleEndian.PutUint32(byteEnc[idx:(4+idx)], uint32((src)[k])) + idx += 4 + } + return append(dst[:0], (byteEnc)...), nil +} + +func (e *Encoding) EncodeInt64(dst []byte, src []int64) ([]byte, error) { + srcLen := len(src) + byteEnc := make([]byte, (srcLen * 8)) + idx := 0 + for k := range srcLen { + binary.LittleEndian.PutUint64(byteEnc[idx:(8+idx)], uint64((src)[k])) + idx += 8 + } + return append(dst[:0], (byteEnc)...), nil +} + +func (e *Encoding) EncodeFloat(dst []byte, src []float32) ([]byte, error) { + srcLen := len(src) + byteEnc := make([]byte, (srcLen * 4)) + idx := 0 + for k := range srcLen { + binary.LittleEndian.PutUint32(byteEnc[idx:(4+idx)], math.Float32bits((src)[k])) + idx += 4 + } + return append(dst[:0], (byteEnc)...), nil +} + +func (e *Encoding) EncodeDouble(dst []byte, src []float64) ([]byte, error) { + srcLen := len(src) + byteEnc := make([]byte, (srcLen * 8)) + idx := 0 + for k := range srcLen { + binary.LittleEndian.PutUint64(byteEnc[idx:(8+idx)], math.Float64bits((src)[k])) + idx += 8 + } + return append(dst[:0], (byteEnc)...), nil +} + +func (e *Encoding) DecodeInt32(dst []int32, src []byte) ([]int32, error) { + if (len(src) % 4) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "INT32", len(src)) + } + srcLen := (len(src) / 4) + byteDec := make([]int32, srcLen) + idx := 0 + for k := range srcLen { + byteDec[k] = int32(binary.LittleEndian.Uint32((src)[idx:(4 + idx)])) + idx += 4 + } + return append(dst[:0], (byteDec)...), nil +} + +func (e *Encoding) DecodeInt64(dst []int64, src []byte) ([]int64, error) { + if (len(src) % 8) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "INT64", len(src)) + } + srcLen := (len(src) / 8) + byteDec := make([]int64, srcLen) + idx := 0 + for k := range srcLen { + byteDec[k] = 
int64(binary.LittleEndian.Uint64((src)[idx:(8 + idx)])) + idx += 8 + } + return append(dst[:0], (byteDec)...), nil +} + +func (e *Encoding) DecodeFloat(dst []float32, src []byte) ([]float32, error) { + if (len(src) % 4) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "FLOAT", len(src)) + } + srcLen := (len(src) / 4) + byteDec := make([]float32, srcLen) + idx := 0 + for k := range srcLen { + byteDec[k] = float32(math.Float32frombits(binary.LittleEndian.Uint32((src)[idx:(4 + idx)]))) + idx += 4 + } + return append(dst[:0], (byteDec)...), nil +} + +func (e *Encoding) DecodeDouble(dst []float64, src []byte) ([]float64, error) { + if (len(src) % 8) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "DOUBLE", len(src)) + } + srcLen := (len(src) / 8) + byteDec := make([]float64, srcLen) + idx := 0 + for k := range srcLen { + byteDec[k] = float64(math.Float64frombits(binary.LittleEndian.Uint64((src)[idx:(8 + idx)]))) + idx += 8 + } + return append(dst[:0], (byteDec)...), nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_le.go b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_le.go new file mode 100644 index 00000000000..97772a1cdcc --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_le.go @@ -0,0 +1,52 @@ +//go:build !s390x + +package plain + +import ( + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" +) + +func (e *Encoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + return append(dst[:0], unsafecast.Slice[byte](src)...), nil +} + +func (e *Encoding) EncodeInt64(dst []byte, src []int64) ([]byte, error) { + return append(dst[:0], unsafecast.Slice[byte](src)...), nil +} + +func (e *Encoding) EncodeFloat(dst []byte, src []float32) ([]byte, error) { + return append(dst[:0], unsafecast.Slice[byte](src)...), nil +} + +func (e *Encoding) EncodeDouble(dst []byte, src []float64) ([]byte, error) { + return append(dst[:0], 
unsafecast.Slice[byte](src)...), nil +} + +func (e *Encoding) DecodeInt32(dst []int32, src []byte) ([]int32, error) { + if (len(src) % 4) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "INT32", len(src)) + } + return append(dst[:0], unsafecast.Slice[int32](src)...), nil +} + +func (e *Encoding) DecodeInt64(dst []int64, src []byte) ([]int64, error) { + if (len(src) % 8) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "INT64", len(src)) + } + return append(dst[:0], unsafecast.Slice[int64](src)...), nil +} + +func (e *Encoding) DecodeFloat(dst []float32, src []byte) ([]float32, error) { + if (len(src) % 4) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "FLOAT", len(src)) + } + return append(dst[:0], unsafecast.Slice[float32](src)...), nil +} + +func (e *Encoding) DecodeDouble(dst []float64, src []byte) ([]float64, error) { + if (len(src) % 8) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "DOUBLE", len(src)) + } + return append(dst[:0], unsafecast.Slice[float64](src)...), nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/rle/dictionary.go b/vendor/github.com/parquet-go/parquet-go/encoding/rle/dictionary.go new file mode 100644 index 00000000000..b21fda912ca --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/rle/dictionary.go @@ -0,0 +1,59 @@ +package rle + +import ( + "math/bits" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type DictionaryEncoding struct { + encoding.NotSupported +} + +func (e *DictionaryEncoding) String() string { + return "RLE_DICTIONARY" +} + +func (e *DictionaryEncoding) Encoding() format.Encoding { + return format.RLEDictionary +} + +func (e *DictionaryEncoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + bitWidth := maxLenInt32(src) + dst = append(dst[:0], byte(bitWidth)) + dst, err := encodeInt32(dst, src, uint(bitWidth)) + return dst, e.wrap(err) +} + 
+func (e *DictionaryEncoding) DecodeInt32(dst []int32, src []byte) ([]int32, error) { + if len(src) == 0 { + return dst[:0], nil + } + buf := unsafecast.Slice[byte](dst) + buf, err := decodeInt32(buf[:0], src[1:], uint(src[0])) + return unsafecast.Slice[int32](buf), e.wrap(err) +} + +func (e *DictionaryEncoding) wrap(err error) error { + if err != nil { + err = encoding.Error(e, err) + } + return err +} + +func clearInt32(data []int32) { + for i := range data { + data[i] = 0 + } +} + +func maxLenInt32(data []int32) (max int) { + for _, v := range data { + if n := bits.Len32(uint32(v)); n > max { + max = n + } + } + return max +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle.go b/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle.go new file mode 100644 index 00000000000..1ef0e43370c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle.go @@ -0,0 +1,570 @@ +// Package rle implements the hybrid RLE/Bit-Packed encoding employed in +// repetition and definition levels, dictionary indexed data pages, and +// boolean values in the PLAIN encoding. +// +// https://github.com/apache/parquet-format/blob/master/Encodings.md#run-length-encoding--bit-packing-hybrid-rle--3 +package rle + +import ( + "encoding/binary" + "fmt" + "io" + "unsafe" + + "golang.org/x/sys/cpu" + + "github.com/parquet-go/bitpack" + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" + "github.com/parquet-go/parquet-go/internal/bytealg" +) + +const ( + // This limit is intended to prevent unbounded memory allocations when + // decoding runs. + // + // We use a generous limit which allows for over 16 million values per page + // if there is only one run to encode the repetition or definition levels + // (this should be uncommon). 
+ maxSupportedValueCount = 16 * 1024 * 1024 +) + +type Encoding struct { + encoding.NotSupported + BitWidth int +} + +func (e *Encoding) String() string { + return "RLE" +} + +func (e *Encoding) Encoding() format.Encoding { + return format.RLE +} + +func (e *Encoding) EncodeLevels(dst []byte, src []uint8) ([]byte, error) { + dst, err := encodeBytes(dst[:0], src, uint(e.BitWidth)) + return dst, e.wrap(err) +} + +func (e *Encoding) EncodeBoolean(dst []byte, src []byte) ([]byte, error) { + // In the case of encoding a boolean values, the 4 bytes length of the + // output is expected by the parquet format. We add the bytes as placeholder + // before appending the encoded data. + dst = append(dst[:0], 0, 0, 0, 0) + dst, err := encodeBits(dst, src) + binary.LittleEndian.PutUint32(dst, uint32(len(dst))-4) + return dst, e.wrap(err) +} + +func (e *Encoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + dst, err := encodeInt32(dst[:0], src, uint(e.BitWidth)) + return dst, e.wrap(err) +} + +func (e *Encoding) DecodeLevels(dst []uint8, src []byte) ([]uint8, error) { + dst, err := decodeBytes(dst[:0], src, uint(e.BitWidth)) + return dst, e.wrap(err) +} + +func (e *Encoding) DecodeBoolean(dst []byte, src []byte) ([]byte, error) { + if len(src) == 4 { + return dst[:0], nil + } + if len(src) < 4 { + return dst[:0], fmt.Errorf("input shorter than 4 bytes: %w", io.ErrUnexpectedEOF) + } + n := int(binary.LittleEndian.Uint32(src)) + src = src[4:] + if n > len(src) { + return dst[:0], fmt.Errorf("input shorter than length prefix: %d < %d: %w", len(src), n, io.ErrUnexpectedEOF) + } + dst, err := decodeBits(dst[:0], src[:n]) + return dst, e.wrap(err) +} + +func (e *Encoding) DecodeInt32(dst []int32, src []byte) ([]int32, error) { + buf := unsafecast.Slice[byte](dst) + buf, err := decodeInt32(buf[:0], src, uint(e.BitWidth)) + return unsafecast.Slice[int32](buf), e.wrap(err) +} + +func (e *Encoding) wrap(err error) error { + if err != nil { + err = encoding.Error(e, err) + } + 
return err +} + +func encodeBits(dst, src []byte) ([]byte, error) { + if len(src) == 0 || isZero(src) || isOnes(src) { + dst = appendUvarint(dst, uint64(8*len(src))<<1) + if len(src) > 0 { + dst = append(dst, src[0]) + } + return dst, nil + } + + for i := 0; i < len(src); { + j := i + 1 + + // Look for contiguous sections of 8 bits, all zeros or ones; these + // are run-length encoded as it only takes 2 or 3 bytes to store these + // sequences. + if src[i] == 0 || src[i] == 0xFF { + for j < len(src) && src[i] == src[j] { + j++ + } + + if n := j - i; n > 1 { + dst = appendRunLengthBits(dst, 8*n, src[i]) + i = j + continue + } + } + + // Sequences of bits that are neither all zeroes or ones are bit-packed, + // which is a simple copy of the input to the output preceded with the + // bit-pack header. + for j < len(src) && (src[j-1] != src[j] || (src[j] != 0 && src[j] == 0xFF)) { + j++ + } + + if (j-i) > 1 && j < len(src) { + j-- + } + + dst = appendBitPackedBits(dst, src[i:j]) + i = j + } + return dst, nil +} + +func encodeBytes(dst, src []byte, bitWidth uint) ([]byte, error) { + if bitWidth > 8 { + return dst, errEncodeInvalidBitWidth("INT8", bitWidth) + } + if bitWidth == 0 { + if !isZero(src) { + return dst, errEncodeInvalidBitWidth("INT8", bitWidth) + } + return appendUvarint(dst, uint64(len(src))<<1), nil + } + + if len(src) >= 8 { + words := unsafecast.Slice[uint64](src) + if cpu.IsBigEndian { + srcLen := (len(src) / 8) + idx := 0 + for k := range srcLen { + words[k] = binary.LittleEndian.Uint64((src)[idx:(8 + idx)]) + idx += 8 + } + } else { + words = unsafe.Slice((*uint64)(unsafe.Pointer(&src[0])), len(src)/8) + } + + for i := 0; i < len(words); { + j := i + pattern := broadcast8x1(words[i]) + + for j < len(words) && words[j] == pattern { + j++ + } + + if i < j { + dst = appendRunLengthBytes(dst, 8*(j-i), byte(pattern)) + } else { + j++ + + for j < len(words) && words[j] != broadcast8x1(words[j-1]) { + j++ + } + + dst = appendBitPackedBytes(dst, words[i:j], 
bitWidth) + } + + i = j + } + } + + for i := (len(src) / 8) * 8; i < len(src); { + j := i + 1 + + for j < len(src) && src[i] == src[j] { + j++ + } + + dst = appendRunLengthBytes(dst, j-i, src[i]) + i = j + } + + return dst, nil +} + +func encodeInt32(dst []byte, src []int32, bitWidth uint) ([]byte, error) { + if bitWidth > 32 { + return dst, errEncodeInvalidBitWidth("INT32", bitWidth) + } + if bitWidth == 0 { + if !isZero(unsafecast.Slice[byte](src)) { + return dst, errEncodeInvalidBitWidth("INT32", bitWidth) + } + return appendUvarint(dst, uint64(len(src))<<1), nil + } + + if len(src) >= 8 { + words := unsafecast.Slice[[8]int32](src) + + for i := 0; i < len(words); { + j := i + pattern := broadcast8x4(words[i][0]) + + for j < len(words) && words[j] == pattern { + j++ + } + + if i < j { + dst = appendRunLengthInt32(dst, 8*(j-i), pattern[0], bitWidth) + } else { + j += 1 + j += encodeInt32IndexEqual8Contiguous(words[j:]) + dst = appendBitPackedInt32(dst, words[i:j], bitWidth) + } + + i = j + } + } + + for i := (len(src) / 8) * 8; i < len(src); { + j := i + 1 + + for j < len(src) && src[i] == src[j] { + j++ + } + + dst = appendRunLengthInt32(dst, j-i, src[i], bitWidth) + i = j + } + + return dst, nil +} + +func decodeBits(dst, src []byte) ([]byte, error) { + for i := 0; i < len(src); { + u, n := binary.Uvarint(src[i:]) + if n == 0 { + return dst, fmt.Errorf("decoding run-length block header: %w", io.ErrUnexpectedEOF) + } + if n < 0 { + return dst, fmt.Errorf("overflow after decoding %d/%d bytes of run-length block header", -n+i, len(src)) + } + i += n + + count, bitpacked := uint(u>>1), (u&1) != 0 + if count > maxSupportedValueCount { + return dst, fmt.Errorf("decoded run-length block cannot have more than %d values", maxSupportedValueCount) + } + if bitpacked { + n := int(count) + j := i + n + + if j > len(src) { + return dst, fmt.Errorf("decoding bit-packed block of %d values: %w", n, io.ErrUnexpectedEOF) + } + + dst = append(dst, src[i:j]...) 
+ i = j + } else { + word := byte(0) + if i < len(src) { + word = src[i] + i++ + } + + offset := len(dst) + length := bitpack.ByteCount(count) + dst = resize(dst, offset+length) + bytealg.Broadcast(dst[offset:], word) + } + } + return dst, nil +} + +func decodeBytes(dst, src []byte, bitWidth uint) ([]byte, error) { + if bitWidth > 8 { + return dst, errDecodeInvalidBitWidth("INT8", bitWidth) + } + + for i := 0; i < len(src); { + u, n := binary.Uvarint(src[i:]) + if n == 0 { + return dst, fmt.Errorf("decoding run-length block header: %w", io.ErrUnexpectedEOF) + } + if n < 0 { + return dst, fmt.Errorf("overflow after decoding %d/%d bytes of run-length block header", -n+i, len(src)) + } + i += n + + count, bitpacked := uint(u>>1), (u&1) != 0 + if count > maxSupportedValueCount { + return dst, fmt.Errorf("decoded run-length block cannot have more than %d values", maxSupportedValueCount) + } + if bitpacked { + count *= 8 + j := i + bitpack.ByteCount(count*bitWidth) + + if j > len(src) { + return dst, fmt.Errorf("decoding bit-packed block of %d values: %w", 8*count, io.ErrUnexpectedEOF) + } + + offset := len(dst) + length := int(count) + dst = resize(dst, offset+length) + decodeBytesBitpack(dst[offset:], src[i:j], count, bitWidth) + + i = j + } else { + if bitWidth != 0 && (i+1) > len(src) { + return dst, fmt.Errorf("decoding run-length block of %d values: %w", count, io.ErrUnexpectedEOF) + } + + word := byte(0) + if bitWidth != 0 { + word = src[i] + i++ + } + + offset := len(dst) + length := int(count) + dst = resize(dst, offset+length) + bytealg.Broadcast(dst[offset:], word) + } + } + + return dst, nil +} + +func decodeInt32(dst, src []byte, bitWidth uint) ([]byte, error) { + if bitWidth > 32 { + return dst, errDecodeInvalidBitWidth("INT32", bitWidth) + } + + buf := make([]byte, 2*bitpack.PaddingInt32) + + for i := 0; i < len(src); { + u, n := binary.Uvarint(src[i:]) + if n == 0 { + return dst, fmt.Errorf("decoding run-length block header: %w", io.ErrUnexpectedEOF) + } 
+ if n < 0 { + return dst, fmt.Errorf("overflow after decoding %d/%d bytes of run-length block header", -n+i, len(src)) + } + i += n + + count, bitpacked := uint(u>>1), (u&1) != 0 + if count > maxSupportedValueCount { + return dst, fmt.Errorf("decoded run-length block cannot have more than %d values", maxSupportedValueCount) + } + if bitpacked { + offset := len(dst) + length := int(count * bitWidth) + dst = resize(dst, offset+4*8*int(count)) + + // The bitpack.UnpackInt32 function requires the input to be padded + // or the function panics. If there is enough room in the input + // buffer we can use it, otherwise we have to copy it to a larger + // location (which should rarely happen). + in := src[i : i+length] + if (cap(in) - len(in)) >= bitpack.PaddingInt32 { + in = in[:cap(in)] + } else { + buf = resize(buf, len(in)+bitpack.PaddingInt32) + copy(buf, in) + in = buf + } + + out := unsafecast.Slice[int32](dst[offset:]) + bitpack.Unpack(out, in, bitWidth) + i += length + } else { + j := i + bitpack.ByteCount(bitWidth) + + if j > len(src) { + return dst, fmt.Errorf("decoding run-length block of %d values: %w", count, io.ErrUnexpectedEOF) + } + + bits := [4]byte{} + copy(bits[:], src[i:j]) + + //swap the bytes in the "bits" array to take care of big endian arch + if cpu.IsBigEndian { + for m, n := 0, 3; m < n; m, n = m+1, n-1 { + bits[m], bits[n] = bits[n], bits[m] + } + } + dst = appendRepeat(dst, bits[:], count) + i = j + } + } + + return dst, nil +} + +func errEncodeInvalidBitWidth(typ string, bitWidth uint) error { + return errInvalidBitWidth("encode", typ, bitWidth) +} + +func errDecodeInvalidBitWidth(typ string, bitWidth uint) error { + return errInvalidBitWidth("decode", typ, bitWidth) +} + +func errInvalidBitWidth(op, typ string, bitWidth uint) error { + return fmt.Errorf("cannot %s %s with invalid bit-width=%d", op, typ, bitWidth) +} + +func appendRepeat(dst, pattern []byte, count uint) []byte { + offset := len(dst) + length := int(count) * len(pattern) + 
dst = resize(dst, offset+length) + i := offset + copy(dst[offset:], pattern) + for i < len(dst) { + i += copy(dst[i:], dst[offset:i]) + } + return dst +} + +func appendUvarint(dst []byte, u uint64) []byte { + var b [binary.MaxVarintLen64]byte + var n = binary.PutUvarint(b[:], u) + return append(dst, b[:n]...) +} + +func appendRunLengthBits(dst []byte, count int, value byte) []byte { + return appendRunLengthBytes(dst, count, value) +} + +func appendBitPackedBits(dst []byte, words []byte) []byte { + n := len(dst) + dst = resize(dst, n+binary.MaxVarintLen64+len(words)) + n += binary.PutUvarint(dst[n:], uint64(len(words)<<1)|1) + n += copy(dst[n:], words) + return dst[:n] +} + +func appendRunLengthBytes(dst []byte, count int, value byte) []byte { + n := len(dst) + dst = resize(dst, n+binary.MaxVarintLen64+1) + n += binary.PutUvarint(dst[n:], uint64(count)<<1) + dst[n] = value + return dst[:n+1] +} + +func appendBitPackedBytes(dst []byte, words []uint64, bitWidth uint) []byte { + n := len(dst) + dst = resize(dst, n+binary.MaxVarintLen64+(len(words)*int(bitWidth))+8) + n += binary.PutUvarint(dst[n:], uint64(len(words)<<1)|1) + n += encodeBytesBitpack(dst[n:], words, bitWidth) + return dst[:n] +} + +func appendRunLengthInt32(dst []byte, count int, value int32, bitWidth uint) []byte { + n := len(dst) + dst = resize(dst, n+binary.MaxVarintLen64+4) + n += binary.PutUvarint(dst[n:], uint64(count)<<1) + binary.LittleEndian.PutUint32(dst[n:], uint32(value)) + return dst[:n+bitpack.ByteCount(bitWidth)] +} + +func appendBitPackedInt32(dst []byte, words [][8]int32, bitWidth uint) []byte { + n := len(dst) + dst = resize(dst, n+binary.MaxVarintLen64+(len(words)*int(bitWidth))+32) + n += binary.PutUvarint(dst[n:], uint64(len(words))<<1|1) + n += encodeInt32Bitpack(dst[n:], words, bitWidth) + return dst[:n] +} + +func broadcast8x1(v uint64) uint64 { + return (v & 0xFF) * 0x0101010101010101 +} + +func broadcast8x4(v int32) [8]int32 { + return [8]int32{v, v, v, v, v, v, v, v} +} + +func 
isZero(data []byte) bool { + return bytealg.Count(data, 0x00) == len(data) +} + +func isOnes(data []byte) bool { + return bytealg.Count(data, 0xFF) == len(data) +} + +func resize(buf []byte, size int) []byte { + if cap(buf) < size { + return grow(buf, size) + } + return buf[:size] +} + +func grow(buf []byte, size int) []byte { + newCap := max(2*cap(buf), size) + newBuf := make([]byte, size, newCap) + copy(newBuf, buf) + return newBuf +} + +func encodeInt32BitpackDefault(dst []byte, src [][8]int32, bitWidth uint) int { + bits := unsafecast.Slice[int32](src) + bitpack.Pack(dst, bits, bitWidth) + return bitpack.ByteCount(uint(len(src)*8) * bitWidth) +} + +func encodeBytesBitpackDefault(dst []byte, src []uint64, bitWidth uint) int { + bitMask := uint64(1<> 8) & bitMask) << (1 * bitWidth)) | + (((word >> 16) & bitMask) << (2 * bitWidth)) | + (((word >> 24) & bitMask) << (3 * bitWidth)) | + (((word >> 32) & bitMask) << (4 * bitWidth)) | + (((word >> 40) & bitMask) << (5 * bitWidth)) | + (((word >> 48) & bitMask) << (6 * bitWidth)) | + (((word >> 56) & bitMask) << (7 * bitWidth)) + binary.LittleEndian.PutUint64(dst[n:], word) + n += int(bitWidth) + } + + return n +} + +func decodeBytesBitpackDefault(dst, src []byte, count, bitWidth uint) { + dst = dst[:0] + + bitMask := uint64(1< 0; count -= 8 { + j := i + byteCount + + bits := [8]byte{} + copy(bits[:], src[i:j]) + word := binary.LittleEndian.Uint64(bits[:]) + + dst = append(dst, + byte((word>>(0*bitWidth))&bitMask), + byte((word>>(1*bitWidth))&bitMask), + byte((word>>(2*bitWidth))&bitMask), + byte((word>>(3*bitWidth))&bitMask), + byte((word>>(4*bitWidth))&bitMask), + byte((word>>(5*bitWidth))&bitMask), + byte((word>>(6*bitWidth))&bitMask), + byte((word>>(7*bitWidth))&bitMask), + ) + + i = j + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_amd64.go b/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_amd64.go new file mode 100644 index 00000000000..960a2b3e163 --- /dev/null +++ 
b/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_amd64.go @@ -0,0 +1,60 @@ +//go:build !purego + +package rle + +import ( + "golang.org/x/sys/cpu" +) + +var ( + encodeInt32IndexEqual8Contiguous func(words [][8]int32) int + encodeInt32Bitpack func(dst []byte, src [][8]int32, bitWidth uint) int + encodeBytesBitpack func(dst []byte, src []uint64, bitWidth uint) int + decodeBytesBitpack func(dst, src []byte, count, bitWidth uint) +) + +func init() { + switch { + case cpu.X86.HasAVX2: + encodeInt32IndexEqual8Contiguous = encodeInt32IndexEqual8ContiguousAVX2 + encodeInt32Bitpack = encodeInt32BitpackAVX2 + default: + encodeInt32IndexEqual8Contiguous = encodeInt32IndexEqual8ContiguousSSE + encodeInt32Bitpack = encodeInt32BitpackDefault + } + + switch { + case cpu.X86.HasBMI2: + encodeBytesBitpack = encodeBytesBitpackBMI2 + decodeBytesBitpack = decodeBytesBitpackBMI2 + default: + encodeBytesBitpack = encodeBytesBitpackDefault + decodeBytesBitpack = decodeBytesBitpackDefault + } +} + +//go:noescape +func encodeBytesBitpackBMI2(dst []byte, src []uint64, bitWidth uint) int + +//go:noescape +func encodeInt32IndexEqual8ContiguousAVX2(words [][8]int32) int + +//go:noescape +func encodeInt32IndexEqual8ContiguousSSE(words [][8]int32) int + +//go:noescape +func encodeInt32Bitpack1to16bitsAVX2(dst []byte, src [][8]int32, bitWidth uint) int + +func encodeInt32BitpackAVX2(dst []byte, src [][8]int32, bitWidth uint) int { + switch { + case bitWidth == 0: + return 0 + case bitWidth <= 16: + return encodeInt32Bitpack1to16bitsAVX2(dst, src, bitWidth) + default: + return encodeInt32BitpackDefault(dst, src, bitWidth) + } +} + +//go:noescape +func decodeBytesBitpackBMI2(dst, src []byte, count, bitWidth uint) diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_amd64.s b/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_amd64.s new file mode 100644 index 00000000000..a03a559d6b9 --- /dev/null +++ 
b/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_amd64.s @@ -0,0 +1,174 @@ +//go:build !purego + +#include "textflag.h" + +GLOBL bitMasks<>(SB), RODATA|NOPTR, $64 +DATA bitMasks<>+0(SB)/8, $0b0000000100000001000000010000000100000001000000010000000100000001 +DATA bitMasks<>+8(SB)/8, $0b0000001100000011000000110000001100000011000000110000001100000011 +DATA bitMasks<>+16(SB)/8, $0b0000011100000111000001110000011100000111000001110000011100000111 +DATA bitMasks<>+24(SB)/8, $0b0000111100001111000011110000111100001111000011110000111100001111 +DATA bitMasks<>+32(SB)/8, $0b0001111100011111000111110001111100011111000111110001111100011111 +DATA bitMasks<>+40(SB)/8, $0b0011111100111111001111110011111100111111001111110011111100111111 +DATA bitMasks<>+48(SB)/8, $0b0111111101111111011111110111111101111111011111110111111101111111 +DATA bitMasks<>+56(SB)/8, $0b1111111111111111111111111111111111111111111111111111111111111111 + +// func decodeBytesBitpackBMI2(dst, src []byte, count, bitWidth uint) +TEXT ·decodeBytesBitpackBMI2(SB), NOSPLIT, $0-64 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), BX + MOVQ count+48(FP), CX + MOVQ bitWidth+56(FP), DX + LEAQ bitMasks<>(SB), DI + MOVQ -8(DI)(DX*8), DI + XORQ SI, SI + SHRQ $3, CX + JMP test +loop: + MOVQ (BX), R8 + PDEPQ DI, R8, R8 + MOVQ R8, (AX)(SI*8) + ADDQ DX, BX + INCQ SI +test: + CMPQ SI, CX + JNE loop + RET + +// func encodeBytesBitpackBMI2(dst []byte, src []uint64, bitWidth uint) int +TEXT ·encodeBytesBitpackBMI2(SB), NOSPLIT, $0-64 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), BX + MOVQ src_len+32(FP), CX + MOVQ bitWidth+48(FP), DX + LEAQ bitMasks<>(SB), DI + MOVQ -8(DI)(DX*8), DI + XORQ SI, SI + JMP test +loop: + MOVQ (BX)(SI*8), R8 + PEXTQ DI, R8, R8 + MOVQ R8, (AX) + ADDQ DX, AX + INCQ SI +test: + CMPQ SI, CX + JNE loop +done: + SUBQ dst+0(FP), AX + MOVQ AX, ret+56(FP) + RET + +// func encodeInt32IndexEqual8ContiguousAVX2(words [][8]int32) int +TEXT ·encodeInt32IndexEqual8ContiguousAVX2(SB), NOSPLIT, $0-32 + MOVQ 
words_base+0(FP), AX + MOVQ words_len+8(FP), BX + XORQ SI, SI + SHLQ $5, BX + JMP test +loop: + VMOVDQU (AX)(SI*1), Y0 + VPSHUFD $0, Y0, Y1 + VPCMPEQD Y1, Y0, Y0 + VMOVMSKPS Y0, CX + CMPL CX, $0xFF + JE done + ADDQ $32, SI +test: + CMPQ SI, BX + JNE loop +done: + VZEROUPPER + SHRQ $5, SI + MOVQ SI, ret+24(FP) + RET + +// func encodeInt32IndexEqual8ContiguousSSE(words [][8]int32) int +TEXT ·encodeInt32IndexEqual8ContiguousSSE(SB), NOSPLIT, $0-32 + MOVQ words_base+0(FP), AX + MOVQ words_len+8(FP), BX + XORQ SI, SI + SHLQ $5, BX + JMP test +loop: + MOVOU (AX)(SI*1), X0 + MOVOU 16(AX)(SI*1), X1 + PSHUFD $0, X0, X2 + PCMPEQL X2, X0 + PCMPEQL X2, X1 + MOVMSKPS X0, CX + MOVMSKPS X1, DX + ANDL DX, CX + CMPL CX, $0xF + JE done + ADDQ $32, SI +test: + CMPQ SI, BX + JNE loop +done: + SHRQ $5, SI + MOVQ SI, ret+24(FP) + RET + +// func encodeInt32Bitpack1to16bitsAVX2(dst []byte, src [][8]int32, bitWidth uint) int +TEXT ·encodeInt32Bitpack1to16bitsAVX2(SB), NOSPLIT, $0-64 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), BX + MOVQ src_len+32(FP), CX + MOVQ bitWidth+48(FP), DX + + MOVQ DX, X0 + VPBROADCASTQ X0, Y6 // [1*bitWidth...] + VPSLLQ $1, Y6, Y7 // [2*bitWidth...] + VPADDQ Y6, Y7, Y8 // [3*bitWidth...] + VPSLLQ $2, Y6, Y9 // [4*bitWidth...] + + MOVQ $64, DI + MOVQ DI, X1 + VPBROADCASTQ X1, Y10 + VPSUBQ Y6, Y10, Y11 // [64-1*bitWidth...] + VPSUBQ Y9, Y10, Y12 // [64-4*bitWidth...] 
+ VPCMPEQQ Y4, Y4, Y4 + VPSRLVQ Y11, Y4, Y4 + + VPXOR Y5, Y5, Y5 + XORQ SI, SI + SHLQ $5, CX + JMP test +loop: + VMOVDQU (BX)(SI*1), Y0 + VPSHUFD $0b01010101, Y0, Y1 + VPSHUFD $0b10101010, Y0, Y2 + VPSHUFD $0b11111111, Y0, Y3 + + VPAND Y4, Y0, Y0 + VPAND Y4, Y1, Y1 + VPAND Y4, Y2, Y2 + VPAND Y4, Y3, Y3 + + VPSLLVQ Y6, Y1, Y1 + VPSLLVQ Y7, Y2, Y2 + VPSLLVQ Y8, Y3, Y3 + + VPOR Y1, Y0, Y0 + VPOR Y3, Y2, Y2 + VPOR Y2, Y0, Y0 + + VPERMQ $0b00001010, Y0, Y1 + + VPSLLVQ X9, X1, X2 + VPSRLQ X12, X1, X3 + VBLENDPD $0b10, X3, X2, X1 + VBLENDPD $0b10, X5, X0, X0 + VPOR X1, X0, X0 + + VMOVDQU X0, (AX) + + ADDQ DX, AX + ADDQ $32, SI +test: + CMPQ SI, CX + JNE loop + VZEROUPPER + SUBQ dst+0(FP), AX + MOVQ AX, ret+56(FP) + RET diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_purego.go b/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_purego.go new file mode 100644 index 00000000000..8f3462d14c0 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/rle/rle_purego.go @@ -0,0 +1,22 @@ +//go:build purego || !amd64 + +package rle + +func encodeBytesBitpack(dst []byte, src []uint64, bitWidth uint) int { + return encodeBytesBitpackDefault(dst, src, bitWidth) +} + +func encodeInt32IndexEqual8Contiguous(words [][8]int32) (n int) { + for n < len(words) && words[n] != broadcast8x4(words[n][0]) { + n++ + } + return n +} + +func encodeInt32Bitpack(dst []byte, src [][8]int32, bitWidth uint) int { + return encodeInt32BitpackDefault(dst, src, bitWidth) +} + +func decodeBytesBitpack(dst, src []byte, count, bitWidth uint) { + decodeBytesBitpackDefault(dst, src, count, bitWidth) +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/LICENSE b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/LICENSE new file mode 100644 index 00000000000..1fbffdf72ad --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Segment.io, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/binary.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/binary.go new file mode 100644 index 00000000000..82d7fe610f5 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/binary.go @@ -0,0 +1,369 @@ +package thrift + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + + "github.com/parquet-go/bitpack/unsafecast" +) + +// BinaryProtocol is a Protocol implementation for the binary thrift protocol. 
+// +// https://github.com/apache/thrift/blob/master/doc/specs/thrift-binary-protocol.md +type BinaryProtocol struct { + NonStrict bool +} + +func (p *BinaryProtocol) NewReader(r io.Reader) Reader { + return &binaryReader{p: p, r: r} +} + +func (p *BinaryProtocol) NewWriter(w io.Writer) Writer { + return &binaryWriter{p: p, w: w} +} + +func (p *BinaryProtocol) Features() Features { + return 0 +} + +type binaryReader struct { + p *BinaryProtocol + r io.Reader + b [8]byte +} + +func (r *binaryReader) Protocol() Protocol { + return r.p +} + +func (r *binaryReader) Reader() io.Reader { + return r.r +} + +func (r *binaryReader) ReadBool() (bool, error) { + v, err := r.ReadByte() + return v != 0, err +} + +func (r *binaryReader) ReadInt8() (int8, error) { + b, err := r.ReadByte() + return int8(b), err +} + +func (r *binaryReader) ReadInt16() (int16, error) { + b, err := r.read(2) + if len(b) < 2 { + return 0, err + } + return int16(binary.BigEndian.Uint16(b)), nil +} + +func (r *binaryReader) ReadInt32() (int32, error) { + b, err := r.read(4) + if len(b) < 4 { + return 0, err + } + return int32(binary.BigEndian.Uint32(b)), nil +} + +func (r *binaryReader) ReadInt64() (int64, error) { + b, err := r.read(8) + if len(b) < 8 { + return 0, err + } + return int64(binary.BigEndian.Uint64(b)), nil +} + +func (r *binaryReader) ReadFloat64() (float64, error) { + b, err := r.read(8) + if len(b) < 8 { + return 0, err + } + return math.Float64frombits(binary.BigEndian.Uint64(b)), nil +} + +func (r *binaryReader) ReadBytes() ([]byte, error) { + n, err := r.ReadLength() + if err != nil { + return nil, err + } + b := make([]byte, n) + _, err = io.ReadFull(r.r, b) + return b, err +} + +func (r *binaryReader) ReadString() (string, error) { + b, err := r.ReadBytes() + return unsafecast.String(b), err +} + +func (r *binaryReader) ReadLength() (int, error) { + b, err := r.read(4) + if len(b) < 4 { + return 0, err + } + n := binary.BigEndian.Uint32(b) + if n > math.MaxInt32 { + return 0, 
fmt.Errorf("length out of range: %d", n) + } + return int(n), nil +} + +func (r *binaryReader) ReadMessage() (Message, error) { + m := Message{} + + b, err := r.read(4) + if len(b) < 4 { + return m, err + } + + if (b[0] >> 7) == 0 { // non-strict + n := int(binary.BigEndian.Uint32(b)) + s := make([]byte, n) + _, err := io.ReadFull(r.r, s) + if err != nil { + return m, dontExpectEOF(err) + } + m.Name = unsafecast.String(s) + + t, err := r.ReadInt8() + if err != nil { + return m, dontExpectEOF(err) + } + + m.Type = MessageType(t & 0x7) + } else { + m.Type = MessageType(b[3] & 0x7) + + if m.Name, err = r.ReadString(); err != nil { + return m, dontExpectEOF(err) + } + } + + m.SeqID, err = r.ReadInt32() + return m, err +} + +func (r *binaryReader) ReadField() (Field, error) { + t, err := r.ReadInt8() + if err != nil { + return Field{}, err + } + i, err := r.ReadInt16() + if err != nil { + return Field{}, err + } + return Field{ID: i, Type: Type(t)}, nil +} + +func (r *binaryReader) ReadList() (List, error) { + t, err := r.ReadInt8() + if err != nil { + return List{}, err + } + n, err := r.ReadInt32() + if err != nil { + return List{}, dontExpectEOF(err) + } + return List{Size: n, Type: Type(t)}, nil +} + +func (r *binaryReader) ReadSet() (Set, error) { + l, err := r.ReadList() + return Set(l), err +} + +func (r *binaryReader) ReadMap() (Map, error) { + k, err := r.ReadByte() + if err != nil { + return Map{}, err + } + v, err := r.ReadByte() + if err != nil { + return Map{}, dontExpectEOF(err) + } + n, err := r.ReadInt32() + if err != nil { + return Map{}, dontExpectEOF(err) + } + return Map{Size: n, Key: Type(k), Value: Type(v)}, nil +} + +func (r *binaryReader) ReadByte() (byte, error) { + switch x := r.r.(type) { + case *bytes.Buffer: + return x.ReadByte() + case *bytes.Reader: + return x.ReadByte() + case *bufio.Reader: + return x.ReadByte() + case io.ByteReader: + return x.ReadByte() + default: + b, err := r.read(1) + if err != nil { + return 0, err + } + return 
b[0], nil + } +} + +func (r *binaryReader) read(n int) ([]byte, error) { + _, err := io.ReadFull(r.r, r.b[:n]) + return r.b[:n], err +} + +type binaryWriter struct { + p *BinaryProtocol + b [8]byte + w io.Writer +} + +func (w *binaryWriter) Protocol() Protocol { + return w.p +} + +func (w *binaryWriter) Writer() io.Writer { + return w.w +} + +func (w *binaryWriter) WriteBool(v bool) error { + var b byte + if v { + b = 1 + } + return w.writeByte(b) +} + +func (w *binaryWriter) WriteInt8(v int8) error { + return w.writeByte(byte(v)) +} + +func (w *binaryWriter) WriteInt16(v int16) error { + binary.BigEndian.PutUint16(w.b[:2], uint16(v)) + return w.write(w.b[:2]) +} + +func (w *binaryWriter) WriteInt32(v int32) error { + binary.BigEndian.PutUint32(w.b[:4], uint32(v)) + return w.write(w.b[:4]) +} + +func (w *binaryWriter) WriteInt64(v int64) error { + binary.BigEndian.PutUint64(w.b[:8], uint64(v)) + return w.write(w.b[:8]) +} + +func (w *binaryWriter) WriteFloat64(v float64) error { + binary.BigEndian.PutUint64(w.b[:8], math.Float64bits(v)) + return w.write(w.b[:8]) +} + +func (w *binaryWriter) WriteBytes(v []byte) error { + if err := w.WriteLength(len(v)); err != nil { + return err + } + return w.write(v) +} + +func (w *binaryWriter) WriteString(v string) error { + if err := w.WriteLength(len(v)); err != nil { + return err + } + return w.writeString(v) +} + +func (w *binaryWriter) WriteLength(n int) error { + if n < 0 { + return fmt.Errorf("negative length cannot be encoded in thrift: %d", n) + } + if n > math.MaxInt32 { + return fmt.Errorf("length is too large to be encoded in thrift: %d", n) + } + return w.WriteInt32(int32(n)) +} + +func (w *binaryWriter) WriteMessage(m Message) error { + if w.p.NonStrict { + if err := w.WriteString(m.Name); err != nil { + return err + } + if err := w.writeByte(byte(m.Type)); err != nil { + return err + } + } else { + w.b[0] = 1 << 7 + w.b[1] = 0 + w.b[2] = 0 + w.b[3] = byte(m.Type) & 0x7 + binary.BigEndian.PutUint32(w.b[4:], 
uint32(len(m.Name))) + + if err := w.write(w.b[:8]); err != nil { + return err + } + if err := w.writeString(m.Name); err != nil { + return err + } + } + return w.WriteInt32(m.SeqID) +} + +func (w *binaryWriter) WriteField(f Field) error { + if err := w.writeByte(byte(f.Type)); err != nil { + return err + } + return w.WriteInt16(f.ID) +} + +func (w *binaryWriter) WriteList(l List) error { + if err := w.writeByte(byte(l.Type)); err != nil { + return err + } + return w.WriteInt32(l.Size) +} + +func (w *binaryWriter) WriteSet(s Set) error { + return w.WriteList(List(s)) +} + +func (w *binaryWriter) WriteMap(m Map) error { + if err := w.writeByte(byte(m.Key)); err != nil { + return err + } + if err := w.writeByte(byte(m.Value)); err != nil { + return err + } + return w.WriteInt32(m.Size) +} + +func (w *binaryWriter) write(b []byte) error { + _, err := w.w.Write(b) + return err +} + +func (w *binaryWriter) writeString(s string) error { + _, err := io.WriteString(w.w, s) + return err +} + +func (w *binaryWriter) writeByte(b byte) error { + // The special cases are intended to reduce the runtime overheadof testing + // for the io.ByteWriter interface for common types. Type assertions on a + // concrete type is just a pointer comparison, instead of requiring a + // complex lookup in the type metadata. 
+ switch x := w.w.(type) { + case *bytes.Buffer: + return x.WriteByte(b) + case *bufio.Writer: + return x.WriteByte(b) + case io.ByteWriter: + return x.WriteByte(b) + default: + w.b[0] = b + return w.write(w.b[:1]) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/compact.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/compact.go new file mode 100644 index 00000000000..65d98c2b386 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/compact.go @@ -0,0 +1,348 @@ +package thrift + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + + "github.com/parquet-go/bitpack/unsafecast" +) + +// CompactProtocol is a Protocol implementation for the compact thrift protocol. +// +// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md#integer-encoding +type CompactProtocol struct{} + +func (p *CompactProtocol) NewReader(r io.Reader) Reader { + return &compactReader{protocol: p, binary: binaryReader{r: r}} +} + +func (p *CompactProtocol) NewWriter(w io.Writer) Writer { + return &compactWriter{protocol: p, binary: binaryWriter{w: w}} +} + +func (p *CompactProtocol) Features() Features { + return UseDeltaEncoding | CoalesceBoolFields +} + +type compactReader struct { + protocol *CompactProtocol + binary binaryReader +} + +func (r *compactReader) Protocol() Protocol { + return r.protocol +} + +func (r *compactReader) Reader() io.Reader { + return r.binary.Reader() +} + +func (r *compactReader) ReadBool() (bool, error) { + return r.binary.ReadBool() +} + +func (r *compactReader) ReadInt8() (int8, error) { + return r.binary.ReadInt8() +} + +func (r *compactReader) ReadInt16() (int16, error) { + v, err := r.readVarint("int16", math.MinInt16, math.MaxInt16) + return int16(v), err +} + +func (r *compactReader) ReadInt32() (int32, error) { + v, err := r.readVarint("int32", math.MinInt32, math.MaxInt32) + return int32(v), err +} + +func (r *compactReader) ReadInt64() (int64, 
error) { + return r.readVarint("int64", math.MinInt64, math.MaxInt64) +} + +func (r *compactReader) ReadFloat64() (float64, error) { + return r.binary.ReadFloat64() +} + +func (r *compactReader) ReadBytes() ([]byte, error) { + n, err := r.ReadLength() + if err != nil { + return nil, err + } + b := make([]byte, n) + _, err = io.ReadFull(r.Reader(), b) + return b, err +} + +func (r *compactReader) ReadString() (string, error) { + b, err := r.ReadBytes() + return unsafecast.String(b), err +} + +func (r *compactReader) ReadLength() (int, error) { + n, err := r.readUvarint("length", math.MaxInt32) + return int(n), err +} + +func (r *compactReader) ReadMessage() (Message, error) { + m := Message{} + + b0, err := r.ReadByte() + if err != nil { + return m, err + } + if b0 != 0x82 { + return m, fmt.Errorf("invalid protocol id found when reading thrift message: %#x", b0) + } + + b1, err := r.ReadByte() + if err != nil { + return m, dontExpectEOF(err) + } + + seqID, err := r.readUvarint("seq id", math.MaxInt32) + if err != nil { + return m, dontExpectEOF(err) + } + + m.Type = MessageType(b1) & 0x7 + m.SeqID = int32(seqID) + m.Name, err = r.ReadString() + return m, dontExpectEOF(err) +} + +func (r *compactReader) ReadField() (Field, error) { + f := Field{} + + b, err := r.ReadByte() + if err != nil { + return f, err + } + + if Type(b) == STOP { + return f, nil + } + + if (b >> 4) != 0 { + f = Field{ID: int16(b >> 4), Type: Type(b & 0xF), Delta: true} + } else { + i, err := r.ReadInt16() + if err != nil { + return f, dontExpectEOF(err) + } + f = Field{ID: i, Type: Type(b)} + } + + return f, nil +} + +func (r *compactReader) ReadList() (List, error) { + b, err := r.ReadByte() + if err != nil { + return List{}, err + } + if (b >> 4) != 0xF { + return List{Size: int32(b >> 4), Type: Type(b & 0xF)}, nil + } + n, err := r.readUvarint("list size", math.MaxInt32) + if err != nil { + return List{}, dontExpectEOF(err) + } + return List{Size: int32(n), Type: Type(b & 0xF)}, nil +} + 
+func (r *compactReader) ReadSet() (Set, error) { + l, err := r.ReadList() + return Set(l), err +} + +func (r *compactReader) ReadMap() (Map, error) { + n, err := r.readUvarint("map size", math.MaxInt32) + if err != nil { + return Map{}, err + } + if n == 0 { // empty map + return Map{}, nil + } + b, err := r.ReadByte() + if err != nil { + return Map{}, dontExpectEOF(err) + } + return Map{Size: int32(n), Key: Type(b >> 4), Value: Type(b & 0xF)}, nil +} + +func (r *compactReader) ReadByte() (byte, error) { + return r.binary.ReadByte() +} + +func (r *compactReader) readUvarint(typ string, max uint64) (uint64, error) { + var br io.ByteReader + + switch x := r.Reader().(type) { + case *bytes.Buffer: + br = x + case *bytes.Reader: + br = x + case *bufio.Reader: + br = x + case io.ByteReader: + br = x + default: + br = &r.binary + } + + u, err := binary.ReadUvarint(br) + if err == nil { + if u > max { + err = fmt.Errorf("%s varint out of range: %d > %d", typ, u, max) + } + } + return u, err +} + +func (r *compactReader) readVarint(typ string, min, max int64) (int64, error) { + var br io.ByteReader + + switch x := r.Reader().(type) { + case *bytes.Buffer: + br = x + case *bytes.Reader: + br = x + case *bufio.Reader: + br = x + case io.ByteReader: + br = x + default: + br = &r.binary + } + + v, err := binary.ReadVarint(br) + if err == nil { + if v < min || v > max { + err = fmt.Errorf("%s varint out of range: %d not in [%d;%d]", typ, v, min, max) + } + } + return v, err +} + +type compactWriter struct { + protocol *CompactProtocol + binary binaryWriter + varint [binary.MaxVarintLen64]byte +} + +func (w *compactWriter) Protocol() Protocol { + return w.protocol +} + +func (w *compactWriter) Writer() io.Writer { + return w.binary.Writer() +} + +func (w *compactWriter) WriteBool(v bool) error { + return w.binary.WriteBool(v) +} + +func (w *compactWriter) WriteInt8(v int8) error { + return w.binary.WriteInt8(v) +} + +func (w *compactWriter) WriteInt16(v int16) error { + return 
w.writeVarint(int64(v)) +} + +func (w *compactWriter) WriteInt32(v int32) error { + return w.writeVarint(int64(v)) +} + +func (w *compactWriter) WriteInt64(v int64) error { + return w.writeVarint(v) +} + +func (w *compactWriter) WriteFloat64(v float64) error { + return w.binary.WriteFloat64(v) +} + +func (w *compactWriter) WriteBytes(v []byte) error { + if err := w.WriteLength(len(v)); err != nil { + return err + } + return w.binary.write(v) +} + +func (w *compactWriter) WriteString(v string) error { + if err := w.WriteLength(len(v)); err != nil { + return err + } + return w.binary.writeString(v) +} + +func (w *compactWriter) WriteLength(n int) error { + if n < 0 { + return fmt.Errorf("negative length cannot be encoded in thrift: %d", n) + } + if n > math.MaxInt32 { + return fmt.Errorf("length is too large to be encoded in thrift: %d", n) + } + return w.writeUvarint(uint64(n)) +} + +func (w *compactWriter) WriteMessage(m Message) error { + if err := w.binary.writeByte(0x82); err != nil { + return err + } + if err := w.binary.writeByte(byte(m.Type)); err != nil { + return err + } + if err := w.writeUvarint(uint64(m.SeqID)); err != nil { + return err + } + return w.WriteString(m.Name) +} + +func (w *compactWriter) WriteField(f Field) error { + if f.Type == STOP { + return w.binary.writeByte(0) + } + if f.ID <= 15 { + return w.binary.writeByte(byte(f.ID<<4) | byte(f.Type)) + } + if err := w.binary.writeByte(byte(f.Type)); err != nil { + return err + } + return w.WriteInt16(f.ID) +} + +func (w *compactWriter) WriteList(l List) error { + if l.Size <= 14 { + return w.binary.writeByte(byte(l.Size<<4) | byte(l.Type)) + } + if err := w.binary.writeByte(0xF0 | byte(l.Type)); err != nil { + return err + } + return w.writeUvarint(uint64(l.Size)) +} + +func (w *compactWriter) WriteSet(s Set) error { + return w.WriteList(List(s)) +} + +func (w *compactWriter) WriteMap(m Map) error { + if err := w.writeUvarint(uint64(m.Size)); err != nil || m.Size == 0 { + return err + } + return 
w.binary.writeByte((byte(m.Key) << 4) | byte(m.Value)) +} + +func (w *compactWriter) writeUvarint(v uint64) error { + n := binary.PutUvarint(w.varint[:], v) + return w.binary.write(w.varint[:n]) +} + +func (w *compactWriter) writeVarint(v int64) error { + n := binary.PutVarint(w.varint[:], v) + return w.binary.write(w.varint[:n]) +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/debug.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/debug.go new file mode 100644 index 00000000000..25fe7da2f09 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/debug.go @@ -0,0 +1,230 @@ +package thrift + +import ( + "io" + "log" +) + +func NewDebugReader(r Reader, l *log.Logger) Reader { + return &debugReader{ + r: r, + l: l, + } +} + +func NewDebugWriter(w Writer, l *log.Logger) Writer { + return &debugWriter{ + w: w, + l: l, + } +} + +type debugReader struct { + r Reader + l *log.Logger +} + +func (d *debugReader) log(method string, res any, err error) { + if err != nil { + d.l.Printf("(%T).%s() → ERROR: %v", d.r, method, err) + } else { + d.l.Printf("(%T).%s() → %#v", d.r, method, res) + } +} + +func (d *debugReader) Protocol() Protocol { + return d.r.Protocol() +} + +func (d *debugReader) Reader() io.Reader { + return d.r.Reader() +} + +func (d *debugReader) ReadBool() (bool, error) { + v, err := d.r.ReadBool() + d.log("ReadBool", v, err) + return v, err +} + +func (d *debugReader) ReadInt8() (int8, error) { + v, err := d.r.ReadInt8() + d.log("ReadInt8", v, err) + return v, err +} + +func (d *debugReader) ReadInt16() (int16, error) { + v, err := d.r.ReadInt16() + d.log("ReadInt16", v, err) + return v, err +} + +func (d *debugReader) ReadInt32() (int32, error) { + v, err := d.r.ReadInt32() + d.log("ReadInt32", v, err) + return v, err +} + +func (d *debugReader) ReadInt64() (int64, error) { + v, err := d.r.ReadInt64() + d.log("ReadInt64", v, err) + return v, err +} + +func (d *debugReader) ReadFloat64() (float64, error) { 
+ v, err := d.r.ReadFloat64() + d.log("ReadFloat64", v, err) + return v, err +} + +func (d *debugReader) ReadBytes() ([]byte, error) { + v, err := d.r.ReadBytes() + d.log("ReadBytes", v, err) + return v, err +} + +func (d *debugReader) ReadString() (string, error) { + v, err := d.r.ReadString() + d.log("ReadString", v, err) + return v, err +} + +func (d *debugReader) ReadLength() (int, error) { + v, err := d.r.ReadLength() + d.log("ReadLength", v, err) + return v, err +} + +func (d *debugReader) ReadMessage() (Message, error) { + v, err := d.r.ReadMessage() + d.log("ReadMessage", v, err) + return v, err +} + +func (d *debugReader) ReadField() (Field, error) { + v, err := d.r.ReadField() + d.log("ReadField", v, err) + return v, err +} + +func (d *debugReader) ReadList() (List, error) { + v, err := d.r.ReadList() + d.log("ReadList", v, err) + return v, err +} + +func (d *debugReader) ReadSet() (Set, error) { + v, err := d.r.ReadSet() + d.log("ReadSet", v, err) + return v, err +} + +func (d *debugReader) ReadMap() (Map, error) { + v, err := d.r.ReadMap() + d.log("ReadMap", v, err) + return v, err +} + +type debugWriter struct { + w Writer + l *log.Logger +} + +func (d *debugWriter) log(method string, arg any, err error) { + if err != nil { + d.l.Printf("(%T).%s(%#v) → ERROR: %v", d.w, method, arg, err) + } else { + d.l.Printf("(%T).%s(%#v)", d.w, method, arg) + } +} + +func (d *debugWriter) Protocol() Protocol { + return d.w.Protocol() +} + +func (d *debugWriter) Writer() io.Writer { + return d.w.Writer() +} + +func (d *debugWriter) WriteBool(v bool) error { + err := d.w.WriteBool(v) + d.log("WriteBool", v, err) + return err +} + +func (d *debugWriter) WriteInt8(v int8) error { + err := d.w.WriteInt8(v) + d.log("WriteInt8", v, err) + return err +} + +func (d *debugWriter) WriteInt16(v int16) error { + err := d.w.WriteInt16(v) + d.log("WriteInt16", v, err) + return err +} + +func (d *debugWriter) WriteInt32(v int32) error { + err := d.w.WriteInt32(v) + 
d.log("WriteInt32", v, err) + return err +} + +func (d *debugWriter) WriteInt64(v int64) error { + err := d.w.WriteInt64(v) + d.log("WriteInt64", v, err) + return err +} + +func (d *debugWriter) WriteFloat64(v float64) error { + err := d.w.WriteFloat64(v) + d.log("WriteFloat64", v, err) + return err +} + +func (d *debugWriter) WriteBytes(v []byte) error { + err := d.w.WriteBytes(v) + d.log("WriteBytes", v, err) + return err +} + +func (d *debugWriter) WriteString(v string) error { + err := d.w.WriteString(v) + d.log("WriteString", v, err) + return err +} + +func (d *debugWriter) WriteLength(n int) error { + err := d.w.WriteLength(n) + d.log("WriteLength", n, err) + return err +} + +func (d *debugWriter) WriteMessage(m Message) error { + err := d.w.WriteMessage(m) + d.log("WriteMessage", m, err) + return err +} + +func (d *debugWriter) WriteField(f Field) error { + err := d.w.WriteField(f) + d.log("WriteField", f, err) + return err +} + +func (d *debugWriter) WriteList(l List) error { + err := d.w.WriteList(l) + d.log("WriteList", l, err) + return err +} + +func (d *debugWriter) WriteSet(s Set) error { + err := d.w.WriteSet(s) + d.log("WriteSet", s, err) + return err +} + +func (d *debugWriter) WriteMap(m Map) error { + err := d.w.WriteMap(m) + d.log("WriteMap", m, err) + return err +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/decode.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/decode.go new file mode 100644 index 00000000000..5db34091b66 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/decode.go @@ -0,0 +1,689 @@ +package thrift + +import ( + "bufio" + "bytes" + "fmt" + "io" + "maps" + "reflect" + "sync/atomic" +) + +// Unmarshal deserializes the thrift data from b to v using to the protocol p. +// +// The function errors if the data in b does not match the type of v. +// +// The function panics if v cannot be converted to a thrift representation. 
+// +// As an optimization, the value passed in v may be reused across multiple calls +// to Unmarshal, allowing the function to reuse objects referenced by pointer +// fields of struct values. When reusing objects, the application is responsible +// for resetting the state of v before calling Unmarshal again. +func Unmarshal(p Protocol, b []byte, v any) error { + br := bytes.NewReader(b) + pr := p.NewReader(br) + + if err := NewDecoder(pr).Decode(v); err != nil { + return err + } + + if n := br.Len(); n != 0 { + return fmt.Errorf("unexpected trailing bytes at the end of thrift input: %d", n) + } + + return nil +} + +type Decoder struct { + r Reader + f flags +} + +func NewDecoder(r Reader) *Decoder { + return &Decoder{r: r, f: decoderFlags(r)} +} + +func (d *Decoder) Decode(v any) error { + t := reflect.TypeOf(v) + p := reflect.ValueOf(v) + + if t.Kind() != reflect.Ptr { + panic("thrift.(*Decoder).Decode: expected pointer type but got " + t.String()) + } + + t = t.Elem() + p = p.Elem() + + cache, _ := decoderCache.Load().(map[typeID]decodeFunc) + decode, _ := cache[makeTypeID(t)] + + if decode == nil { + decode = decodeFuncOf(t, make(decodeFuncCache)) + + newCache := make(map[typeID]decodeFunc, len(cache)+1) + newCache[makeTypeID(t)] = decode + maps.Copy(newCache, cache) + + decoderCache.Store(newCache) + } + + return decode(d.r, p, d.f) +} + +func (d *Decoder) Reset(r Reader) { + d.r = r + d.f = d.f.without(protocolFlags).with(decoderFlags(r)) +} + +func (d *Decoder) SetStrict(enabled bool) { + if enabled { + d.f = d.f.with(strict) + } else { + d.f = d.f.without(strict) + } +} + +func decoderFlags(r Reader) flags { + return flags(r.Protocol().Features() << featuresBitOffset) +} + +var decoderCache atomic.Value // map[typeID]decodeFunc + +type decodeFunc func(Reader, reflect.Value, flags) error + +type decodeFuncCache map[reflect.Type]decodeFunc + +func decodeFuncOf(t reflect.Type, seen decodeFuncCache) decodeFunc { + f := seen[t] + if f != nil { + return f + } + 
switch t.Kind() { + case reflect.Bool: + f = decodeBool + case reflect.Int8: + f = decodeInt8 + case reflect.Int16: + f = decodeInt16 + case reflect.Int32: + f = decodeInt32 + case reflect.Int64, reflect.Int: + f = decodeInt64 + case reflect.Float32, reflect.Float64: + f = decodeFloat64 + case reflect.String: + f = decodeString + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { // []byte + f = decodeBytes + } else { + f = decodeFuncSliceOf(t, seen) + } + case reflect.Map: + f = decodeFuncMapOf(t, seen) + case reflect.Struct: + f = decodeFuncStructOf(t, seen) + case reflect.Ptr: + f = decodeFuncPtrOf(t, seen) + default: + panic("type cannot be decoded in thrift: " + t.String()) + } + seen[t] = f + return f +} + +func decodeBool(r Reader, v reflect.Value, _ flags) error { + b, err := r.ReadBool() + if err != nil { + return err + } + v.SetBool(b) + return nil +} + +func decodeInt8(r Reader, v reflect.Value, _ flags) error { + i, err := r.ReadInt8() + if err != nil { + return err + } + v.SetInt(int64(i)) + return nil +} + +func decodeInt16(r Reader, v reflect.Value, _ flags) error { + i, err := r.ReadInt16() + if err != nil { + return err + } + v.SetInt(int64(i)) + return nil +} + +func decodeInt32(r Reader, v reflect.Value, _ flags) error { + i, err := r.ReadInt32() + if err != nil { + return err + } + v.SetInt(int64(i)) + return nil +} + +func decodeInt64(r Reader, v reflect.Value, _ flags) error { + i, err := r.ReadInt64() + if err != nil { + return err + } + v.SetInt(int64(i)) + return nil +} + +func decodeFloat64(r Reader, v reflect.Value, _ flags) error { + f, err := r.ReadFloat64() + if err != nil { + return err + } + v.SetFloat(f) + return nil +} + +func decodeString(r Reader, v reflect.Value, _ flags) error { + s, err := r.ReadString() + if err != nil { + return err + } + v.SetString(s) + return nil +} + +func decodeBytes(r Reader, v reflect.Value, _ flags) error { + b, err := r.ReadBytes() + if err != nil { + return err + } + v.SetBytes(b) + return 
nil +} + +func decodeFuncSliceOf(t reflect.Type, seen decodeFuncCache) decodeFunc { + elem := t.Elem() + typ := TypeOf(elem) + dec := decodeFuncOf(elem, seen) + + return func(r Reader, v reflect.Value, flags flags) error { + l, err := r.ReadList() + if err != nil { + return err + } + + // Sometimes the list type is set to TRUE when the list contains only + // TRUE values. Thrift does not seem to optimize the encoding by + // omitting the boolean values that are known to all be TRUE, we still + // need to decode them. + switch l.Type { + case TRUE: + l.Type = BOOL + } + + // TODO: implement type conversions? + if typ != l.Type { + if flags.have(strict) { + return &TypeMismatch{item: "list item", Expect: typ, Found: l.Type} + } + return nil + } + + v.Set(reflect.MakeSlice(t, int(l.Size), int(l.Size))) + flags = flags.only(decodeFlags) + + for i := range int(l.Size) { + if err := dec(r, v.Index(i), flags); err != nil { + return with(dontExpectEOF(err), &decodeErrorList{cause: l, index: i}) + } + } + + return nil + } +} + +func decodeFuncMapOf(t reflect.Type, seen decodeFuncCache) decodeFunc { + key, elem := t.Key(), t.Elem() + if elem.Size() == 0 { // map[?]struct{} + return decodeFuncMapAsSetOf(t, seen) + } + + mapType := reflect.MapOf(key, elem) + keyZero := reflect.Zero(key) + elemZero := reflect.Zero(elem) + keyType := TypeOf(key) + elemType := TypeOf(elem) + decodeKey := decodeFuncOf(key, seen) + decodeElem := decodeFuncOf(elem, seen) + + return func(r Reader, v reflect.Value, flags flags) error { + m, err := r.ReadMap() + if err != nil { + return err + } + + v.Set(reflect.MakeMapWithSize(mapType, int(m.Size))) + + if m.Size == 0 { // empty map + return nil + } + + // TODO: implement type conversions? 
+ if keyType != m.Key { + if flags.have(strict) { + return &TypeMismatch{item: "map key", Expect: keyType, Found: m.Key} + } + return nil + } + + if elemType != m.Value { + if flags.have(strict) { + return &TypeMismatch{item: "map value", Expect: elemType, Found: m.Value} + } + return nil + } + + tmpKey := reflect.New(key).Elem() + tmpElem := reflect.New(elem).Elem() + flags = flags.only(decodeFlags) + + for i := range int(m.Size) { + if err := decodeKey(r, tmpKey, flags); err != nil { + return with(dontExpectEOF(err), &decodeErrorMap{cause: m, index: i}) + } + if err := decodeElem(r, tmpElem, flags); err != nil { + return with(dontExpectEOF(err), &decodeErrorMap{cause: m, index: i}) + } + v.SetMapIndex(tmpKey, tmpElem) + tmpKey.Set(keyZero) + tmpElem.Set(elemZero) + } + + return nil + } +} + +func decodeFuncMapAsSetOf(t reflect.Type, seen decodeFuncCache) decodeFunc { + key, elem := t.Key(), t.Elem() + keyZero := reflect.Zero(key) + elemZero := reflect.Zero(elem) + typ := TypeOf(key) + dec := decodeFuncOf(key, seen) + + return func(r Reader, v reflect.Value, flags flags) error { + s, err := r.ReadSet() + if err != nil { + return err + } + + // See decodeFuncSliceOf for details about why this type conversion + // needs to be done. + switch s.Type { + case TRUE: + s.Type = BOOL + } + + v.Set(reflect.MakeMapWithSize(t, int(s.Size))) + + if s.Size == 0 { + return nil + } + + // TODO: implement type conversions? 
+ if typ != s.Type { + if flags.have(strict) { + return &TypeMismatch{item: "list item", Expect: typ, Found: s.Type} + } + return nil + } + + tmp := reflect.New(key).Elem() + flags = flags.only(decodeFlags) + + for i := range int(s.Size) { + if err := dec(r, tmp, flags); err != nil { + return with(dontExpectEOF(err), &decodeErrorSet{cause: s, index: i}) + } + v.SetMapIndex(tmp, elemZero) + tmp.Set(keyZero) + } + + return nil + } +} + +type structDecoder struct { + fields []structDecoderField + union []int + minID int16 + zero reflect.Value + required []uint64 +} + +func (dec *structDecoder) decode(r Reader, v reflect.Value, flags flags) error { + flags = flags.only(decodeFlags) + coalesceBoolFields := flags.have(coalesceBoolFields) + + lastField := reflect.Value{} + union := len(dec.union) > 0 + seen := make([]uint64, 1) + if len(dec.required) > len(seen) { + seen = make([]uint64, len(dec.required)) + } + + err := readStruct(r, func(r Reader, f Field) error { + i := int(f.ID) - int(dec.minID) + if i < 0 || i >= len(dec.fields) || dec.fields[i].decode == nil { + return skipField(r, f) + } + field := &dec.fields[i] + seen[i/64] |= 1 << (i % 64) + + // TODO: implement type conversions? 
+ if f.Type != field.typ && !(f.Type == TRUE && field.typ == BOOL) { + if flags.have(strict) { + return &TypeMismatch{item: "field value", Expect: field.typ, Found: f.Type} + } + return nil + } + + x := v + for _, i := range field.index { + if x.Kind() == reflect.Ptr { + x = x.Elem() + } + if x = x.Field(i); x.Kind() == reflect.Ptr { + if x.IsNil() { + x.Set(reflect.New(x.Type().Elem())) + } + } + } + + if union { + v.Set(dec.zero) + } + + lastField = x + + if coalesceBoolFields && (f.Type == TRUE || f.Type == FALSE) { + for x.Kind() == reflect.Ptr { + if x.IsNil() { + x.Set(reflect.New(x.Type().Elem())) + } + x = x.Elem() + } + x.SetBool(f.Type == TRUE) + return nil + } + + return field.decode(r, x, flags.with(field.flags)) + }) + if err != nil { + return err + } + + for i, required := range dec.required { + if mask := required & seen[i]; mask != required { + missing := required &^ seen[i] + i *= 64 + for (missing & 1) == 0 { + missing >>= 1 + i++ + } + field := &dec.fields[i] + return &MissingField{Field: Field{ID: field.id, Type: field.typ}} + } + } + + if union && lastField.IsValid() { + v.FieldByIndex(dec.union).Set(lastField.Addr()) + } + + return nil +} + +type structDecoderField struct { + index []int + id int16 + flags flags + typ Type + decode decodeFunc +} + +func decodeFuncStructOf(t reflect.Type, seen decodeFuncCache) decodeFunc { + dec := &structDecoder{ + zero: reflect.Zero(t), + } + decode := dec.decode + seen[t] = decode + + fields := make([]structDecoderField, 0, t.NumField()) + forEachStructField(t, nil, func(f structField) { + if f.flags.have(union) { + dec.union = f.index + } else { + fields = append(fields, structDecoderField{ + index: f.index, + id: f.id, + flags: f.flags, + typ: TypeOf(f.typ), + decode: decodeFuncStructFieldOf(f, seen), + }) + } + }) + + minID := int16(0) + maxID := int16(0) + + for _, f := range fields { + if f.id < minID || minID == 0 { + minID = f.id + } + if f.id > maxID { + maxID = f.id + } + } + + dec.fields = 
make([]structDecoderField, (maxID-minID)+1) + dec.minID = minID + dec.required = make([]uint64, len(fields)/64+1) + + for _, f := range fields { + i := f.id - minID + p := dec.fields[i] + if p.decode != nil { + panic(fmt.Errorf("thrift struct field id %d is present multiple times in %s with types %s and %s", f.id, t, p.typ, f.typ)) + } + dec.fields[i] = f + if f.flags.have(required) { + dec.required[i/64] |= 1 << (i % 64) + } + } + + return decode +} + +func decodeFuncStructFieldOf(f structField, seen decodeFuncCache) decodeFunc { + if f.flags.have(enum) { + switch f.typ.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return decodeInt32 + } + } + return decodeFuncOf(f.typ, seen) +} + +func decodeFuncPtrOf(t reflect.Type, seen decodeFuncCache) decodeFunc { + elem := t.Elem() + decode := decodeFuncOf(t.Elem(), seen) + return func(r Reader, v reflect.Value, f flags) error { + if v.IsNil() { + v.Set(reflect.New(elem)) + } + return decode(r, v.Elem(), f) + } +} + +func readBinary(r Reader, f func(io.Reader) error) error { + n, err := r.ReadLength() + if err != nil { + return err + } + return dontExpectEOF(f(io.LimitReader(r.Reader(), int64(n)))) +} + +func readList(r Reader, f func(Reader, Type) error) error { + l, err := r.ReadList() + if err != nil { + return err + } + + for i := range int(l.Size) { + if err := f(r, l.Type); err != nil { + return with(dontExpectEOF(err), &decodeErrorList{cause: l, index: i}) + } + } + + return nil +} + +func readSet(r Reader, f func(Reader, Type) error) error { + s, err := r.ReadSet() + if err != nil { + return err + } + + for i := range int(s.Size) { + if err := f(r, s.Type); err != nil { + return with(dontExpectEOF(err), &decodeErrorSet{cause: s, index: i}) + } + } + + return nil +} + +func readMap(r Reader, f func(Reader, Type, Type) error) error { + m, err := r.ReadMap() + if err != nil { + return err + } + + for i := range int(m.Size) { + if err := f(r, m.Key, m.Value); err != nil { + 
return with(dontExpectEOF(err), &decodeErrorMap{cause: m, index: i}) + } + } + + return nil +} + +func readStruct(r Reader, f func(Reader, Field) error) error { + lastFieldID := int16(0) + numFields := 0 + + for { + x, err := r.ReadField() + if err != nil { + if numFields > 0 { + err = dontExpectEOF(err) + } + return err + } + + if x.Type == STOP { + return nil + } + + if x.Delta { + x.ID += lastFieldID + x.Delta = false + } + + if err := f(r, x); err != nil { + return with(dontExpectEOF(err), &decodeErrorField{cause: x}) + } + + lastFieldID = x.ID + numFields++ + } +} + +func skip(r Reader, t Type) error { + var err error + switch t { + case TRUE, FALSE: + _, err = r.ReadBool() + case I8: + _, err = r.ReadInt8() + case I16: + _, err = r.ReadInt16() + case I32: + _, err = r.ReadInt32() + case I64: + _, err = r.ReadInt64() + case DOUBLE: + _, err = r.ReadFloat64() + case BINARY: + err = skipBinary(r) + case LIST: + err = skipList(r) + case SET: + err = skipSet(r) + case MAP: + err = skipMap(r) + case STRUCT: + err = skipStruct(r) + default: + return fmt.Errorf("skipping unsupported thrift type %d", t) + } + return err +} + +func skipBinary(r Reader) error { + n, err := r.ReadLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + switch x := r.Reader().(type) { + case *bufio.Reader: + _, err = x.Discard(int(n)) + default: + _, err = io.CopyN(io.Discard, x, int64(n)) + } + return dontExpectEOF(err) +} + +func skipList(r Reader) error { + return readList(r, skip) +} + +func skipSet(r Reader) error { + return readSet(r, skip) +} + +func skipMap(r Reader) error { + return readMap(r, func(r Reader, k, v Type) error { + if err := skip(r, k); err != nil { + return dontExpectEOF(err) + } + if err := skip(r, v); err != nil { + return dontExpectEOF(err) + } + return nil + }) +} + +func skipStruct(r Reader) error { + return readStruct(r, skipField) +} + +func skipField(r Reader, f Field) error { + return skip(r, f.Type) +} diff --git 
a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/encode.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/encode.go new file mode 100644 index 00000000000..01193742706 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/encode.go @@ -0,0 +1,399 @@ +package thrift + +import ( + "bytes" + "cmp" + "fmt" + "maps" + "math" + "reflect" + "slices" + "sync/atomic" +) + +// Marshal serializes v into a thrift representation according to the the +// protocol p. +// +// The function panics if v cannot be converted to a thrift representation. +func Marshal(p Protocol, v any) ([]byte, error) { + buf := new(bytes.Buffer) + enc := NewEncoder(p.NewWriter(buf)) + err := enc.Encode(v) + return buf.Bytes(), err +} + +type Encoder struct { + w Writer + f flags +} + +func NewEncoder(w Writer) *Encoder { + return &Encoder{w: w, f: encoderFlags(w)} +} + +func (e *Encoder) Encode(v any) error { + t := reflect.TypeOf(v) + cache, _ := encoderCache.Load().(map[typeID]encodeFunc) + encode, _ := cache[makeTypeID(t)] + + if encode == nil { + encode = encodeFuncOf(t, make(encodeFuncCache)) + + newCache := make(map[typeID]encodeFunc, len(cache)+1) + newCache[makeTypeID(t)] = encode + maps.Copy(newCache, cache) + + encoderCache.Store(newCache) + } + + return encode(e.w, reflect.ValueOf(v), e.f) +} + +func (e *Encoder) Reset(w Writer) { + e.w = w + e.f = e.f.without(protocolFlags).with(encoderFlags(w)) +} + +func encoderFlags(w Writer) flags { + return flags(w.Protocol().Features() << featuresBitOffset) +} + +var encoderCache atomic.Value // map[typeID]encodeFunc + +type encodeFunc func(Writer, reflect.Value, flags) error + +type encodeFuncCache map[reflect.Type]encodeFunc + +func encodeFuncOf(t reflect.Type, seen encodeFuncCache) encodeFunc { + f := seen[t] + if f != nil { + return f + } + switch t.Kind() { + case reflect.Bool: + f = encodeBool + case reflect.Int8: + f = encodeInt8 + case reflect.Int16: + f = encodeInt16 + case reflect.Int32: + f = 
encodeInt32 + case reflect.Int64, reflect.Int: + f = encodeInt64 + case reflect.Float32, reflect.Float64: + f = encodeFloat64 + case reflect.String: + f = encodeString + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + f = encodeBytes + } else { + f = encodeFuncSliceOf(t, seen) + } + case reflect.Map: + f = encodeFuncMapOf(t, seen) + case reflect.Struct: + f = encodeFuncStructOf(t, seen) + case reflect.Ptr: + f = encodeFuncPtrOf(t, seen) + default: + panic("type cannot be encoded in thrift: " + t.String()) + } + seen[t] = f + return f +} + +func encodeBool(w Writer, v reflect.Value, _ flags) error { + return w.WriteBool(v.Bool()) +} + +func encodeInt8(w Writer, v reflect.Value, _ flags) error { + return w.WriteInt8(int8(v.Int())) +} + +func encodeInt16(w Writer, v reflect.Value, _ flags) error { + return w.WriteInt16(int16(v.Int())) +} + +func encodeInt32(w Writer, v reflect.Value, _ flags) error { + return w.WriteInt32(int32(v.Int())) +} + +func encodeInt64(w Writer, v reflect.Value, _ flags) error { + return w.WriteInt64(v.Int()) +} + +func encodeFloat64(w Writer, v reflect.Value, _ flags) error { + return w.WriteFloat64(v.Float()) +} + +func encodeString(w Writer, v reflect.Value, _ flags) error { + return w.WriteString(v.String()) +} + +func encodeBytes(w Writer, v reflect.Value, _ flags) error { + return w.WriteBytes(v.Bytes()) +} + +func encodeFuncSliceOf(t reflect.Type, seen encodeFuncCache) encodeFunc { + elem := t.Elem() + typ := TypeOf(elem) + enc := encodeFuncOf(elem, seen) + + return func(w Writer, v reflect.Value, flags flags) error { + n := v.Len() + if n > math.MaxInt32 { + return fmt.Errorf("slice length is too large to be represented in thrift: %d > max(int32)", n) + } + + err := w.WriteList(List{ + Size: int32(n), + Type: typ, + }) + if err != nil { + return err + } + + for i := range n { + if err := enc(w, v.Index(i), flags); err != nil { + return err + } + } + + return nil + } +} + +func encodeFuncMapOf(t reflect.Type, seen 
encodeFuncCache) encodeFunc { + key, elem := t.Key(), t.Elem() + if elem.Size() == 0 { // map[?]struct{} + return encodeFuncMapAsSetOf(t, seen) + } + + keyType := TypeOf(key) + elemType := TypeOf(elem) + encodeKey := encodeFuncOf(key, seen) + encodeElem := encodeFuncOf(elem, seen) + + return func(w Writer, v reflect.Value, flags flags) error { + n := v.Len() + if n > math.MaxInt32 { + return fmt.Errorf("map length is too large to be represented in thrift: %d > max(int32)", n) + } + + err := w.WriteMap(Map{ + Size: int32(n), + Key: keyType, + Value: elemType, + }) + if err != nil { + return err + } + if n == 0 { // empty map + return nil + } + + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := encodeKey(w, iter.Key(), flags); err != nil { + return err + } + if err := encodeElem(w, iter.Value(), flags); err != nil { + return err + } + } + + return nil + } +} + +func encodeFuncMapAsSetOf(t reflect.Type, seen encodeFuncCache) encodeFunc { + key := t.Key() + typ := TypeOf(key) + enc := encodeFuncOf(key, seen) + + return func(w Writer, v reflect.Value, flags flags) error { + n := v.Len() + if n > math.MaxInt32 { + return fmt.Errorf("map length is too large to be represented in thrift: %d > max(int32)", n) + } + + err := w.WriteSet(Set{ + Size: int32(n), + Type: typ, + }) + if err != nil { + return err + } + if n == 0 { // empty map + return nil + } + + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := enc(w, iter.Key(), flags); err != nil { + return err + } + } + + return nil + } +} + +type structEncoder struct { + fields []structEncoderField + union bool +} + +func dereference(v reflect.Value) reflect.Value { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return v + } + v = v.Elem() + } + return v +} + +func isTrue(v reflect.Value) bool { + v = dereference(v) + return v.IsValid() && v.Kind() == reflect.Bool && v.Bool() +} + +func (enc *structEncoder) encode(w Writer, v reflect.Value, flags flags) error { + useDeltaEncoding := 
flags.have(useDeltaEncoding) + coalesceBoolFields := flags.have(coalesceBoolFields) + numFields := int16(0) + lastFieldID := int16(0) + +encodeFields: + for _, f := range enc.fields { + x := v + for _, i := range f.index { + if x.Kind() == reflect.Ptr { + x = x.Elem() + } + if x = x.Field(i); x.Kind() == reflect.Ptr { + if x.IsNil() { + continue encodeFields + } + } + } + + if !f.flags.have(required) && !f.flags.have(writeZero) && x.IsZero() { + continue encodeFields + } + + field := Field{ + ID: f.id, + Type: f.typ, + } + + if useDeltaEncoding { + if delta := field.ID - lastFieldID; delta <= 15 { + field.ID = delta + field.Delta = true + } + } + + skipValue := coalesceBoolFields && field.Type == BOOL + if skipValue && isTrue(x) == true { + field.Type = TRUE + } + + if err := w.WriteField(field); err != nil { + return err + } + + if !skipValue { + if err := f.encode(w, x, flags); err != nil { + return err + } + } + + numFields++ + lastFieldID = f.id + } + + if err := w.WriteField(Field{Type: STOP}); err != nil { + return err + } + + if numFields > 1 && enc.union { + return fmt.Errorf("thrift union had more than one field with a non-zero value (%d)", numFields) + } + + return nil +} + +func (enc *structEncoder) String() string { + if enc.union { + return "union" + } + return "struct" +} + +type structEncoderField struct { + index []int + id int16 + flags flags + typ Type + encode encodeFunc +} + +func encodeFuncStructOf(t reflect.Type, seen encodeFuncCache) encodeFunc { + enc := &structEncoder{ + fields: make([]structEncoderField, 0, t.NumField()), + } + encode := enc.encode + seen[t] = encode + + forEachStructField(t, nil, func(f structField) { + if f.flags.have(union) { + enc.union = true + } else { + enc.fields = append(enc.fields, structEncoderField{ + index: f.index, + id: f.id, + flags: f.flags, + typ: TypeOf(f.typ), + encode: encodeFuncStructFieldOf(f, seen), + }) + } + }) + + slices.SortStableFunc(enc.fields, func(a, b structEncoderField) int { + return 
cmp.Compare(a.id, b.id) + }) + + for i := len(enc.fields) - 1; i > 0; i-- { + if enc.fields[i-1].id == enc.fields[i].id { + panic(fmt.Errorf("thrift struct field id %d is present multiple times", enc.fields[i].id)) + } + } + + return encode +} + +func encodeFuncStructFieldOf(f structField, seen encodeFuncCache) encodeFunc { + if f.flags.have(enum) { + switch f.typ.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt32 + } + } + return encodeFuncOf(f.typ, seen) +} + +func encodeFuncPtrOf(t reflect.Type, seen encodeFuncCache) encodeFunc { + typ := t.Elem() + enc := encodeFuncOf(typ, seen) + zero := reflect.Zero(typ) + + return func(w Writer, v reflect.Value, f flags) error { + if v.IsNil() { + v = zero + } else { + v = v.Elem() + } + return enc(w, v, f) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/error.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/error.go new file mode 100644 index 00000000000..ceeb1ba09b7 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/error.go @@ -0,0 +1,111 @@ +package thrift + +import ( + "errors" + "fmt" + "io" + "strings" +) + +type MissingField struct { + Field Field +} + +func (e *MissingField) Error() string { + return fmt.Sprintf("missing required field: %s", e.Field) +} + +type TypeMismatch struct { + Expect Type + Found Type + item string +} + +func (e *TypeMismatch) Error() string { + return fmt.Sprintf("%s type mismatch: expected %s but found %s", e.item, e.Expect, e.Found) +} + +type decodeError struct { + base error + path []error +} + +func (e *decodeError) Error() string { + s := strings.Builder{} + s.Grow(256) + s.WriteString("decoding thrift payload: ") + + if len(e.path) != 0 { + n := len(e.path) - 1 + for i := n; i >= 0; i-- { + if i < n { + s.WriteString(" → ") + } + s.WriteString(e.path[i].Error()) + } + s.WriteString(": ") + } + + s.WriteString(e.base.Error()) + return s.String() +} + +func (e 
*decodeError) Unwrap() error { return e.base } + +func with(base, elem error) error { + if errors.Is(base, io.EOF) { + return base + } + e, _ := base.(*decodeError) + if e == nil { + e = &decodeError{base: base} + } + e.path = append(e.path, elem) + return e +} + +type decodeErrorField struct { + cause Field +} + +func (d *decodeErrorField) Error() string { + return d.cause.String() +} + +type decodeErrorList struct { + cause List + index int +} + +func (d *decodeErrorList) Error() string { + return fmt.Sprintf("%d/%d:%s", d.index, d.cause.Size, d.cause) +} + +type decodeErrorSet struct { + cause Set + index int +} + +func (d *decodeErrorSet) Error() string { + return fmt.Sprintf("%d/%d:%s", d.index, d.cause.Size, d.cause) +} + +type decodeErrorMap struct { + cause Map + index int +} + +func (d *decodeErrorMap) Error() string { + return fmt.Sprintf("%d/%d:%s", d.index, d.cause.Size, d.cause) +} + +func dontExpectEOF(err error) error { + switch err { + case nil: + return nil + case io.EOF: + return io.ErrUnexpectedEOF + default: + return err + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/protocol.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/protocol.go new file mode 100644 index 00000000000..7c31338cf6f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/protocol.go @@ -0,0 +1,73 @@ +package thrift + +import ( + "io" +) + +// Features is a bitset describing the thrift encoding features supported by +// protocol implementations. +type Features uint + +const ( + // DeltaEncoding is advertised by protocols that allow encoders to apply + // delta encoding on struct fields. + UseDeltaEncoding Features = 1 << iota + + // CoalesceBoolFields is advertised by protocols that allow encoders to + // coalesce boolean values into field types. 
+ CoalesceBoolFields +) + +// The Protocol interface abstracts the creation of low-level thrift readers and +// writers implementing the various protocols that the encoding supports. +// +// Protocol instances must be safe to use concurrently from multiple gourintes. +// However, the readers and writer that they instantiates are intended to be +// used by a single goroutine. +type Protocol interface { + NewReader(r io.Reader) Reader + NewWriter(w io.Writer) Writer + Features() Features +} + +// Reader represents a low-level reader of values encoded according to one of +// the thrift protocols. +type Reader interface { + Protocol() Protocol + Reader() io.Reader + ReadBool() (bool, error) + ReadInt8() (int8, error) + ReadInt16() (int16, error) + ReadInt32() (int32, error) + ReadInt64() (int64, error) + ReadFloat64() (float64, error) + ReadBytes() ([]byte, error) + ReadString() (string, error) + ReadLength() (int, error) + ReadMessage() (Message, error) + ReadField() (Field, error) + ReadList() (List, error) + ReadSet() (Set, error) + ReadMap() (Map, error) +} + +// Writer represents a low-level writer of values encoded according to one of +// the thrift protocols. 
+type Writer interface { + Protocol() Protocol + Writer() io.Writer + WriteBool(bool) error + WriteInt8(int8) error + WriteInt16(int16) error + WriteInt32(int32) error + WriteInt64(int64) error + WriteFloat64(float64) error + WriteBytes([]byte) error + WriteString(string) error + WriteLength(int) error + WriteMessage(Message) error + WriteField(Field) error + WriteList(List) error + WriteSet(Set) error + WriteMap(Map) error +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/struct.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/struct.go new file mode 100644 index 00000000000..03698b45016 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/struct.go @@ -0,0 +1,143 @@ +package thrift + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +type flags int16 + +const ( + enum flags = 1 << 0 + union flags = 1 << 1 + required flags = 1 << 2 + optional flags = 1 << 3 + strict flags = 1 << 4 + writeZero flags = 1 << 5 + + featuresBitOffset = 8 + useDeltaEncoding = flags(UseDeltaEncoding) << featuresBitOffset + coalesceBoolFields = flags(CoalesceBoolFields) << featuresBitOffset + + structFlags flags = enum | union | required | optional | writeZero + encodeFlags flags = strict | protocolFlags + decodeFlags flags = strict | protocolFlags + protocolFlags flags = useDeltaEncoding | coalesceBoolFields +) + +func (f flags) have(x flags) bool { + return (f & x) == x +} + +func (f flags) only(x flags) flags { + return f & x +} + +func (f flags) with(x flags) flags { + return f | x +} + +func (f flags) without(x flags) flags { + return f & ^x +} + +type structField struct { + typ reflect.Type + index []int + id int16 + flags flags +} + +func forEachStructField(t reflect.Type, index []int, do func(structField)) { + for i, n := 0, t.NumField(); i < n; i++ { + f := t.Field(i) + + if f.PkgPath != "" && !f.Anonymous { // unexported + continue + } + + fieldIndex := append(index, i) + fieldIndex = 
fieldIndex[:len(fieldIndex):len(fieldIndex)] + + if f.Anonymous { + fieldType := f.Type + + for fieldType.Kind() == reflect.Ptr { + fieldType = fieldType.Elem() + } + + if fieldType.Kind() == reflect.Struct { + forEachStructField(fieldType, fieldIndex, do) + continue + } + } + + tag := f.Tag.Get("thrift") + if tag == "" { + continue + } + tags := strings.Split(tag, ",") + flags := flags(0) + + for _, opt := range tags[1:] { + switch opt { + case "enum": + flags = flags.with(enum) + case "union": + flags = flags.with(union) + case "required": + flags = flags.with(required) + case "optional": + flags = flags.with(optional) + case "writezero": + flags = flags.with(writeZero) + default: + panic(fmt.Errorf("thrift struct field contains an unknown tag option %q in `thrift:\"%s\"`", opt, tag)) + } + } + + if flags.have(optional | required) { + panic(fmt.Errorf("thrift struct field cannot be both optional and required in `thrift:\"%s\"`", tag)) + } + + if flags.have(union) { + if f.Type.Kind() != reflect.Interface { + panic(fmt.Errorf("thrift union tag found on a field which is not an interface type `thrift:\"%s\"`", tag)) + } + + if tags[0] != "" { + panic(fmt.Errorf("invalid thrift field id on union field `thrift:\"%s\"`", tag)) + } + + do(structField{ + typ: f.Type, + index: fieldIndex, + flags: flags, + }) + } else { + if flags.have(enum) { + switch f.Type.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + panic(fmt.Errorf("thrift enum tag found on a field which is not an integer type `thrift:\"%s\"`", tag)) + } + } + + if id, err := strconv.ParseInt(tags[0], 10, 16); err != nil { + panic(fmt.Errorf("invalid thrift field id found in struct tag `thrift:\"%s\"`: %w", tag, err)) + } else if id <= 0 { + panic(fmt.Errorf("invalid thrift field id found in struct tag `thrift:\"%s\"`: %d <= 0", tag, id)) + } else { + 
do(structField{ + typ: f.Type, + index: fieldIndex, + id: int16(id), + flags: flags, + }) + } + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/thrift.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/thrift.go new file mode 100644 index 00000000000..b3682e4d630 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/thrift.go @@ -0,0 +1,164 @@ +package thrift + +import ( + "fmt" + "reflect" +) + +type Message struct { + Type MessageType + Name string + SeqID int32 +} + +type MessageType int8 + +const ( + Call MessageType = iota + Reply + Exception + Oneway +) + +func (m MessageType) String() string { + switch m { + case Call: + return "Call" + case Reply: + return "Reply" + case Exception: + return "Exception" + case Oneway: + return "Oneway" + default: + return "?" + } +} + +type Field struct { + ID int16 + Type Type + Delta bool // whether the field id is a delta +} + +func (f Field) String() string { + return fmt.Sprintf("%d:FIELD<%s>", f.ID, f.Type) +} + +type Type int8 + +const ( + STOP Type = iota + TRUE + FALSE + I8 + I16 + I32 + I64 + DOUBLE + BINARY + LIST + SET + MAP + STRUCT + BOOL = FALSE +) + +func (t Type) String() string { + switch t { + case STOP: + return "STOP" + case TRUE: + return "TRUE" + case BOOL: + return "BOOL" + case I8: + return "I8" + case I16: + return "I16" + case I32: + return "I32" + case I64: + return "I64" + case DOUBLE: + return "DOUBLE" + case BINARY: + return "BINARY" + case LIST: + return "LIST" + case SET: + return "SET" + case MAP: + return "MAP" + case STRUCT: + return "STRUCT" + default: + return "?" + } +} + +func (t Type) GoString() string { + return "thrift." 
+ t.String() +} + +type List struct { + Size int32 + Type Type +} + +func (l List) String() string { + return fmt.Sprintf("LIST<%s>", l.Type) +} + +type Set List + +func (s Set) String() string { + return fmt.Sprintf("SET<%s>", s.Type) +} + +type Map struct { + Size int32 + Key Type + Value Type +} + +func (m Map) String() string { + return fmt.Sprintf("MAP<%s,%s>", m.Key, m.Value) +} + +func TypeOf(t reflect.Type) Type { + switch t.Kind() { + case reflect.Bool: + return BOOL + case reflect.Int8, reflect.Uint8: + return I8 + case reflect.Int16, reflect.Uint16: + return I16 + case reflect.Int32, reflect.Uint32: + return I32 + case reflect.Int64, reflect.Uint64, reflect.Int, reflect.Uint, reflect.Uintptr: + return I64 + case reflect.Float32, reflect.Float64: + return DOUBLE + case reflect.String: + return BINARY + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { // []byte + return BINARY + } else { + return LIST + } + case reflect.Map: + if t.Elem().Size() == 0 { + return SET + } else { + return MAP + } + case reflect.Struct: + return STRUCT + case reflect.Ptr: + return TypeOf(t.Elem()) + default: + panic("type cannot be represented in thrift: " + t.String()) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/thrift/unsafe.go b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/unsafe.go new file mode 100644 index 00000000000..b27c6489d8d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/thrift/unsafe.go @@ -0,0 +1,20 @@ +package thrift + +import ( + "reflect" + "unsafe" +) + +// typeID is used as key in encoder and decoder caches to enable using +// the optimize runtime.mapaccess2_fast64 function instead of the more +// expensive lookup if we were to use reflect.Type as map key. +// +// typeID holds the pointer to the reflect.Type value, which is unique +// in the program. 
+type typeID struct{ ptr unsafe.Pointer } + +func makeTypeID(t reflect.Type) typeID { + return typeID{ + ptr: (*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1], + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/values.go b/vendor/github.com/parquet-go/parquet-go/encoding/values.go new file mode 100644 index 00000000000..9e82eb7cc0a --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/values.go @@ -0,0 +1,276 @@ +package encoding + +import ( + "fmt" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" +) + +type Kind int32 + +const ( + Undefined Kind = iota + Boolean + Int32 + Int64 + Int96 + Float + Double + ByteArray + FixedLenByteArray +) + +func (kind Kind) String() string { + switch kind { + case Boolean: + return "BOOLEAN" + case Int32: + return "INT32" + case Int64: + return "INT64" + case Int96: + return "INT96" + case Float: + return "FLOAT" + case Double: + return "DOUBLE" + case ByteArray: + return "BYTE_ARRAY" + case FixedLenByteArray: + return "FIXED_LEN_BYTE_ARRAY" + default: + return "UNDEFINED" + } +} + +type Values struct { + kind Kind + size int32 + data []byte + offsets []uint32 +} + +func (v *Values) assertKind(kind Kind) { + if kind != v.kind { + panic(fmt.Sprintf("cannot convert values of type %s to type %s", v.kind, kind)) + } +} + +func (v *Values) assertSize(size int) { + if size != int(v.size) { + panic(fmt.Sprintf("cannot convert values of size %d to size %d", v.size, size)) + } +} + +func (v *Values) Size() int64 { + return int64(len(v.data)) +} + +func (v *Values) Kind() Kind { + return v.kind +} + +func (v *Values) Data() (data []byte, offsets []uint32) { + return v.data, v.offsets +} + +func (v *Values) Boolean() []byte { + v.assertKind(Boolean) + return v.data +} + +func (v *Values) Int32() []int32 { + v.assertKind(Int32) + return unsafecast.Slice[int32](v.data) +} + +func (v *Values) Int64() []int64 { + v.assertKind(Int64) + return unsafecast.Slice[int64](v.data) 
+} + +func (v *Values) Int96() []deprecated.Int96 { + v.assertKind(Int96) + return unsafecast.Slice[deprecated.Int96](v.data) +} + +func (v *Values) Float() []float32 { + v.assertKind(Float) + return unsafecast.Slice[float32](v.data) +} + +func (v *Values) Double() []float64 { + v.assertKind(Double) + return unsafecast.Slice[float64](v.data) +} + +func (v *Values) ByteArray() (data []byte, offsets []uint32) { + v.assertKind(ByteArray) + return v.data, v.offsets +} + +func (v *Values) FixedLenByteArray() (data []byte, size int) { + v.assertKind(FixedLenByteArray) + return v.data, int(v.size) +} + +func (v *Values) Uint32() []uint32 { + v.assertKind(Int32) + return unsafecast.Slice[uint32](v.data) +} + +func (v *Values) Uint64() []uint64 { + v.assertKind(Int64) + return unsafecast.Slice[uint64](v.data) +} + +func (v *Values) Uint128() [][16]byte { + v.assertKind(FixedLenByteArray) + v.assertSize(16) + return unsafecast.Slice[[16]byte](v.data) +} + +func makeValues[T any](kind Kind, values []T) Values { + return Values{kind: kind, data: unsafecast.Slice[byte](values)} +} + +func BooleanValues(values []byte) Values { + return makeValues(Boolean, values) +} + +func Int32Values(values []int32) Values { + return makeValues(Int32, values) +} + +func Int64Values(values []int64) Values { + return makeValues(Int64, values) +} + +func Int96Values(values []deprecated.Int96) Values { + return makeValues(Int96, values) +} + +func FloatValues(values []float32) Values { + return makeValues(Float, values) +} + +func DoubleValues(values []float64) Values { + return makeValues(Double, values) +} + +func ByteArrayValues(values []byte, offsets []uint32) Values { + return Values{kind: ByteArray, data: values, offsets: offsets} +} + +func FixedLenByteArrayValues(values []byte, size int) Values { + return Values{kind: FixedLenByteArray, size: int32(size), data: values} +} + +func Uint32Values(values []uint32) Values { + return Int32Values(unsafecast.Slice[int32](values)) +} + +func 
Uint64Values(values []uint64) Values { + return Int64Values(unsafecast.Slice[int64](values)) +} + +func Uint128Values(values [][16]byte) Values { + return FixedLenByteArrayValues(unsafecast.Slice[byte](values), 16) +} + +func Int32ValuesFromBytes(values []byte) Values { + return Values{kind: Int32, data: values} +} + +func Int64ValuesFromBytes(values []byte) Values { + return Values{kind: Int64, data: values} +} + +func Int96ValuesFromBytes(values []byte) Values { + return Values{kind: Int96, data: values} +} + +func FloatValuesFromBytes(values []byte) Values { + return Values{kind: Float, data: values} +} + +func DoubleValuesFromBytes(values []byte) Values { + return Values{kind: Double, data: values} +} + +func EncodeBoolean(dst []byte, src Values, enc Encoding) ([]byte, error) { + return enc.EncodeBoolean(dst, src.Boolean()) +} + +func EncodeInt32(dst []byte, src Values, enc Encoding) ([]byte, error) { + return enc.EncodeInt32(dst, src.Int32()) +} + +func EncodeInt64(dst []byte, src Values, enc Encoding) ([]byte, error) { + return enc.EncodeInt64(dst, src.Int64()) +} + +func EncodeInt96(dst []byte, src Values, enc Encoding) ([]byte, error) { + return enc.EncodeInt96(dst, src.Int96()) +} + +func EncodeFloat(dst []byte, src Values, enc Encoding) ([]byte, error) { + return enc.EncodeFloat(dst, src.Float()) +} + +func EncodeDouble(dst []byte, src Values, enc Encoding) ([]byte, error) { + return enc.EncodeDouble(dst, src.Double()) +} + +func EncodeByteArray(dst []byte, src Values, enc Encoding) ([]byte, error) { + values, offsets := src.ByteArray() + return enc.EncodeByteArray(dst, values, offsets) +} + +func EncodeFixedLenByteArray(dst []byte, src Values, enc Encoding) ([]byte, error) { + data, size := src.FixedLenByteArray() + return enc.EncodeFixedLenByteArray(dst, data, size) +} + +func DecodeBoolean(dst Values, src []byte, enc Encoding) (Values, error) { + values, err := enc.DecodeBoolean(dst.Boolean(), src) + return BooleanValues(values), err +} + +func 
DecodeInt32(dst Values, src []byte, enc Encoding) (Values, error) { + values, err := enc.DecodeInt32(dst.Int32(), src) + return Int32Values(values), err +} + +func DecodeInt64(dst Values, src []byte, enc Encoding) (Values, error) { + values, err := enc.DecodeInt64(dst.Int64(), src) + return Int64Values(values), err +} + +func DecodeInt96(dst Values, src []byte, enc Encoding) (Values, error) { + values, err := enc.DecodeInt96(dst.Int96(), src) + return Int96Values(values), err +} + +func DecodeFloat(dst Values, src []byte, enc Encoding) (Values, error) { + values, err := enc.DecodeFloat(dst.Float(), src) + return FloatValues(values), err +} + +func DecodeDouble(dst Values, src []byte, enc Encoding) (Values, error) { + values, err := enc.DecodeDouble(dst.Double(), src) + return DoubleValues(values), err +} + +func DecodeByteArray(dst Values, src []byte, enc Encoding) (Values, error) { + values, offsets := dst.ByteArray() + values, offsets, err := enc.DecodeByteArray(values, src, offsets) + return ByteArrayValues(values, offsets), err +} + +func DecodeFixedLenByteArray(dst Values, src []byte, enc Encoding) (Values, error) { + data, size := dst.FixedLenByteArray() + values, err := enc.DecodeFixedLenByteArray(data, src, size) + return FixedLenByteArrayValues(values, size), err +} diff --git a/vendor/github.com/parquet-go/parquet-go/errors.go b/vendor/github.com/parquet-go/parquet-go/errors.go new file mode 100644 index 00000000000..651fe740c88 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/errors.go @@ -0,0 +1,87 @@ +package parquet + +import ( + "errors" + "fmt" +) + +var ( + // ErrCorrupted is an error returned by the Err method of ColumnPages + // instances when they encountered a mismatch between the CRC checksum + // recorded in a page header and the one computed while reading the page + // data. 
var (
	// ErrCorrupted is an error returned by the Err method of ColumnPages
	// instances when they encountered a mismatch between the CRC checksum
	// recorded in a page header and the one computed while reading the page
	// data.
	ErrCorrupted = errors.New("corrupted parquet page")

	// ErrMissingRootColumn is an error returned when opening an invalid parquet
	// file which does not have a root column.
	ErrMissingRootColumn = errors.New("parquet file is missing a root column")

	// ErrRowGroupSchemaMissing is an error returned when attempting to write a
	// row group but the source has no schema.
	ErrRowGroupSchemaMissing = errors.New("cannot write rows to a row group which has no schema")

	// ErrRowGroupSchemaMismatch is an error returned when attempting to write a
	// row group but the source and destination schemas differ.
	ErrRowGroupSchemaMismatch = errors.New("cannot write row groups with mismatching schemas")

	// ErrRowGroupSortingColumnsMismatch is an error returned when attempting to
	// write a row group but the sorting columns differ in the source and
	// destination.
	ErrRowGroupSortingColumnsMismatch = errors.New("cannot write row groups with mismatching sorting columns")

	// ErrSeekOutOfRange is an error returned when seeking to a row index which
	// is less than the first row of a page.
	ErrSeekOutOfRange = errors.New("seek to row index out of page range")

	// ErrUnexpectedDictionaryPage is an error returned when a page reader
	// encounters a dictionary page after the first page, or in a column
	// which does not use a dictionary encoding.
	ErrUnexpectedDictionaryPage = errors.New("unexpected dictionary page")

	// ErrMissingPageHeader is an error returned when a page reader encounters
	// a malformed page header which is missing page-type-specific information.
	ErrMissingPageHeader = errors.New("missing page header")

	// ErrUnexpectedRepetitionLevels is an error returned when attempting to
	// decode repetition levels into a page which is not part of a repeated
	// column.
	ErrUnexpectedRepetitionLevels = errors.New("unexpected repetition levels")

	// ErrUnexpectedDefinitionLevels is an error returned when attempting to
	// decode definition levels into a page which is part of a required column.
	ErrUnexpectedDefinitionLevels = errors.New("unexpected definition levels")

	// ErrTooManyRowGroups is returned when attempting to generate a parquet
	// file with more than MaxRowGroups row groups.
	ErrTooManyRowGroups = errors.New("the limit of 32767 row groups has been reached")

	// ErrInvalidConversion is used to indicate that a conversion between two
	// values cannot be done because there are no rules to translate between
	// their physical types.
	//
	// (Comment fixed: it previously named a nonexistent "ErrConversion" and
	// misspelled "between".)
	ErrInvalidConversion = errors.New("invalid conversion between parquet values")

	// ErrMalformedRepetitionLevel is returned when a page reader encounters
	// a repetition level which does not start at the beginning of a row.
	ErrMalformedRepetitionLevel = errors.New("parquet-go encountered a malformed data page which does not start at the beginning of a row")
)

// errno is a compact error code used by internal hot paths; check converts a
// non-ok code into a panic.
type errno int

const (
	ok errno = iota
	indexOutOfBounds
)

// check panics unless the errno is ok. Unknown codes indicate a programming
// error in this package.
func (e errno) check() {
	switch e {
	case ok:
	case indexOutOfBounds:
		panic("index out of bounds")
	default:
		panic("BUG: unknown error code")
	}
}

// errRowIndexOutOfBounds formats an out-of-range row access error.
func errRowIndexOutOfBounds(rowIndex, rowCount int64) error {
	return fmt.Errorf("row index out of bounds: %d/%d", rowIndex, rowCount)
}
	defaultDictBufferSize = 8192
	defaultReadBufferSize = 4096
)

// File represents a parquet file. The layout of a Parquet file can be found
// here: https://github.com/apache/parquet-format#file-format
type File struct {
	metadata      format.FileMetaData
	protocol      thrift.CompactProtocol
	reader        io.ReaderAt
	size          int64
	schema        *Schema
	root          *Column
	columnIndexes []format.ColumnIndex
	offsetIndexes []format.OffsetIndex
	rowGroups     []RowGroup
	config        *FileConfig
}

// FileView is the read-only subset of *File exposing the parsed metadata,
// schema, and page indexes without granting access to mutation or I/O state.
type FileView interface {
	Metadata() *format.FileMetaData
	Schema() *Schema
	NumRows() int64
	Lookup(key string) (string, bool)
	Size() int64
	Root() *Column
	RowGroups() []RowGroup
	ColumnIndexes() []format.ColumnIndex
	OffsetIndexes() []format.OffsetIndex
}

// OpenFile opens a parquet file and reads the content between offset 0 and the given
// size in r.
//
// Only the parquet magic bytes and footer are read, column chunks and other
// parts of the file are left untouched; this means that successfully opening
// a file does not validate that the pages have valid checksums.
func OpenFile(r io.ReaderAt, size int64, options ...FileOption) (*File, error) {
	c, err := NewFileConfig(options...)
	if err != nil {
		return nil, err
	}
	f := &File{reader: r, size: size, config: c}

	// Validate the 4-byte "PAR1" magic header unless the caller opted out.
	if !c.SkipMagicBytes {
		var b [4]byte
		if _, err := readAt(r, b[:4], 0); err != nil {
			return nil, fmt.Errorf("reading magic header of parquet file: %w", err)
		}
		if string(b[:4]) != "PAR1" {
			return nil, fmt.Errorf("invalid magic header of parquet file: %q", b[:4])
		}
	}

	// Readers that track which file sections are accessed (e.g. for caching or
	// prefetching) can implement this optional interface to be notified.
	if cast, ok := f.reader.(interface{ SetMagicFooterSection(offset, length int64) }); ok {
		cast.SetMagicFooterSection(size-8, 8)
	}

	// Optimistic read: fetch a whole ReadBufferSize tail of the file in one
	// call, hoping it already contains the thrift footer; this saves a second
	// ReadAt for small footers. The minimum tail is the 8-byte magic footer.
	optimisticRead := c.OptimisticRead
	optimisticFooterSize := min(int64(c.ReadBufferSize), size)
	if !optimisticRead || optimisticFooterSize < 8 {
		optimisticFooterSize = 8
	}
	optimisticFooterData := make([]byte, optimisticFooterSize)
	if optimisticRead {
		// Wrap the reader so later reads that fall inside the cached tail are
		// served from memory instead of hitting the underlying reader again.
		f.reader = &optimisticFileReaderAt{
			reader: f.reader,
			offset: size - optimisticFooterSize,
			footer: optimisticFooterData,
		}
	}

	if n, err := readAt(r, optimisticFooterData, size-optimisticFooterSize); n != len(optimisticFooterData) {
		return nil, fmt.Errorf("reading magic footer of parquet file: %w (read: %d)", err, n)
	}
	// The last 8 bytes are: 4-byte little-endian footer length + "PAR1".
	optimisticFooterSize -= 8
	b := optimisticFooterData[optimisticFooterSize:]
	if string(b[4:]) != "PAR1" {
		return nil, fmt.Errorf("invalid magic footer of parquet file: %q", b[4:])
	}

	footerSize := int64(binary.LittleEndian.Uint32(b[:4]))
	footerData := []byte(nil)

	if footerSize <= optimisticFooterSize {
		// The footer is entirely contained in the tail we already read.
		footerData = optimisticFooterData[optimisticFooterSize-footerSize : optimisticFooterSize]
	} else {
		footerData = make([]byte, footerSize)
		if cast, ok := f.reader.(interface{ SetFooterSection(offset, length int64) }); ok {
			cast.SetFooterSection(size-(footerSize+8), footerSize)
		}
		if _, err := f.readAt(footerData, size-(footerSize+8)); err != nil {
			return nil, fmt.Errorf("reading footer of parquet file: %w", err)
		}
	}

	if err := thrift.Unmarshal(&f.protocol, footerData, &f.metadata); err != nil {
		return nil, fmt.Errorf("reading parquet file metadata: %w", err)
	}
	if len(f.metadata.Schema) == 0 {
		return nil, ErrMissingRootColumn
	}

	if !c.SkipPageIndex {
		if f.columnIndexes, f.offsetIndexes, err = f.ReadPageIndex(); err != nil {
			return nil, fmt.Errorf("reading page index of parquet file: %w", err)
		}
	}

	if f.root, err = openColumns(f, &f.metadata, f.columnIndexes, f.offsetIndexes); err != nil {
		return nil, fmt.Errorf("opening columns of parquet file: %w", err)
	}

	// A caller-supplied schema overrides the one reconstructed from the footer.
	if c.Schema != nil {
		f.schema = c.Schema
	} else {
		f.schema = NewSchema(f.root.Name(), f.root)
	}
	columns := makeLeafColumns(f.root)
	rowGroups := makeFileRowGroups(f, columns)
	f.rowGroups = makeRowGroups(rowGroups)

	if !c.SkipBloomFilters {
		// Eagerly decode bloom filter headers for every column chunk that has
		// one, so later membership checks don't need to re-parse headers.
		section := io.NewSectionReader(r, 0, size)
		rbuf, rbufpool := getBufioReader(section, c.ReadBufferSize)
		defer putBufioReader(rbuf, rbufpool)

		header := format.BloomFilterHeader{}
		compact := thrift.CompactProtocol{}
		decoder := thrift.NewDecoder(compact.NewReader(rbuf))

		for i := range rowGroups {
			g := &rowGroups[i]

			for j := range g.columns {
				c := g.columns[j].(*FileColumnChunk)

				if offset := c.chunk.MetaData.BloomFilterOffset; offset > 0 {
					section.Seek(offset, io.SeekStart)
					rbuf.Reset(section)

					header = format.BloomFilterHeader{}
					if err := decoder.Decode(&header); err != nil {
						return nil, fmt.Errorf("decoding bloom filter header: %w", err)
					}

					// Compute the absolute offset just past the header: current
					// section position minus bytes buffered but not yet consumed.
					offset, _ = section.Seek(0, io.SeekCurrent)
					offset -= int64(rbuf.Buffered())

					if cast, ok := r.(interface{ SetBloomFilterSection(offset, length int64) }); ok {
						bloomFilterOffset := c.chunk.MetaData.BloomFilterOffset
						bloomFilterLength := (offset - bloomFilterOffset) + int64(header.NumBytes)
						cast.SetBloomFilterSection(bloomFilterOffset, bloomFilterLength)
					}

					c.bloomFilter.Store(newBloomFilter(r, offset, &header))
				}
			}
		}
	}

	// Sort so Lookup can use binary search.
	sortKeyValueMetadata(f.metadata.KeyValueMetadata)
	f.reader = r // restore in case an optimistic reader was used
	return f, nil
}

//
// ReadPageIndex reads the page index section of the parquet file f.
//
// If the file did not contain a page index, the method returns two empty slices
// and a nil error.
//
// Only leaf columns have indexes, the returned indexes are arranged using the
// following layout:
//
//	------------------
//	| col 0: chunk 0 |
//	------------------
//	| col 1: chunk 0 |
//	------------------
//	|      ...       |
//	------------------
//	| col 0: chunk 1 |
//	------------------
//	| col 1: chunk 1 |
//	------------------
//	|      ...       |
//	------------------
//
// This method is useful in combination with the SkipPageIndex option to delay
// reading the page index section until after the file was opened. Note that in
// this case the page index is not cached within the file, programs are expected
// to make use of it independently from the parquet package.
func (f *File) ReadPageIndex() ([]format.ColumnIndex, []format.OffsetIndex, error) {
	if len(f.metadata.RowGroups) == 0 {
		return nil, nil, nil
	}

	// The page index is written as one contiguous column-index region followed
	// by one contiguous offset-index region; the first chunk's offsets mark the
	// start of each region.
	columnIndexOffset := f.metadata.RowGroups[0].Columns[0].ColumnIndexOffset
	offsetIndexOffset := f.metadata.RowGroups[0].Columns[0].OffsetIndexOffset
	columnIndexLength := int64(0)
	offsetIndexLength := int64(0)

	forEachColumnChunk := func(do func(int, int, *format.ColumnChunk) error) error {
		for i := range f.metadata.RowGroups {
			for j := range f.metadata.RowGroups[i].Columns {
				c := &f.metadata.RowGroups[i].Columns[j]
				if err := do(i, j, c); err != nil {
					return err
				}
			}
		}
		return nil
	}

	// First pass: total size of each region, so both can be read in one call.
	forEachColumnChunk(func(_, _ int, c *format.ColumnChunk) error {
		columnIndexLength += int64(c.ColumnIndexLength)
		offsetIndexLength += int64(c.OffsetIndexLength)
		return nil
	})

	if columnIndexLength == 0 && offsetIndexLength == 0 {
		return nil, nil, nil
	}

	numRowGroups := len(f.metadata.RowGroups)
	numColumns := len(f.metadata.RowGroups[0].Columns)
	numColumnChunks := numRowGroups * numColumns

	columnIndexes := make([]format.ColumnIndex, numColumnChunks)
	offsetIndexes := make([]format.OffsetIndex, numColumnChunks)
	// One scratch buffer reused for both regions (they are read sequentially).
	indexBuffer := make([]byte, max(int(columnIndexLength), int(offsetIndexLength)))

	if columnIndexOffset > 0 {
		columnIndexData := indexBuffer[:columnIndexLength]

		if cast, ok := f.reader.(interface{ SetColumnIndexSection(offset, length int64) }); ok {
			cast.SetColumnIndexSection(columnIndexOffset, columnIndexLength)
		}
		if _, err := f.readAt(columnIndexData, columnIndexOffset); err != nil {
			return nil, nil, fmt.Errorf("reading %d bytes column index at offset %d: %w", columnIndexLength, columnIndexOffset, err)
		}

		err := forEachColumnChunk(func(i, j int, c *format.ColumnChunk) error {
			// Some parquet files are missing the column index on some columns.
			//
			// An example of this file is testdata/alltypes_tiny_pages_plain.parquet
			// which was added in https://github.com/apache/parquet-testing/pull/24.
			if c.ColumnIndexOffset > 0 {
				offset := c.ColumnIndexOffset - columnIndexOffset
				length := int64(c.ColumnIndexLength)
				buffer := columnIndexData[offset : offset+length]
				if err := thrift.Unmarshal(&f.protocol, buffer, &columnIndexes[(i*numColumns)+j]); err != nil {
					return fmt.Errorf("decoding column index: rowGroup=%d columnChunk=%d/%d: %w", i, j, numColumns, err)
				}
			}
			return nil
		})
		if err != nil {
			return nil, nil, err
		}
	}

	if offsetIndexOffset > 0 {
		offsetIndexData := indexBuffer[:offsetIndexLength]

		if cast, ok := f.reader.(interface{ SetOffsetIndexSection(offset, length int64) }); ok {
			cast.SetOffsetIndexSection(offsetIndexOffset, offsetIndexLength)
		}
		if _, err := f.readAt(offsetIndexData, offsetIndexOffset); err != nil {
			return nil, nil, fmt.Errorf("reading %d bytes offset index at offset %d: %w", offsetIndexLength, offsetIndexOffset, err)
		}

		err := forEachColumnChunk(func(i, j int, c *format.ColumnChunk) error {
			if c.OffsetIndexOffset > 0 {
				offset := c.OffsetIndexOffset - offsetIndexOffset
				length := int64(c.OffsetIndexLength)
				buffer := offsetIndexData[offset : offset+length]
				// NOTE(review): the message says "column index" but this branch
				// decodes the offset index — kept as-is to preserve behavior.
				if err := thrift.Unmarshal(&f.protocol, buffer, &offsetIndexes[(i*numColumns)+j]); err != nil {
					return fmt.Errorf("decoding column index: rowGroup=%d columnChunk=%d/%d: %w", i, j, numColumns, err)
				}
			}
			return nil
		})
		if err != nil {
			return nil, nil, err
		}
	}

	return columnIndexes, offsetIndexes, nil
}

// NumRows returns the number of rows in the file.
func (f *File) NumRows() int64 { return f.metadata.NumRows }

// RowGroups returns the list of row groups in the file.
//
// Elements of the returned slice are guaranteed to be of type *FileRowGroup.
func (f *File) RowGroups() []RowGroup { return f.rowGroups }

// Root returns the root column of f.
func (f *File) Root() *Column { return f.root }

// Schema returns the schema of f.
func (f *File) Schema() *Schema { return f.schema }

// Metadata returns the metadata of f.
func (f *File) Metadata() *format.FileMetaData { return &f.metadata }

// Size returns the size of f (in bytes).
func (f *File) Size() int64 { return f.size }

// ReadAt reads bytes into b from f at the given offset.
//
// The method satisfies the io.ReaderAt interface.
func (f *File) ReadAt(b []byte, off int64) (int, error) {
	if off < 0 || off >= f.size {
		return 0, io.EOF
	}

	// Clamp reads that extend past the end of the file and report io.EOF along
	// with the bytes that were read, per the io.ReaderAt contract.
	if limit := f.size - off; limit < int64(len(b)) {
		n, err := f.readAt(b[:limit], off)
		if err == nil {
			err = io.EOF
		}
		return n, err
	}

	return f.readAt(b, off)
}

// ColumnIndexes returns the page index of the parquet file f.
//
// If the file did not contain a column index, the method returns an empty slice.
func (f *File) ColumnIndexes() []format.ColumnIndex { return f.columnIndexes }

// OffsetIndexes returns the page index of the parquet file f.
//
// If the file did not contain an offset index, the method returns an empty
// slice.
+func (f *File) OffsetIndexes() []format.OffsetIndex { return f.offsetIndexes } + +// Lookup returns the value associated with the given key in the file key/value +// metadata. +// +// The ok boolean will be true if the key was found, false otherwise. +func (f *File) Lookup(key string) (value string, ok bool) { + return lookupKeyValueMetadata(f.metadata.KeyValueMetadata, key) +} + +func (f *File) hasIndexes() bool { + return f.columnIndexes != nil && f.offsetIndexes != nil +} + +var _ io.ReaderAt = (*File)(nil) + +func sortKeyValueMetadata(keyValueMetadata []format.KeyValue) { + slices.SortFunc(keyValueMetadata, func(a, b format.KeyValue) int { + if cmp := strings.Compare(a.Key, b.Key); cmp != 0 { + return cmp + } + return strings.Compare(a.Value, b.Value) + }) +} + +func lookupKeyValueMetadata(keyValueMetadata []format.KeyValue, key string) (value string, ok bool) { + i, found := slices.BinarySearchFunc(keyValueMetadata, key, func(kv format.KeyValue, key string) int { + return strings.Compare(kv.Key, key) + }) + if found { + return keyValueMetadata[i].Value, true + } + return "", false +} + +// FileRowGroup is an implementation of the RowGroup interface on parquet files +// returned by OpenFile. 
+type FileRowGroup struct { + file *File + rowGroup *format.RowGroup + columns []ColumnChunk + sorting []SortingColumn +} + +func (g *FileRowGroup) init(file *File, columns []*Column, rowGroup *format.RowGroup) { + g.file = file + g.rowGroup = rowGroup + g.columns = make([]ColumnChunk, len(rowGroup.Columns)) + g.sorting = make([]SortingColumn, len(rowGroup.SortingColumns)) + fileColumnChunks := make([]FileColumnChunk, len(rowGroup.Columns)) + fileColumnIndexes := make([]FileColumnIndex, len(rowGroup.Columns)) + fileOffsetIndexes := make([]FileOffsetIndex, len(rowGroup.Columns)) + + for i := range g.columns { + fileColumnChunks[i] = FileColumnChunk{ + file: file, + column: columns[i], + rowGroup: rowGroup, + chunk: &rowGroup.Columns[i], + } + + if file.hasIndexes() { + j := (int(rowGroup.Ordinal) * len(columns)) + i + + fileColumnIndexes[i] = FileColumnIndex{index: &file.columnIndexes[j], kind: columns[i].Type().Kind()} + fileOffsetIndexes[i] = FileOffsetIndex{index: &file.offsetIndexes[j]} + + fileColumnChunks[i].columnIndex.Store(&fileColumnIndexes[i]) + fileColumnChunks[i].offsetIndex.Store(&fileOffsetIndexes[i]) + } + + g.columns[i] = &fileColumnChunks[i] + } + + for i := range g.sorting { + g.sorting[i] = &fileSortingColumn{ + column: columns[rowGroup.SortingColumns[i].ColumnIdx], + descending: rowGroup.SortingColumns[i].Descending, + nullsFirst: rowGroup.SortingColumns[i].NullsFirst, + } + } +} + +// File returns the file that this row group belongs to. +func (g *FileRowGroup) File() *File { return g.file } + +// Schema returns the schema of the row group. +func (g *FileRowGroup) Schema() *Schema { return g.file.schema } + +// NumRows returns the number of rows in the row group. +func (g *FileRowGroup) NumRows() int64 { return g.rowGroup.NumRows } + +// ColumnChunks returns the list of column chunks in the row group. +// +// Elements of the returned slice are guaranteed to be of type *FileColumnChunk. 
+func (g *FileRowGroup) ColumnChunks() []ColumnChunk { return g.columns } + +// SortingColumns returns the list of sorting columns in the row group. +func (g *FileRowGroup) SortingColumns() []SortingColumn { return g.sorting } + +// Rows returns a row reader for the row group. +func (g *FileRowGroup) Rows() Rows { + rowGroup := RowGroup(g) + if g.file.config.ReadMode == ReadModeAsync { + rowGroup = AsyncRowGroup(rowGroup) + } + return NewRowGroupRowReader(rowGroup) +} + +type fileSortingColumn struct { + column *Column + descending bool + nullsFirst bool +} + +func (s *fileSortingColumn) Path() []string { return s.column.Path() } +func (s *fileSortingColumn) Descending() bool { return s.descending } +func (s *fileSortingColumn) NullsFirst() bool { return s.nullsFirst } +func (s *fileSortingColumn) String() string { + b := new(strings.Builder) + if s.nullsFirst { + b.WriteString("nulls_first+") + } + if s.descending { + b.WriteString("descending(") + } else { + b.WriteString("ascending(") + } + b.WriteString(columnPath(s.Path()).String()) + b.WriteString(")") + return b.String() +} + +// FileColumnChunk is an implementation of the ColumnChunk interface on parquet +// files returned by OpenFile. +type FileColumnChunk struct { + file *File + column *Column + rowGroup *format.RowGroup + chunk *format.ColumnChunk + columnIndex atomic.Pointer[FileColumnIndex] + offsetIndex atomic.Pointer[FileOffsetIndex] + bloomFilter atomic.Pointer[FileBloomFilter] +} + +// File returns the file that this column chunk belongs to. +func (c *FileColumnChunk) File() *File { return c.file } + +// Node returns the node that this column chunk belongs to in the parquet schema. +func (c *FileColumnChunk) Node() Node { return c.column } + +// Type returns the type of the column chunk. +func (c *FileColumnChunk) Type() Type { return c.column.Type() } + +// Column returns the column index of this chunk in its parent row group. 
+func (c *FileColumnChunk) Column() int { return int(c.column.Index()) } + +// Bounds returns the min and max values found in the column chunk. +func (c *FileColumnChunk) Bounds() (min, max Value, ok bool) { + stats := &c.chunk.MetaData.Statistics + columnKind := c.Type().Kind() + hasMinValue := stats.MinValue != nil + hasMaxValue := stats.MaxValue != nil + if hasMinValue { + min = columnKind.Value(stats.MinValue) + } + if hasMaxValue { + max = columnKind.Value(stats.MaxValue) + } + return min, max, hasMinValue && hasMaxValue +} + +// Pages returns a page reader for the column chunk. +func (c *FileColumnChunk) Pages() Pages { + pages := Pages(c.PagesFrom(c.file.reader)) + if c.file.config.ReadMode == ReadModeAsync { + pages = AsyncPages(pages) + } + return pages +} + +// PagesFrom returns a page reader for the column chunk, using the reader passed +// as argument instead of the one that the file was originally opened from. +// +// Note that unlike when calling Pages, the returned reader is not wrapped in an +// AsyncPages reader if the file was opened in async mode. +func (c *FileColumnChunk) PagesFrom(reader io.ReaderAt) *FilePages { + pages := new(FilePages) + pages.init(c, reader) + return pages +} + +// ColumnIndex returns the column index of the column chunk, or an error if it +// didn't exist or couldn't be read. +func (c *FileColumnChunk) ColumnIndex() (ColumnIndex, error) { + index, err := c.ColumnIndexFrom(c.file.reader) + if err != nil { + return nil, err + } + return index, nil +} + +// ColumnIndexFrom is like ColumnIndex but uses the reader passed as argument to +// read the column index. 
+func (c *FileColumnChunk) ColumnIndexFrom(reader io.ReaderAt) (*FileColumnIndex, error) { + index, err := c.readColumnIndexFrom(reader) + if err != nil { + return nil, err + } + if index == nil || c.chunk.ColumnIndexOffset == 0 { + return nil, ErrMissingColumnIndex + } + return index, nil +} + +// OffsetIndex returns the offset index of the column chunk, or an error if it +// didn't exist or couldn't be read. +func (c *FileColumnChunk) OffsetIndex() (OffsetIndex, error) { + index, err := c.OffsetIndexFrom(c.file.reader) + if err != nil { + return nil, err + } + return index, nil +} + +// OffsetIndexFrom is like OffsetIndex but uses the reader passed as argument to +// read the offset index. +func (c *FileColumnChunk) OffsetIndexFrom(reader io.ReaderAt) (*FileOffsetIndex, error) { + index, err := c.readOffsetIndex(reader) + if err != nil { + return nil, err + } + if index == nil || c.chunk.OffsetIndexOffset == 0 { + return nil, ErrMissingOffsetIndex + } + return index, nil +} + +// BloomFilter returns the bloom filter of the column chunk, or nil if it didn't +// have one. +func (c *FileColumnChunk) BloomFilter() BloomFilter { + filter, err := c.BloomFilterFrom(c.file.reader) + switch err { + case nil: + return filter + case ErrMissingBloomFilter: + return nil + default: + return &errorBloomFilter{err: err} + } +} + +// BloomFilterFrom is like BloomFilter but uses the reader passed as argument to +// read the bloom filter. +func (c *FileColumnChunk) BloomFilterFrom(reader io.ReaderAt) (*FileBloomFilter, error) { + filter, err := c.readBloomFilter(reader) + if err != nil { + return nil, err + } + if filter == nil || c.chunk.MetaData.BloomFilterOffset == 0 { + return nil, ErrMissingBloomFilter + } + return filter, nil +} + +// NumValues returns the number of values in the column chunk. +func (c *FileColumnChunk) NumValues() int64 { + return c.chunk.MetaData.NumValues +} + +// NullCount returns the number of null values in the column chunk. 
//
// This value is extracted from the column chunk statistics, parquet writers are
// not required to populate it.
func (c *FileColumnChunk) NullCount() int64 {
	return c.chunk.MetaData.Statistics.NullCount
}

// readColumnIndex reads (or returns the cached) column index using the file's
// own reader.
func (c *FileColumnChunk) readColumnIndex() (*FileColumnIndex, error) {
	return c.readColumnIndexFrom(c.file.reader)
}

// readColumnIndexFrom lazily reads and caches the column index. It returns
// (nil, nil) when the chunk has no column index recorded in the footer.
func (c *FileColumnChunk) readColumnIndexFrom(reader io.ReaderAt) (*FileColumnIndex, error) {
	if index := c.columnIndex.Load(); index != nil {
		return index, nil
	}
	columnChunk := &c.file.metadata.RowGroups[c.rowGroup.Ordinal].Columns[c.Column()]
	offset, length := columnChunk.ColumnIndexOffset, columnChunk.ColumnIndexLength
	if offset == 0 {
		return nil, nil
	}

	indexData := make([]byte, int(length))
	var columnIndex format.ColumnIndex
	if _, err := readAt(reader, indexData, offset); err != nil {
		return nil, fmt.Errorf("read %d bytes column index at offset %d: %w", length, offset, err)
	}
	if err := thrift.Unmarshal(&c.file.protocol, indexData, &columnIndex); err != nil {
		return nil, fmt.Errorf("decode column index: rowGroup=%d columnChunk=%d/%d: %w", c.rowGroup.Ordinal, c.Column(), len(c.rowGroup.Columns), err)
	}
	index := &FileColumnIndex{index: &columnIndex, kind: c.column.Type().Kind()}
	// We do a CAS (and Load on CAS failure) instead of a simple Store for
	// the nice property that concurrent calling goroutines will only ever
	// observe a single pointer value for the result.
	if !c.columnIndex.CompareAndSwap(nil, index) {
		// another goroutine populated it since we last read the pointer
		return c.columnIndex.Load(), nil
	}
	return index, nil
}

// readOffsetIndex lazily reads and caches the offset index. It returns
// (nil, nil) when the chunk has no offset index recorded in the footer.
func (c *FileColumnChunk) readOffsetIndex(reader io.ReaderAt) (*FileOffsetIndex, error) {
	if index := c.offsetIndex.Load(); index != nil {
		return index, nil
	}
	columnChunk := &c.file.metadata.RowGroups[c.rowGroup.Ordinal].Columns[c.Column()]
	offset, length := columnChunk.OffsetIndexOffset, columnChunk.OffsetIndexLength
	if offset == 0 {
		return nil, nil
	}

	indexData := make([]byte, int(length))
	var offsetIndex format.OffsetIndex
	if _, err := readAt(reader, indexData, offset); err != nil {
		return nil, fmt.Errorf("read %d bytes offset index at offset %d: %w", length, offset, err)
	}
	if err := thrift.Unmarshal(&c.file.protocol, indexData, &offsetIndex); err != nil {
		return nil, fmt.Errorf("decode offset index: rowGroup=%d columnChunk=%d/%d: %w", c.rowGroup.Ordinal, c.Column(), len(c.rowGroup.Columns), err)
	}
	index := &FileOffsetIndex{index: &offsetIndex}
	// CAS so all goroutines observe a single cached pointer (see
	// readColumnIndexFrom).
	if !c.offsetIndex.CompareAndSwap(nil, index) {
		// another goroutine populated it since we last read the pointer
		return c.offsetIndex.Load(), nil
	}
	return index, nil
}

// readBloomFilter lazily reads and caches the bloom filter. It returns
// (nil, nil) when the chunk has no bloom filter offset in its metadata.
func (c *FileColumnChunk) readBloomFilter(reader io.ReaderAt) (*FileBloomFilter, error) {
	if filter := c.bloomFilter.Load(); filter != nil {
		return filter, nil
	}
	columnChunkMetaData := &c.file.metadata.RowGroups[c.rowGroup.Ordinal].Columns[c.Column()].MetaData
	offset := columnChunkMetaData.BloomFilterOffset
	length := c.file.size - offset
	if offset == 0 {
		return nil, nil
	}

	section := io.NewSectionReader(reader, offset, length)
	rbuf, rbufpool := getBufioReader(section, 1024)
	defer putBufioReader(rbuf, rbufpool)

	header := format.BloomFilterHeader{}
	compact := thrift.CompactProtocol{}
	decoder := thrift.NewDecoder(compact.NewReader(rbuf))

	if err := decoder.Decode(&header); err != nil {
		return nil, fmt.Errorf("decoding bloom filter header: %w", err)
	}

	// NOTE(review): unlike the bloom filter scan in OpenFile, this offset is
	// section-relative and does not subtract rbuf.Buffered() — confirm this is
	// what newBloomFilter expects before relying on this path.
	offset, _ = section.Seek(0, io.SeekCurrent)
	filter := newBloomFilter(reader, offset, &header)

	if !c.bloomFilter.CompareAndSwap(nil, filter) {
		return c.bloomFilter.Load(), nil
	}
	return filter, nil
}

// FilePages reads pages of a single column chunk, implementing the Pages
// interface. Use FileColumnChunk.Pages/PagesFrom to construct one.
type FilePages struct {
	chunk    *FileColumnChunk
	rbuf     *bufio.Reader
	rbufpool *memory.Pool[bufio.Reader]
	section  io.SectionReader

	protocol thrift.CompactProtocol
	decoder  thrift.Decoder

	baseOffset int64 // start of the chunk (dictionary page if present)
	dataOffset int64 // start of the first data page
	dictOffset int64 // start of the dictionary page, 0 if absent
	index      int   // index of the next page to be read
	skip       int64 // number of rows to skip before returning a page
	dictionary Dictionary

	// track the last page to prevent re-reading the same page
	lastPageIndex int
	lastPage      Page
	serveLastPage bool

	bufferSize int
}

// init positions the page reader at the start of the column chunk (the
// dictionary page when one is recorded, otherwise the first data page).
func (f *FilePages) init(c *FileColumnChunk, reader io.ReaderAt) {
	f.chunk = c
	f.baseOffset = c.chunk.MetaData.DataPageOffset
	f.dataOffset = f.baseOffset
	f.bufferSize = c.file.config.ReadBufferSize

	if c.chunk.MetaData.DictionaryPageOffset != 0 {
		f.baseOffset = c.chunk.MetaData.DictionaryPageOffset
		f.dictOffset = f.baseOffset
	}

	f.section = *io.NewSectionReader(reader, f.baseOffset, c.chunk.MetaData.TotalCompressedSize)
	f.rbuf, f.rbufpool = getBufioReader(&f.section, f.bufferSize)
	f.decoder.Reset(f.protocol.NewReader(f.rbuf))
	f.index = 0
	Release(f.lastPage)
	f.lastPage = nil
	f.lastPageIndex = -1
	f.serveLastPage = false
}

// ReadDictionary returns the dictionary of the column chunk, or nil if the
// column chunk did not have one.
//
// The program is not required to call this method before calling ReadPage,
// the dictionary is read automatically when needed. It is exposed to allow
// programs to access the dictionary without reading the first page.
func (f *FilePages) ReadDictionary() (Dictionary, error) {
	if f.dictionary == nil && f.dictOffset > 0 {
		if err := f.readDictionary(); err != nil {
			return nil, err
		}
	}
	return f.dictionary, nil
}

// ReadPage reads the next page from f.
func (f *FilePages) ReadPage() (Page, error) {
	if f.chunk == nil {
		return nil, io.EOF
	}

	// seekToRowStart indicates whether we are in the process of seeking to the start
	// of requested row to read, as opposed to reading sequentially values and moving through pages
	seekToRowStart := f.skip > 0

	// serve the last page if SeekToRow targeted the same page as last returned
	if f.serveLastPage && f.lastPage != nil {
		f.serveLastPage = false
		f.index = f.lastPageIndex + 1

		numRows := f.lastPage.NumRows()
		if f.skip < numRows {
			tail := f.lastPage.Slice(f.skip, numRows)
			f.skip = 0
			return tail, nil
		}

		f.skip -= numRows // fall through to reading the next page
	}

	for {
		// Instantiate a new format.PageHeader for each page.
		//
		// A previous implementation reused page headers to save allocations.
		// https://github.com/segmentio/parquet-go/pull/484
		// The optimization turned out to be less effective than expected,
		// because all the values referenced by pointers in the page header
		// are lost when the header is reset and put back in the pool.
		// https://github.com/parquet-go/parquet-go/pull/11
		//
		// Even after being reset, reusing page headers still produced instability
		// issues.
		// https://github.com/parquet-go/parquet-go/issues/70
		header := new(format.PageHeader)
		if err := f.decoder.Decode(header); err != nil {
			return nil, err
		}

		// if this is a dictionary page and we've already read and decoded the dictionary we can skip past it.
		// call f.rbuf.Discard to skip the page data and realign f.rbuf with the next page header
		if header.Type == format.DictionaryPage && f.dictionary != nil {
			f.rbuf.Discard(int(header.CompressedPageSize))
			continue
		}

		data, err := f.readPage(header, f.rbuf)
		if err != nil {
			return nil, err
		}

		var page Page
		switch header.Type {
		case format.DataPageV2:
			page, err = f.readDataPageV2(header, data)
		case format.DataPage:
			page, err = f.readDataPageV1(header, data)
		case format.DictionaryPage:
			// Sometimes parquet files do not have the dictionary page offset
			// recorded in the column metadata. We account for this by lazily
			// reading dictionary pages when we encounter them.
			err = f.readDictionaryPage(header, data)
		default:
			err = fmt.Errorf("cannot read values of type %s from page", header.Type)
		}

		data.unref()

		if err != nil {
			return nil, fmt.Errorf("decoding page %d of column %q: %w", f.index, f.columnPath(), err)
		}

		// page is nil after a dictionary page was consumed; keep scanning for
		// the next data page.
		if page == nil {
			continue
		}

		if f.lastPage != nil {
			Release(f.lastPage) // in case we cached a valid last page, release it now
		}

		// track last page
		f.lastPage = page
		Retain(page)
		f.lastPageIndex = f.index

		f.index++
		if f.skip == 0 {
			// f.skip==0 can be true:
			// (1) while reading a row of a column which has multiple values (ie. X.list.element) and values continue
			// across pages. In that case we just want to keep reading without skipping any values.
			// (2) when seeking to a specific row and trying to reach the start offset of the first
			// row in a new page.
			if !seekToRowStart || header.Type != format.DataPage {
				// keep reading values from beginning of new page
				return page, nil
			}
			// We need to seek to beginning of row.
			// V1 data pages do not necessarily start at a row boundary.
			if page.NumRows() == 0 {
				// if current page does not have any rows, continue until a page with at least 1 row is reached
				Release(page)
				continue
			}
			repLvls := page.RepetitionLevels()
			if len(repLvls) > 0 && repLvls[0] == 0 {
				// avoid page slice if page starts at a row boundary
				return page, nil
			}
			tail := page.Slice(0, page.NumRows())
			Release(page)
			return tail, nil
		}

		// TODO: what about pages that don't embed the number of rows?
		// (data page v1 with no offset index in the column chunk).
		numRows := page.NumRows()

		if numRows <= f.skip {
			// The whole page falls inside the skipped range; drop it.
			Release(page)
		} else {
			tail := page.Slice(f.skip, numRows)
			Release(page)
			f.skip = 0
			return tail, nil
		}

		f.skip -= numRows
	}
}

// readDictionary reads the dictionary page from the very start of the column
// chunk using a fresh reader so the main read position is left untouched.
func (f *FilePages) readDictionary() error {
	chunk := io.NewSectionReader(f.section.Outer())
	rbuf, pool := getBufioReader(chunk, f.bufferSize)
	defer putBufioReader(rbuf, pool)

	decoder := thrift.NewDecoder(f.protocol.NewReader(rbuf))

	header := new(format.PageHeader)

	if err := decoder.Decode(header); err != nil {
		return err
	}

	page := buffers.get(int(header.CompressedPageSize))
	defer page.unref()

	if _, err := io.ReadFull(rbuf, page.data.Slice()); err != nil {
		return err
	}

	return f.readDictionaryPage(header, page)
}

// readDictionaryPage decodes a dictionary page and stores the result in
// f.dictionary for subsequent data pages to reference.
func (f *FilePages) readDictionaryPage(header *format.PageHeader, page *buffer[byte]) error {
	if header.DictionaryPageHeader == nil {
		return ErrMissingPageHeader
	}
	d, err := f.chunk.column.decodeDictionary(DictionaryPageHeader{header.DictionaryPageHeader}, page, header.UncompressedPageSize)
	if err != nil {
		return err
	}
	f.dictionary = d
	return nil
}

// readDataPageV1 decodes a v1 data page, lazily loading the dictionary first
// when the page uses a dictionary encoding.
func (f *FilePages) readDataPageV1(header *format.PageHeader, page *buffer[byte]) (Page, error) {
	if header.DataPageHeader == nil {
		return nil, ErrMissingPageHeader
	}
	if isDictionaryFormat(header.DataPageHeader.Encoding) && f.dictionary == nil {
		if err := f.readDictionary(); err != nil {
			return nil, err
		}
	}
	return f.chunk.column.decodeDataPageV1(DataPageHeaderV1{header.DataPageHeader}, page, f.dictionary, header.UncompressedPageSize)
}

// readDataPageV2 decodes a v2 data page, lazily loading the dictionary first
// when the page uses a dictionary encoding.
func (f *FilePages) readDataPageV2(header *format.PageHeader, page *buffer[byte]) (Page, error) {
	if header.DataPageHeaderV2 == nil {
		return nil, ErrMissingPageHeader
	}
	if isDictionaryFormat(header.DataPageHeaderV2.Encoding) && f.dictionary == nil {
		// If the program sought to a row past the first page, the dictionary
		// page may not have been seen, in which case we have to lazily load it
		// from the beginning of column chunk.
		if err := f.readDictionary(); err != nil {
			return nil, err
		}
	}
	return f.chunk.column.decodeDataPageV2(DataPageHeaderV2{header.DataPageHeaderV2}, page, f.dictionary, header.UncompressedPageSize)
}

// readPage reads the raw (still compressed) page payload and verifies its
// CRC32 checksum when the header carries one.
func (f *FilePages) readPage(header *format.PageHeader, reader *bufio.Reader) (*buffer[byte], error) {
	page := buffers.get(int(header.CompressedPageSize))
	defer page.unref()

	if _, err := io.ReadFull(reader, page.data.Slice()); err != nil {
		return nil, err
	}

	if header.CRC != 0 {
		headerChecksum := uint32(header.CRC)
		bufferChecksum := crc32.ChecksumIEEE(page.data.Slice())

		if headerChecksum != bufferChecksum {
			// The parquet specs indicate that corruption errors could be
			// handled gracefully by skipping pages, tho this may not always
			// be practical. Depending on how the pages are consumed, missing
			// rows may cause unpredictable behaviors in algorithms.
			//
			// For now, we assume these errors to be fatal, but we may
			// revisit later and improve error handling to be more resilient
			// to data corruption.
			return nil, fmt.Errorf("crc32 checksum mismatch in page of column %q: want=0x%08X got=0x%08X: %w",
				f.columnPath(),
				headerChecksum,
				bufferChecksum,
				ErrCorrupted,
			)
		}
	}

	// Extra ref so the page outlives the deferred unref above.
	page.ref()
	return page, nil
}

// SeekToRow seeks to the given row index in the column chunk.
+func (f *FilePages) SeekToRow(rowIndex int64) error { + if f.chunk == nil { + return io.ErrClosedPipe + } + + if index := f.chunk.offsetIndex.Load(); index == nil { + _, err := f.section.Seek(f.dataOffset-f.baseOffset, io.SeekStart) + if err != nil { + return err + } + + f.skip = rowIndex + f.index = 0 + if f.dictOffset > 0 { + f.index = 1 + } + } else { + pages := index.index.PageLocations + target := sort.Search(len(pages), func(i int) bool { + return pages[i].FirstRowIndex > rowIndex + }) - 1 + if target < 0 { + return ErrSeekOutOfRange + } + + f.skip = rowIndex - pages[target].FirstRowIndex + + // positioned at the last returned page: serve it + if f.lastPage != nil && target == f.lastPageIndex { + f.serveLastPage = true + return nil + } + + // already positioned at the target page + if f.index == target { + return nil + } + + f.index = target + + // if the target page is within the unread portion of the current buffer, just skip/discard some bytes + var pos int64 + pos, err := f.section.Seek(0, io.SeekCurrent) // no-op seek to retrieve position + if err != nil { + return err + } + + unread := int64(f.rbuf.Buffered()) + currOffset := pos - unread // section relative offset + targetOffset := pages[target].Offset - f.baseOffset // section relative target offset + skipBytes := targetOffset - currOffset + if skipBytes == 0 { + return nil + } + if skipBytes > 0 && skipBytes <= unread { + _, err = f.rbuf.Discard(int(skipBytes)) + return err + } + + _, err = f.section.Seek(pages[target].Offset-f.baseOffset, io.SeekStart) + if err != nil { + return err + } + } + + f.rbuf.Reset(&f.section) + return nil +} + +// Close closes the page reader. 
func (f *FilePages) Close() error {
	putBufioReader(f.rbuf, f.rbufpool)
	f.chunk = nil
	f.section = io.SectionReader{}
	f.rbuf = nil
	f.rbufpool = nil
	f.baseOffset = 0
	f.dataOffset = 0
	f.dictOffset = 0
	f.index = 0
	Release(f.lastPage)
	f.lastPage = nil
	f.lastPageIndex = -1
	f.serveLastPage = false
	f.skip = 0
	f.dictionary = nil
	return nil
}

// columnPath returns the dotted path of the column read by f, used in error
// messages.
func (f *FilePages) columnPath() columnPath {
	return columnPath(f.chunk.column.Path())
}

// NOTE(review): putBufioReaderFunc appears unused in this file chunk — confirm
// whether it can be removed.
type putBufioReaderFunc func()

var (
	bufioReaderPoolLock sync.Mutex
	// One pool per buffer size, so pooled readers always match the size
	// requested by the caller.
	bufioReaderPool = map[int]*memory.Pool[bufio.Reader]{}
)

// getBufioReader returns a pooled bufio.Reader reading from r, along with the
// pool it must be returned to via putBufioReader.
func getBufioReader(r io.Reader, bufferSize int) (*bufio.Reader, *memory.Pool[bufio.Reader]) {
	pool := getBufioReaderPool(bufferSize)
	rbuf := pool.Get(
		func() *bufio.Reader { return bufio.NewReaderSize(r, bufferSize) },
		func(rbuf *bufio.Reader) { rbuf.Reset(r) },
	)
	return rbuf, pool
}

// putBufioReader returns rbuf to its pool, detaching it from its current
// reader so the pooled value does not pin the underlying source.
func putBufioReader(rbuf *bufio.Reader, pool *memory.Pool[bufio.Reader]) {
	if pool != nil {
		rbuf.Reset(nil)
		pool.Put(rbuf)
	}
}

// getBufioReaderPool returns (creating if needed) the reader pool for the
// given buffer size.
func getBufioReaderPool(size int) *memory.Pool[bufio.Reader] {
	bufioReaderPoolLock.Lock()
	defer bufioReaderPoolLock.Unlock()

	if pool := bufioReaderPool[size]; pool != nil {
		return pool
	}

	pool := &memory.Pool[bufio.Reader]{}
	bufioReaderPool[size] = pool
	return pool
}

func (f *File) readAt(p []byte, off int64) (int, error) {
	return readAt(f.reader, p, off)
}

// readAt wraps r.ReadAt, suppressing the error when the buffer was fully
// filled (e.g. io.EOF reported alongside a complete read).
func readAt(r io.ReaderAt, p []byte, off int64) (n int, err error) {
	n, err = r.ReadAt(p, off)
	if n == len(p) {
		err = nil
		// p was fully read. There is no further need to check for errors. This
		// operation is a success in principle.
		return
	}
	return
}

// optimisticFileReaderAt serves reads that fall inside the pre-fetched file
// tail (footer) from memory, delegating everything else to the wrapped reader.
type optimisticFileReaderAt struct {
	reader io.ReaderAt
	offset int64  // absolute offset of the first byte held in footer
	footer []byte // cached tail of the file
}

func (r *optimisticFileReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
	length := r.offset + int64(len(r.footer))

	if off >= length {
		return 0, io.EOF
	}

	if off >= r.offset {
		// Serve as much as possible from the cached footer, then fall through
		// to the underlying reader for any remainder.
		n = copy(p, r.footer[off-r.offset:])
		p = p[n:]
		off += int64(n)
		if len(p) == 0 {
			return n, nil
		}
	}

	rn, err := r.reader.ReadAt(p, off)
	return n + rn, err
}
diff --git a/vendor/github.com/parquet-go/parquet-go/filter.go b/vendor/github.com/parquet-go/parquet-go/filter.go
new file mode 100644
index 00000000000..26e0f3f4570
--- /dev/null
+++ b/vendor/github.com/parquet-go/parquet-go/filter.go
@@ -0,0 +1,82 @@
package parquet

// FilterRowReader constructs a RowReader which exposes rows from reader for
// which the predicate has returned true.
func FilterRowReader(reader RowReader, predicate func(Row) bool) RowReader {
	f := &filterRowReader{reader: reader, predicate: predicate}
	// Give each scratch row a zero-length window into the shared values array.
	for i := range f.rows {
		f.rows[i] = f.values[i : i : i+1]
	}
	return f
}

// filterRowReader buffers rows read from the wrapped reader and copies only
// the rows matching the predicate into the caller's slice.
type filterRowReader struct {
	reader    RowReader
	predicate func(Row) bool
	rows      [defaultRowBufferSize]Row
	values    [defaultRowBufferSize]Value
}

func (f *filterRowReader) ReadRows(rows []Row) (n int, err error) {
	for n < len(rows) {
		r := min(len(rows)-n, len(f.rows))

		r, err = f.reader.ReadRows(f.rows[:r])

		for i := range r {
			if f.predicate(f.rows[i]) {
				// Copy the matching row out of the scratch buffer, reusing the
				// caller's row storage when possible.
				rows[n] = append(rows[n][:0], f.rows[i]...)
				n++
			}
		}

		if err != nil {
			break
		}
	}
	return n, err
}

// FilterRowWriter constructs a RowWriter which writes rows to writer for which
// the predicate has returned true.
+func FilterRowWriter(writer RowWriter, predicate func(Row) bool) RowWriter {
+	return &filterRowWriter{writer: writer, predicate: predicate}
+}
+
+type filterRowWriter struct {
+	writer    RowWriter
+	predicate func(Row) bool
+	rows      [defaultRowBufferSize]Row
+}
+
+// WriteRows forwards to the underlying writer the rows that satisfy the
+// predicate. The returned count includes rows that were inspected (whether or
+// not they passed the filter); on failure of the underlying writer the error
+// is returned along with the number of rows consumed before the failing batch.
+func (f *filterRowWriter) WriteRows(rows []Row) (n int, err error) {
+	defer func() {
+		// Drop value references held by the scratch buffer so it does not
+		// pin the caller's rows after the call returns.
+		// (Renamed from `clear`, which shadowed the Go 1.21 builtin.)
+		buffered := f.rows[:]
+		for i := range buffered {
+			clearValues(buffered[i])
+		}
+	}()
+
+	for n < len(rows) {
+		i := 0
+		j := min(len(rows)-n, len(f.rows))
+
+		// Gather the rows of this batch that pass the predicate.
+		for _, row := range rows[n : n+j] {
+			if f.predicate(row) {
+				f.rows[i] = row
+				i++
+			}
+		}
+
+		if i > 0 {
+			// Assign to the named return value: `_, err :=` here would
+			// shadow err and silently drop the write error, making a failed
+			// write look like a short, successful one.
+			if _, err = f.writer.WriteRows(f.rows[:i]); err != nil {
+				break
+			}
+		}
+
+		n += j
+	}
+
+	return n, err
+}
diff --git a/vendor/github.com/parquet-go/parquet-go/format/parquet.go b/vendor/github.com/parquet-go/parquet-go/format/parquet.go
new file mode 100644
index 00000000000..42420c4b84a
--- /dev/null
+++ b/vendor/github.com/parquet-go/parquet-go/format/parquet.go
@@ -0,0 +1,1232 @@
+package format
+
+import (
+	"fmt"
+
+	"github.com/parquet-go/parquet-go/deprecated"
+)
+
+// Types supported by Parquet. These types are intended to be used in combination
+// with the encodings to control the on disk storage format. For example INT16
+// is not included as a type since a good encoding of INT32 would handle this.
+type Type int32
+
+const (
+	Boolean           Type = 0
+	Int32             Type = 1
+	Int64             Type = 2
+	Int96             Type = 3 // deprecated, only used by legacy implementations.
+	Float             Type = 4
+	Double            Type = 5
+	ByteArray         Type = 6
+	FixedLenByteArray Type = 7
+)
+
+func (t Type) String() string {
+	switch t {
+	case Boolean:
+		return "BOOLEAN"
+	case Int32:
+		return "INT32"
+	case Int64:
+		return "INT64"
+	case Int96:
+		return "INT96"
+	case Float:
+		return "FLOAT"
+	case Double:
+		return "DOUBLE"
+	case ByteArray:
+		return "BYTE_ARRAY"
+	case FixedLenByteArray:
+		return "FIXED_LEN_BYTE_ARRAY"
+	default:
+		return "Type(?)"
+	}
+}
+
+// Representation of Schemas.
+type FieldRepetitionType int32 + +const ( + // The field is required (can not be null) and each record has exactly 1 value. + Required FieldRepetitionType = 0 + // The field is optional (can be null) and each record has 0 or 1 values. + Optional FieldRepetitionType = 1 + // The field is repeated and can contain 0 or more values. + Repeated FieldRepetitionType = 2 +) + +func (t FieldRepetitionType) String() string { + switch t { + case Required: + return "REQUIRED" + case Optional: + return "OPTIONAL" + case Repeated: + return "REPEATED" + default: + return "FieldRepeationaType(?)" + } +} + +// A structure for capturing metadata for estimating the unencoded, +// uncompressed size of data written. This is useful for readers to estimate +// how much memory is needed to reconstruct data in their memory model and for +// fine grained filter pushdown on nested structures (the histograms contained +// in this structure can help determine the number of nulls at a particular +// nesting level and maximum length of lists). +type SizeStatistics struct { + // The number of physical bytes stored for BYTE_ARRAY data values assuming + // no encoding. This is exclusive of the bytes needed to store the length of + // each byte array. In other words, this field is equivalent to the `(size + // of PLAIN-ENCODING the byte array values) - (4 bytes * number of values + // written)`. To determine unencoded sizes of other types readers can use + // schema information multiplied by the number of non-null and null values. + // The number of null/non-null values can be inferred from the histograms + // below. + // + // For example, if a column chunk is dictionary-encoded with dictionary + // ["a", "bc", "cde"], and a data page contains the indices [0, 0, 1, 2], + // then this value for that data page should be 7 (1 + 1 + 2 + 3). + // + // This field should only be set for types that use BYTE_ARRAY as their + // physical type. 
+ UnencodedByteArrayDataBytes int64 `thrift:"1,optional"` + + // When present, there is expected to be one element corresponding to each + // repetition (i.e. size=max repetition_level+1) where each element + // represents the number of times the repetition level was observed in the + // data. + // + // This field may be omitted if max_repetition_level is 0 without loss + // of information. + RepetitionLevelHistogram []int64 `thrift:"2,optional"` + + // Same as repetition_level_histogram except for definition levels. + // + // This field may be omitted if max_definition_level is 0 or 1 without + // loss of information. + DefinitionLevelHistogram []int64 `thrift:"3,optional"` +} + +// Bounding box for GEOMETRY or GEOGRAPHY type in the representation of min/max +// value pair of coordinates from each axis. +type BoundingBox struct { + XMin float64 `thrift:"1,required"` + XMax float64 `thrift:"2,required"` + YMin float64 `thrift:"3,required"` + YMax float64 `thrift:"4,required"` + ZMin *float64 `thrift:"5,optional"` + ZMax *float64 `thrift:"6,optional"` + MMin *float64 `thrift:"7,optional"` + MMax *float64 `thrift:"8,optional"` +} + +// Statistics specific to Geometry and Geography logical types +type GeospatialStatistics struct { + // A bounding box of geospatial instances + BBox BoundingBox `thrift:"1,optional"` + // Geospatial type codes of all instances, or an empty list if not known + GeoSpatialTypes []int32 `thrift:"2,optional"` +} + +// Statistics per row group and per page. +// All fields are optional. +type Statistics struct { + // DEPRECATED: min and max value of the column. Use min_value and max_value. + // + // Values are encoded using PLAIN encoding, except that variable-length byte + // arrays do not include a length prefix. + // + // These fields encode min and max values determined by signed comparison + // only. New files should use the correct order for a column's logical type + // and store the values in the min_value and max_value fields. 
+ // + // To support older readers, these may be set when the column order is + // signed. + Max []byte `thrift:"1"` + Min []byte `thrift:"2"` + // Count of null value in the column. + // The writezero tag satisfies spec: + // "Writers SHOULD always write this field even if it is zero (i.e. no null value) or the column is not nullable." + // https://github.com/apache/parquet-format/blob/apache-parquet-format-2.12.0/src/main/thrift/parquet.thrift#L283-L291 + NullCount int64 `thrift:"3,writezero"` + // Count of distinct values occurring. + DistinctCount int64 `thrift:"4"` + // Min and max values for the column, determined by its ColumnOrder. + // + // Values are encoded using PLAIN encoding, except that variable-length byte + // arrays do not include a length prefix. + MaxValue []byte `thrift:"5"` + MinValue []byte `thrift:"6"` +} + +// Empty structs to use as logical type annotations. +type StringType struct{} // allowed for BINARY, must be encoded with UTF-8 +type UUIDType struct{} // allowed for FIXED[16], must encode raw UUID bytes +type MapType struct{} // see LogicalTypes.md +type ListType struct{} // see LogicalTypes.md +type EnumType struct{} // allowed for BINARY, must be encoded with UTF-8 +type DateType struct{} // allowed for INT32 +type Float16Type struct{} // allowed for FIXED[2], must encoded raw FLOAT16 bytes + +func (*StringType) String() string { return "STRING" } +func (*UUIDType) String() string { return "UUID" } +func (*MapType) String() string { return "MAP" } +func (*ListType) String() string { return "LIST" } +func (*EnumType) String() string { return "ENUM" } +func (*DateType) String() string { return "DATE" } +func (*Float16Type) String() string { return "FLOAT16" } + +// Logical type to annotate a column that is always null. +// +// Sometimes when discovering the schema of existing data, values are always +// null and the physical type can't be determined. 
This annotation signals +// the case where the physical type was guessed from all null values. +type NullType struct{} + +func (*NullType) String() string { return "NULL" } + +// Decimal logical type annotation +// +// To maintain forward-compatibility in v1, implementations using this logical +// type must also set scale and precision on the annotated SchemaElement. +// +// Allowed for physical types: INT32, INT64, FIXED, and BINARY +type DecimalType struct { + Scale int32 `thrift:"1,required"` + Precision int32 `thrift:"2,required"` +} + +func (t *DecimalType) String() string { + // Matching parquet-cli's decimal string format: https://github.com/apache/parquet-java/blob/d057b39d93014fe40f5067ee4a33621e65c91552/parquet-column/src/test/java/org/apache/parquet/parser/TestParquetParser.java#L249-L265 + return fmt.Sprintf("DECIMAL(%d,%d)", t.Precision, t.Scale) +} + +// Time units for logical types. +type MilliSeconds struct{} +type MicroSeconds struct{} +type NanoSeconds struct{} + +func (*MilliSeconds) String() string { return "MILLIS" } +func (*MicroSeconds) String() string { return "MICROS" } +func (*NanoSeconds) String() string { return "NANOS" } + +type TimeUnit struct { // union + Millis *MilliSeconds `thrift:"1"` + Micros *MicroSeconds `thrift:"2"` + Nanos *NanoSeconds `thrift:"3"` +} + +func (u *TimeUnit) String() string { + switch { + case u.Millis != nil: + return u.Millis.String() + case u.Micros != nil: + return u.Micros.String() + case u.Nanos != nil: + return u.Nanos.String() + default: + return "" + } +} + +// Timestamp logical type annotation +// +// Allowed for physical types: INT64 +type TimestampType struct { + IsAdjustedToUTC bool `thrift:"1,required"` + Unit TimeUnit `thrift:"2,required"` +} + +func (t *TimestampType) String() string { + return fmt.Sprintf("TIMESTAMP(isAdjustedToUTC=%t,unit=%s)", t.IsAdjustedToUTC, &t.Unit) +} + +// Time logical type annotation +// +// Allowed for physical types: INT32 (millis), INT64 (micros, nanos) +type 
TimeType struct { + IsAdjustedToUTC bool `thrift:"1,required"` + Unit TimeUnit `thrift:"2,required"` +} + +func (t *TimeType) String() string { + return fmt.Sprintf("TIME(isAdjustedToUTC=%t,unit=%s)", t.IsAdjustedToUTC, &t.Unit) +} + +// Integer logical type annotation +// +// bitWidth must be 8, 16, 32, or 64. +// +// Allowed for physical types: INT32, INT64 +type IntType struct { + BitWidth int8 `thrift:"1,required"` + IsSigned bool `thrift:"2,required"` +} + +func (t *IntType) String() string { + return fmt.Sprintf("INT(%d,%t)", t.BitWidth, t.IsSigned) +} + +// Embedded JSON logical type annotation +// +// Allowed for physical types: BINARY +type JsonType struct{} + +func (t *JsonType) String() string { return "JSON" } + +// Embedded BSON logical type annotation +// +// Allowed for physical types: BINARY +type BsonType struct{} + +func (t *BsonType) String() string { return "BSON" } + +// Embedded Variant logical type annotation +type VariantType struct{} + +func (*VariantType) String() string { return "VARIANT" } + +// Edge interpolation algorithm for Geography logical type +type EdgeInterpolationAlgorithm int32 + +const ( + Spherical EdgeInterpolationAlgorithm = 0 + Vincenty EdgeInterpolationAlgorithm = 1 + Thomas EdgeInterpolationAlgorithm = 2 + Andoyer EdgeInterpolationAlgorithm = 3 + Karney EdgeInterpolationAlgorithm = 4 +) + +func (e EdgeInterpolationAlgorithm) String() string { + switch e { + case Spherical: + return "SPHERICAL" + case Vincenty: + return "VINCENTY" + case Thomas: + return "THOMAS" + case Andoyer: + return "ANDOYER" + case Karney: + return "KARNEY" + default: + return "EdgeInterpolationAlgorithm(?)" + } +} + +// Embedded Geometry logical type annotation +// +// Geospatial features in the Well-Known Binary (WKB) format and edges interpolation +// is always linear/planar. +// +// A custom CRS can be set by the crs field. 
If unset, it defaults to "OGC:CRS84", +// which means that the geometries must be stored in longitude, latitude based on +// the WGS84 datum. +// +// Allowed for physical type: BYTE_ARRAY. +// +// See Geospatial.md for details. +type GeometryType struct { + CRS string `thrift:"1,optional"` +} + +func (t *GeometryType) String() string { + crs := t.CRS + if crs == "" { + crs = "OGC:CRS84" + } + return fmt.Sprintf("GEOMETRY(%q)", crs) +} + +// Embedded Geography logical type annotation +// +// Geospatial features in the WKB format with an explicit (non-linear/non-planar) +// edges interpolation algorithm. +// +// A custom geographic CRS can be set by the crs field, where longitudes are +// bound by [-180, 180] and latitudes are bound by [-90, 90]. If unset, the CRS +// defaults to "OGC:CRS84". +// +// An optional algorithm can be set to correctly interpret edges interpolation +// of the geometries. If unset, the algorithm defaults to SPHERICAL. +// +// Allowed for physical type: BYTE_ARRAY. +// +// See Geospatial.md for details. +type GeographyType struct { + CRS string `thrift:"1,optional"` + Algorithm EdgeInterpolationAlgorithm `thrift:"2,optional"` +} + +func (t *GeographyType) String() string { + crs := t.CRS + if crs == "" { + crs = "OGC:CRS84" + } + return fmt.Sprintf("GEOGRAPHY(%q, %s)", crs, t.Algorithm) +} + +// LogicalType annotations to replace ConvertedType. +// +// To maintain compatibility, implementations using LogicalType for a +// SchemaElement must also set the corresponding ConvertedType (if any) +// from the following table. 
+type LogicalType struct { // union + UTF8 *StringType `thrift:"1"` // use ConvertedType UTF8 + Map *MapType `thrift:"2"` // use ConvertedType Map + List *ListType `thrift:"3"` // use ConvertedType List + Enum *EnumType `thrift:"4"` // use ConvertedType Enum + Decimal *DecimalType `thrift:"5"` // use ConvertedType Decimal + SchemaElement.{Scale, Precision} + Date *DateType `thrift:"6"` // use ConvertedType Date + + // use ConvertedType TimeMicros for Time{IsAdjustedToUTC: *, Unit: Micros} + // use ConvertedType TimeMillis for Time{IsAdjustedToUTC: *, Unit: Millis} + Time *TimeType `thrift:"7"` + + // use ConvertedType TimestampMicros for Timestamp{IsAdjustedToUTC: *, Unit: Micros} + // use ConvertedType TimestampMillis for Timestamp{IsAdjustedToUTC: *, Unit: Millis} + Timestamp *TimestampType `thrift:"8"` + + // 9: reserved for Interval + Integer *IntType `thrift:"10"` // use ConvertedType Int* or Uint* + Unknown *NullType `thrift:"11"` // no compatible ConvertedType + Json *JsonType `thrift:"12"` // use ConvertedType JSON + Bson *BsonType `thrift:"13"` // use ConvertedType BSON + UUID *UUIDType `thrift:"14"` // no compatible ConvertedType + Float16 *Float16Type `thrift:"15"` // no compatible ConvertedType + Variant *VariantType `thrift:"16"` // no compatible ConvertedType + Geometry *GeometryType `thrift:"17"` // no compatible ConvertedType + Geography *GeographyType `thrift:"18"` // no compatible ConvertedType +} + +func (t *LogicalType) String() string { + switch { + case t.UTF8 != nil: + return t.UTF8.String() + case t.Map != nil: + return t.Map.String() + case t.List != nil: + return t.List.String() + case t.Enum != nil: + return t.Enum.String() + case t.Decimal != nil: + return t.Decimal.String() + case t.Date != nil: + return t.Date.String() + case t.Time != nil: + return t.Time.String() + case t.Timestamp != nil: + return t.Timestamp.String() + case t.Integer != nil: + return t.Integer.String() + case t.Unknown != nil: + return t.Unknown.String() + case 
t.Json != nil: + return t.Json.String() + case t.Bson != nil: + return t.Bson.String() + case t.UUID != nil: + return t.UUID.String() + case t.Float16 != nil: + return t.Float16.String() + case t.Variant != nil: + return t.Variant.String() + case t.Geometry != nil: + return t.Geometry.String() + case t.Geography != nil: + return t.Geography.String() + default: + return "" + } +} + +// Represents a element inside a schema definition. +// +// - if it is a group (inner node) then type is undefined and num_children is +// defined +// +// - if it is a primitive type (leaf) then type is defined and num_children is +// undefined +// +// The nodes are listed in depth first traversal order. +type SchemaElement struct { + // Data type for this field. Not set if the current element is a non-leaf node. + Type *Type `thrift:"1,optional"` + + // If type is FixedLenByteArray, this is the byte length of the values. + // Otherwise, if specified, this is the maximum bit length to store any of the values. + // (e.g. a low cardinality INT col could have this set to 3). Note that this is + // in the schema, and therefore fixed for the entire file. + TypeLength *int32 `thrift:"2,optional"` + + // repetition of the field. The root of the schema does not have a repetition_type. + // All other nodes must have one. + RepetitionType *FieldRepetitionType `thrift:"3,optional"` + + // Name of the field in the schema. + Name string `thrift:"4,required"` + + // Nested fields. Since thrift does not support nested fields, + // the nesting is flattened to a single list by a depth-first traversal. + // The children count is used to construct the nested relationship. + // This field is not set when the element is a primitive type + NumChildren *int32 `thrift:"5,optional"` + + // DEPRECATED: When the schema is the result of a conversion from another model. + // Used to record the original type to help with cross conversion. + // + // This is superseded by logicalType. 
+ ConvertedType *deprecated.ConvertedType `thrift:"6,optional"` + + // DEPRECATED: Used when this column contains decimal data. + // See the DECIMAL converted type for more details. + // + // This is superseded by using the DecimalType annotation in logicalType. + Scale *int32 `thrift:"7,optional"` + Precision *int32 `thrift:"8,optional"` + + // When the original schema supports field ids, this will save the + // original field id in the parquet schema. + FieldID int32 `thrift:"9,optional"` + + // The logical type of this SchemaElement + // + // LogicalType replaces ConvertedType, but ConvertedType is still required + // for some logical types to ensure forward-compatibility in format v1. + LogicalType *LogicalType `thrift:"10,optional"` +} + +// Encodings supported by Parquet. Not all encodings are valid for all types. +// These enums are also used to specify the encoding of definition and +// repetition levels. See the accompanying doc for the details of the more +// complicated encodings. +type Encoding int32 + +const ( + // Default encoding. + // Boolean - 1 bit per value. 0 is false; 1 is true. + // Int32 - 4 bytes per value. Stored as little-endian. + // Int64 - 8 bytes per value. Stored as little-endian. + // Float - 4 bytes per value. IEEE. Stored as little-endian. + // Double - 8 bytes per value. IEEE. Stored as little-endian. + // ByteArray - 4 byte length stored as little endian, followed by bytes. + // FixedLenByteArray - Just the bytes. + Plain Encoding = 0 + + // Group VarInt encoding for Int32/Int64. + // This encoding is deprecated. It was never used. + // GroupVarInt Encoding = 1 + + // Deprecated: Dictionary encoding. The values in the dictionary are encoded + // in the plain type. + // In a data page use RLEDictionary instead. + // In a Dictionary page use Plain instead. + PlainDictionary Encoding = 2 + + // Group packed run length encoding. Usable for definition/repetition levels + // encoding and Booleans (on one bit: 0 is false 1 is true.) 
+ RLE Encoding = 3 + + // Bit packed encoding. This can only be used if the data has a known max + // width. Usable for definition/repetition levels encoding. + BitPacked Encoding = 4 + + // Delta encoding for integers. This can be used for int columns and works best + // on sorted data. + DeltaBinaryPacked Encoding = 5 + + // Encoding for byte arrays to separate the length values and the data. + // The lengths are encoded using DeltaBinaryPacked. + DeltaLengthByteArray Encoding = 6 + + // Incremental-encoded byte array. Prefix lengths are encoded using DELTA_BINARY_PACKED. + // Suffixes are stored as delta length byte arrays. + DeltaByteArray Encoding = 7 + + // Dictionary encoding: the ids are encoded using the RLE encoding + RLEDictionary Encoding = 8 + + // Encoding for floating-point data. + // K byte-streams are created where K is the size in bytes of the data type. + // The individual bytes of an FP value are scattered to the corresponding stream and + // the streams are concatenated. + // This itself does not reduce the size of the data but can lead to better compression + // afterwards. + ByteStreamSplit Encoding = 9 +) + +func (e Encoding) String() string { + switch e { + case Plain: + return "PLAIN" + case PlainDictionary: + return "PLAIN_DICTIONARY" + case RLE: + return "RLE" + case BitPacked: + return "BIT_PACKED" + case DeltaBinaryPacked: + return "DELTA_BINARY_PACKED" + case DeltaLengthByteArray: + return "DELTA_LENGTH_BYTE_ARRAY" + case DeltaByteArray: + return "DELTA_BYTE_ARRAY" + case RLEDictionary: + return "RLE_DICTIONARY" + case ByteStreamSplit: + return "BYTE_STREAM_SPLIT" + default: + return "Encoding(?)" + } +} + +// Supported compression algorithms. +// +// Codecs added in format version X.Y can be read by readers based on X.Y and later. +// Codec support may vary between readers based on the format version and +// libraries available at runtime. +// +// See Compression.md for a detailed specification of these algorithms. 
+type CompressionCodec int32 + +const ( + Uncompressed CompressionCodec = 0 + Snappy CompressionCodec = 1 + Gzip CompressionCodec = 2 + LZO CompressionCodec = 3 + Brotli CompressionCodec = 4 // Added in 2.4 + Lz4 CompressionCodec = 5 // DEPRECATED (Added in 2.4) + Zstd CompressionCodec = 6 // Added in 2.4 + Lz4Raw CompressionCodec = 7 // Added in 2.9 +) + +func (c CompressionCodec) String() string { + switch c { + case Uncompressed: + return "UNCOMPRESSED" + case Snappy: + return "SNAPPY" + case Gzip: + return "GZIP" + case LZO: + return "LZO" + case Brotli: + return "BROTLI" + case Lz4: + return "LZ4" + case Zstd: + return "ZSTD" + case Lz4Raw: + return "LZ4_RAW" + default: + return "CompressionCodec(?)" + } +} + +type PageType int32 + +const ( + DataPage PageType = 0 + IndexPage PageType = 1 + DictionaryPage PageType = 2 + // Version 2 is indicated in the PageHeader and the use of DataPageHeaderV2, + // and allows you to read repetition and definition level data without + // decompressing the Page. + DataPageV2 PageType = 3 +) + +func (p PageType) String() string { + switch p { + case DataPage: + return "DATA_PAGE" + case IndexPage: + return "INDEX_PAGE" + case DictionaryPage: + return "DICTIONARY_PAGE" + case DataPageV2: + return "DATA_PAGE_V2" + default: + return "PageType(?)" + } +} + +// Enum to annotate whether lists of min/max elements inside ColumnIndex +// are ordered and if so, in which direction. +type BoundaryOrder int32 + +const ( + Unordered BoundaryOrder = 0 + Ascending BoundaryOrder = 1 + Descending BoundaryOrder = 2 +) + +func (b BoundaryOrder) String() string { + switch b { + case Unordered: + return "UNORDERED" + case Ascending: + return "ASCENDING" + case Descending: + return "DESCENDING" + default: + return "BoundaryOrder(?)" + } +} + +// Data page header. +type DataPageHeader struct { + // Number of values, including NULLs, in this data page. + NumValues int32 `thrift:"1,required"` + + // Encoding used for this data page. 
+ Encoding Encoding `thrift:"2,required"` + + // Encoding used for definition levels. + DefinitionLevelEncoding Encoding `thrift:"3,required"` + + // Encoding used for repetition levels. + RepetitionLevelEncoding Encoding `thrift:"4,required"` + + // Optional statistics for the data in this page. + // The writezero tag supports writezero fields of Statistics. + Statistics Statistics `thrift:"5,optional,writezero"` +} + +type IndexPageHeader struct { + // TODO +} + +// The dictionary page must be placed at the first position of the column chunk +// if it is partly or completely dictionary encoded. At most one dictionary page +// can be placed in a column chunk. +type DictionaryPageHeader struct { + // Number of values in the dictionary. + NumValues int32 `thrift:"1,required"` + + // Encoding using this dictionary page. + Encoding Encoding `thrift:"2,required"` + + // If true, the entries in the dictionary are sorted in ascending order. + IsSorted bool `thrift:"3,optional"` +} + +// New page format allowing reading levels without decompressing the data +// Repetition and definition levels are uncompressed +// The remaining section containing the data is compressed if is_compressed is +// true. +type DataPageHeaderV2 struct { + // Number of values, including NULLs, in this data page. + NumValues int32 `thrift:"1,required"` + // Number of NULL values, in this data page. + // Number of non-null = num_values - num_nulls which is also the number of + // values in the data section. + NumNulls int32 `thrift:"2,required"` + // Number of rows in this data page. which means pages change on record boundaries (r = 0). + NumRows int32 `thrift:"3,required"` + // Encoding used for data in this page. + Encoding Encoding `thrift:"4,required"` + + // Repetition levels and definition levels are always using RLE (without size in it). + + // Length of the definition levels. + DefinitionLevelsByteLength int32 `thrift:"5,required"` + // Length of the repetition levels. 
+ RepetitionLevelsByteLength int32 `thrift:"6,required"` + + // Whether the values are compressed. + // Which means the section of the page between + // definition_levels_byte_length + repetition_levels_byte_length + 1 and compressed_page_size (included) + // is compressed with the compression_codec. + // If missing it is considered compressed. + IsCompressed *bool `thrift:"7,optional"` + + // Optional statistics for the data in this page. + // The writezero tag supports writezero fields of Statistics. + Statistics Statistics `thrift:"8,optional,writezero"` +} + +// Block-based algorithm type annotation. +type SplitBlockAlgorithm struct{} + +// The algorithm used in Bloom filter. +type BloomFilterAlgorithm struct { // union + Block *SplitBlockAlgorithm `thrift:"1"` +} + +// Hash strategy type annotation. xxHash is an extremely fast non-cryptographic +// hash algorithm. It uses 64 bits version of xxHash. +type XxHash struct{} + +// The hash function used in Bloom filter. This function takes the hash of a +// column value using plain encoding. +type BloomFilterHash struct { // union + XxHash *XxHash `thrift:"1"` +} + +// The compression used in the Bloom filter. +type BloomFilterUncompressed struct{} +type BloomFilterCompression struct { // union + Uncompressed *BloomFilterUncompressed `thrift:"1"` +} + +// Bloom filter header is stored at beginning of Bloom filter data of each column +// and followed by its bitset. +type BloomFilterHeader struct { + // The size of bitset in bytes. + NumBytes int32 `thrift:"1,required"` + // The algorithm for setting bits. + Algorithm BloomFilterAlgorithm `thrift:"2,required"` + // The hash function used for Bloom filter. + Hash BloomFilterHash `thrift:"3,required"` + // The compression used in the Bloom filter. + Compression BloomFilterCompression `thrift:"4,required"` +} + +type PageHeader struct { + // The type of the page indicates which of the *Header fields below is set. 
+ Type PageType `thrift:"1,required"` + + // Uncompressed page size in bytes (not including this header). + UncompressedPageSize int32 `thrift:"2,required"` + + // Compressed (and potentially encrypted) page size in bytes, not including + // this header. + CompressedPageSize int32 `thrift:"3,required"` + + // The 32bit CRC for the page, to be be calculated as follows: + // - Using the standard CRC32 algorithm + // - On the data only, i.e. this header should not be included. 'Data' + // hereby refers to the concatenation of the repetition levels, the + // definition levels and the column value, in this exact order. + // - On the encoded versions of the repetition levels, definition levels and + // column values. + // - On the compressed versions of the repetition levels, definition levels + // and column values where possible; + // - For v1 data pages, the repetition levels, definition levels and column + // values are always compressed together. If a compression scheme is + // specified, the CRC shall be calculated on the compressed version of + // this concatenation. If no compression scheme is specified, the CRC + // shall be calculated on the uncompressed version of this concatenation. + // - For v2 data pages, the repetition levels and definition levels are + // handled separately from the data and are never compressed (only + // encoded). If a compression scheme is specified, the CRC shall be + // calculated on the concatenation of the uncompressed repetition levels, + // uncompressed definition levels and the compressed column values. + // If no compression scheme is specified, the CRC shall be calculated on + // the uncompressed concatenation. + // - In encrypted columns, CRC is calculated after page encryption; the + // encryption itself is performed after page compression (if compressed) + // If enabled, this allows for disabling checksumming in HDFS if only a few + // pages need to be read. 
+ CRC int32 `thrift:"4,optional"` + + // Headers for page specific data. One only will be set. + DataPageHeader *DataPageHeader `thrift:"5,optional"` + IndexPageHeader *IndexPageHeader `thrift:"6,optional"` + DictionaryPageHeader *DictionaryPageHeader `thrift:"7,optional"` + DataPageHeaderV2 *DataPageHeaderV2 `thrift:"8,optional"` +} + +// Wrapper struct to store key values. +type KeyValue struct { + Key string `thrift:"1,required"` + Value string `thrift:"2,required"` +} + +// Wrapper struct to specify sort order. +type SortingColumn struct { + // The column index (in this row group) + ColumnIdx int32 `thrift:"1,required"` + + // If true, indicates this column is sorted in descending order. + Descending bool `thrift:"2,required"` + + // If true, nulls will come before non-null values, otherwise, + // nulls go at the end. + NullsFirst bool `thrift:"3,required"` +} + +// Statistics of a given page type and encoding. +type PageEncodingStats struct { + // The page type (data/dic/...). + PageType PageType `thrift:"1,required"` + + // Encoding of the page. + Encoding Encoding `thrift:"2,required"` + + // Number of pages of this type with this encoding. + Count int32 `thrift:"3,required"` +} + +// Description for column metadata. +type ColumnMetaData struct { + // Type of this column. + Type Type `thrift:"1,required"` + + // Set of all encodings used for this column. The purpose is to validate + // whether we can decode those pages. + Encoding []Encoding `thrift:"2,required"` + + // Path in schema. + PathInSchema []string `thrift:"3,required"` + + // Compression codec. + Codec CompressionCodec `thrift:"4,required"` + + // Number of values in this column. + NumValues int64 `thrift:"5,required"` + + // Total byte size of all uncompressed pages in this column chunk (including the headers). + TotalUncompressedSize int64 `thrift:"6,required"` + + // Total byte size of all compressed, and potentially encrypted, pages + // in this column chunk (including the headers). 
+ TotalCompressedSize int64 `thrift:"7,required"` + + // Optional key/value metadata. + KeyValueMetadata []KeyValue `thrift:"8,optional"` + + // Byte offset from beginning of file to first data page. + DataPageOffset int64 `thrift:"9,required"` + + // Byte offset from beginning of file to root index page. + IndexPageOffset int64 `thrift:"10,optional"` + + // Byte offset from the beginning of file to first (only) dictionary page. + DictionaryPageOffset int64 `thrift:"11,optional"` + + // optional statistics for this column chunk. + // The writezero tag supports writezero fields of Statistics. + Statistics Statistics `thrift:"12,optional,writezero"` + + // Set of all encodings used for pages in this column chunk. + // This information can be used to determine if all data pages are + // dictionary encoded for example. + EncodingStats []PageEncodingStats `thrift:"13,optional"` + + // Byte offset from beginning of file to Bloom filter data. + BloomFilterOffset int64 `thrift:"14,optional"` + + // Size of Bloom filter data including the serialized header, in bytes. + // Added in 2.10 so readers may not read this field from old files and + // it can be obtained after the BloomFilterHeader has been deserialized. + // Writers should write this field so readers can read the bloom filter + // in a single I/O. + BloomFilterLength int32 `thrift:"15,optional"` + + // Optional statistics to help estimate total memory when converted to in-memory + // representations. The histograms contained in these statistics can + // also be useful in some cases for more fine-grained nullability/list length + // filter pushdown. + SizeStatistics SizeStatistics `thrift:"16,optional"` + + // Optional statistics specific for Geometry and Geography logical types + GeospatialStatistics GeospatialStatistics `thrift:"17,optional"` +} + +type EncryptionWithFooterKey struct{} + +type EncryptionWithColumnKey struct { + // Column path in schema. 
+ PathInSchema []string `thrift:"1,required"` + + // Retrieval metadata of column encryption key. + KeyMetadata []byte `thrift:"2,optional"` +} + +type ColumnCryptoMetaData struct { + EncryptionWithFooterKey *EncryptionWithFooterKey `thrift:"1"` + EncryptionWithColumnKey *EncryptionWithColumnKey `thrift:"2"` +} + +type ColumnChunk struct { + // File where column data is stored. If not set, assumed to be same file as + // metadata. This path is relative to the current file. + FilePath string `thrift:"1,optional"` + + // Byte offset in file_path to the ColumnMetaData. + FileOffset int64 `thrift:"2,required"` + + // Column metadata for this chunk. This is the same content as what is at + // file_path/file_offset. Having it here has it replicated in the file + // metadata. + MetaData ColumnMetaData `thrift:"3,optional"` + + // File offset of ColumnChunk's OffsetIndex. + OffsetIndexOffset int64 `thrift:"4,optional"` + + // Size of ColumnChunk's OffsetIndex, in bytes. + OffsetIndexLength int32 `thrift:"5,optional"` + + // File offset of ColumnChunk's ColumnIndex. + ColumnIndexOffset int64 `thrift:"6,optional"` + + // Size of ColumnChunk's ColumnIndex, in bytes. + ColumnIndexLength int32 `thrift:"7,optional"` + + // Crypto metadata of encrypted columns. + CryptoMetadata ColumnCryptoMetaData `thrift:"8,optional"` + + // Encrypted column metadata for this chunk. + EncryptedColumnMetadata []byte `thrift:"9,optional"` +} + +type RowGroup struct { + // Metadata for each column chunk in this row group. + // This list must have the same order as the SchemaElement list in FileMetaData. + Columns []ColumnChunk `thrift:"1,required"` + + // Total byte size of all the uncompressed column data in this row group. + TotalByteSize int64 `thrift:"2,required"` + + // Number of rows in this row group. + NumRows int64 `thrift:"3,required"` + + // If set, specifies a sort ordering of the rows in this RowGroup. + // The sorting columns can be a subset of all the columns. 
+ SortingColumns []SortingColumn `thrift:"4,optional"` + + // Byte offset from beginning of file to first page (data or dictionary) + // in this row group + FileOffset int64 `thrift:"5,optional"` + + // Total byte size of all compressed (and potentially encrypted) column data + // in this row group. + TotalCompressedSize int64 `thrift:"6,optional"` + + // Row group ordinal in the file. + Ordinal int16 `thrift:"7,optional"` +} + +// Empty struct to signal the order defined by the physical or logical type. +type TypeDefinedOrder struct{} + +// Union to specify the order used for the min_value and max_value fields for a +// column. This union takes the role of an enhanced enum that allows rich +// elements (which will be needed for a collation-based ordering in the future). +// +// Possible values are: +// +// TypeDefinedOrder - the column uses the order defined by its logical or +// physical type (if there is no logical type). +// +// If the reader does not support the value of this union, min and max stats +// for this column should be ignored. 
+type ColumnOrder struct { // union + // The sort orders for logical types are: + // UTF8 - unsigned byte-wise comparison + // INT8 - signed comparison + // INT16 - signed comparison + // INT32 - signed comparison + // INT64 - signed comparison + // UINT8 - unsigned comparison + // UINT16 - unsigned comparison + // UINT32 - unsigned comparison + // UINT64 - unsigned comparison + // DECIMAL - signed comparison of the represented value + // DATE - signed comparison + // TIME_MILLIS - signed comparison + // TIME_MICROS - signed comparison + // TIMESTAMP_MILLIS - signed comparison + // TIMESTAMP_MICROS - signed comparison + // INTERVAL - unsigned comparison + // JSON - unsigned byte-wise comparison + // BSON - unsigned byte-wise comparison + // ENUM - unsigned byte-wise comparison + // LIST - undefined + // MAP - undefined + // VARIANT - undefined + // GEOMETRY - undefined + // GEOGRAPHY - undefined + // + // In the absence of logical types, the sort order is determined by the physical type: + // BOOLEAN - false, true + // INT32 - signed comparison + // INT64 - signed comparison + // INT96 (only used for legacy timestamps) - undefined + // FLOAT - signed comparison of the represented value (*) + // DOUBLE - signed comparison of the represented value (*) + // BYTE_ARRAY - unsigned byte-wise comparison + // FIXED_LEN_BYTE_ARRAY - unsigned byte-wise comparison + // + // (*) Because the sorting order is not specified properly for floating + // point values (relations vs. total ordering) the following + // compatibility rules should be applied when reading statistics: + // - If the min is a NaN, it should be ignored. + // - If the max is a NaN, it should be ignored. + // - If the min is +0, the row group may contain -0 values as well. + // - If the max is -0, the row group may contain +0 values as well. + // - When looking for NaN values, min and max should be ignored. 
+ TypeOrder *TypeDefinedOrder `thrift:"1"` +} + +type PageLocation struct { + // Offset of the page in the file. + Offset int64 `thrift:"1,required"` + + // Size of the page, including header. Sum of compressed_page_size and + // header length. + CompressedPageSize int32 `thrift:"2,required"` + + // Index within the RowGroup of the first row of the page; this means + // pages change on record boundaries (r = 0). + FirstRowIndex int64 `thrift:"3,required"` +} + +type OffsetIndex struct { + // PageLocations, ordered by increasing PageLocation.offset. It is required + // that page_locations[i].first_row_index < page_locations[i+1].first_row_index. + PageLocations []PageLocation `thrift:"1,required"` + + // Unencoded/uncompressed size for BYTE_ARRAY types. + // + // See documention for unencoded_byte_array_data_bytes in SizeStatistics for + // more details on this field. + UnencodedByteArrayDataBytes []int64 `thrift:"2,optional"` +} + +// Description for ColumnIndex. +// Each [i] refers to the page at OffsetIndex.PageLocations[i] +type ColumnIndex struct { + // A list of Boolean values to determine the validity of the corresponding + // min and max values. If true, a page contains only null values, and writers + // have to set the corresponding entries in min_values and max_values to + // byte[0], so that all lists have the same length. If false, the + // corresponding entries in min_values and max_values must be valid. + NullPages []bool `thrift:"1,required"` + + // Two lists containing lower and upper bounds for the values of each page + // determined by the ColumnOrder of the column. These may be the actual + // minimum and maximum values found on a page, but can also be (more compact) + // values that do not exist on a page. For example, instead of storing ""Blart + // Versenwald III", a writer may set min_values[i]="B", max_values[i]="C". + // Such more compact values must still be valid values within the column's + // logical type. 
Readers must make sure that list entries are populated before + // using them by inspecting null_pages. + MinValues [][]byte `thrift:"2,required"` + MaxValues [][]byte `thrift:"3,required"` + + // Stores whether both min_values and max_values are ordered and if so, in + // which direction. This allows readers to perform binary searches in both + // lists. Readers cannot assume that max_values[i] <= min_values[i+1], even + // if the lists are ordered. + BoundaryOrder BoundaryOrder `thrift:"4,required"` + + // A list containing the number of null values for each page. + // The writezero tag satisfies spec: + // "Writers SHOULD always write this field even if no null values are present or the column is not nullable." + // https://github.com/apache/parquet-format/blob/apache-parquet-format-2.12.0/src/main/thrift/parquet.thrift#L1197-L1198 + NullCounts []int64 `thrift:"5,optional,writezero"` + + // Contains repetition level histograms for each page + // concatenated together. The repetition_level_histogram field on + // SizeStatistics contains more details. + // + // When present the length should always be (number of pages * + // (max_repetition_level + 1)) elements. + // + // Element 0 is the first element of the histogram for the first page. + // Element (max_repetition_level + 1) is the first element of the histogram + // for the second page. + RepetitionLevelHistogram []int64 `thrift:"6,optional"` + + // Same as repetition_level_histograms except for definitions levels. + DefinitionLevelHistogram []int64 `thrift:"7,optional"` +} + +type AesGcmV1 struct { + // AAD prefix. + AadPrefix []byte `thrift:"1,optional"` + + // Unique file identifier part of AAD suffix. + AadFileUnique []byte `thrift:"2,optional"` + + // In files encrypted with AAD prefix without storing it, + // readers must supply the prefix. + SupplyAadPrefix bool `thrift:"3,optional"` +} + +type AesGcmCtrV1 struct { + // AAD prefix. 
+ AadPrefix []byte `thrift:"1,optional"` + + // Unique file identifier part of AAD suffix. + AadFileUnique []byte `thrift:"2,optional"` + + // In files encrypted with AAD prefix without storing it, + // readers must supply the prefix. + SupplyAadPrefix bool `thrift:"3,optional"` +} + +type EncryptionAlgorithm struct { // union + AesGcmV1 *AesGcmV1 `thrift:"1"` + AesGcmCtrV1 *AesGcmCtrV1 `thrift:"2"` +} + +// Description for file metadata. +type FileMetaData struct { + // Version of this file. + Version int32 `thrift:"1,required"` + + // Parquet schema for this file. This schema contains metadata for all the columns. + // The schema is represented as a tree with a single root. The nodes of the tree + // are flattened to a list by doing a depth-first traversal. + // The column metadata contains the path in the schema for that column which can be + // used to map columns to nodes in the schema. + // The first element is the root. + Schema []SchemaElement `thrift:"2,required"` + + // Number of rows in this file. + NumRows int64 `thrift:"3,required"` + + // Row groups in this file. + RowGroups []RowGroup `thrift:"4,required"` + + // Optional key/value metadata. + KeyValueMetadata []KeyValue `thrift:"5,optional"` + + // String for application that wrote this file. This should be in the format + // version (build ). + // e.g. impala version 1.0 (build 6cf94d29b2b7115df4de2c06e2ab4326d721eb55) + CreatedBy string `thrift:"6,optional"` + + // Sort order used for the min_value and max_value fields in the Statistics + // objects and the min_values and max_values fields in the ColumnIndex + // objects of each column in this file. Sort orders are listed in the order + // matching the columns in the schema. The indexes are not necessary the same + // though, because only leaf nodes of the schema are represented in the list + // of sort orders. 
+ // + // Without column_orders, the meaning of the min_value and max_value fields + // in the Statistics object and the ColumnIndex object is undefined. To ensure + // well-defined behavior, if these fields are written to a Parquet file, + // column_orders must be written as well. + // + // The obsolete min and max fields in the Statistics object are always sorted + // by signed comparison regardless of column_orders. + ColumnOrders []ColumnOrder `thrift:"7,optional"` + + // Encryption algorithm. This field is set only in encrypted files + // with plaintext footer. Files with encrypted footer store algorithm id + // in FileCryptoMetaData structure. + EncryptionAlgorithm EncryptionAlgorithm `thrift:"8,optional"` + + // Retrieval metadata of key used for signing the footer. + // Used only in encrypted files with plaintext footer. + FooterSigningKeyMetadata []byte `thrift:"9,optional"` +} + +// Crypto metadata for files with encrypted footer. +type FileCryptoMetaData struct { + // Encryption algorithm. This field is only used for files + // with encrypted footer. Files with plaintext footer store algorithm id + // inside footer (FileMetaData structure). + EncryptionAlgorithm EncryptionAlgorithm `thrift:"1,required"` + + // Retrieval metadata of key used for encryption of footer, + // and (possibly) columns. 
+ KeyMetadata []byte `thrift:"2,optional"` +} diff --git a/vendor/github.com/parquet-go/parquet-go/go.tools.mod b/vendor/github.com/parquet-go/parquet-go/go.tools.mod new file mode 100644 index 00000000000..77c0913dd45 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/go.tools.mod @@ -0,0 +1,24 @@ +module github.com/parquet-go/parquet-go + +go 1.25.1 + +tool golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize + +require ( + github.com/andybalholm/brotli v1.1.1 + github.com/google/uuid v1.6.0 + github.com/hexops/gotextdiff v1.0.3 + github.com/klauspost/compress v1.18.0 + github.com/parquet-go/bitpack v0.0.0-20251026130316-7709569977d0 + github.com/parquet-go/jsonlite v0.1.0 + github.com/pierrec/lz4/v4 v4.1.22 + golang.org/x/sys v0.37.0 + google.golang.org/protobuf v1.36.5 +) + +require ( + golang.org/x/mod v0.23.0 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/tools v0.30.1-0.20250221230316-5055f70f240c // indirect + golang.org/x/tools/gopls v0.18.1 // indirect +) diff --git a/vendor/github.com/parquet-go/parquet-go/go.tools.sum b/vendor/github.com/parquet-go/parquet-go/go.tools.sum new file mode 100644 index 00000000000..008d66299a8 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/go.tools.sum @@ -0,0 +1,30 @@ +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= 
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/parquet-go/bitpack v0.0.0-20251026130316-7709569977d0 h1:WDJJwDZk8KYIFVkyvQJE7q6UGwNSre4pzw+bpJ0osIY= +github.com/parquet-go/bitpack v0.0.0-20251026130316-7709569977d0/go.mod h1:Ef5FEKAqlyHECov7Z3RKOq8Ud9Vr8cbGNoCF56KEKaE= +github.com/parquet-go/jsonlite v0.1.0 h1:hVHIMaDBeKkruXcrP0K90vu528z7+tCCYpKWUrR/m9Y= +github.com/parquet-go/jsonlite v0.1.0/go.mod h1:SPIV4YIXv+4eKxONWRe4mVJCw/D/7v0SJju8wCyDnOc= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/tools v0.30.1-0.20250221230316-5055f70f240c h1:Ja/5gV5a9Vvho3p2NC/T2TtxhHjrWS/2DvCKMvA0a+Y= +golang.org/x/tools v0.30.1-0.20250221230316-5055f70f240c/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools/gopls v0.18.1 h1:2xJBNzdImS5u/kV/ZzqDLSvlBSeZX+pWY9uKVP7Pask= +golang.org/x/tools/gopls v0.18.1/go.mod h1:UdNu0zeGjkmjL9L20QDszXu9tP2798pUIHC980kOBrI= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git 
a/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash.go b/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash.go new file mode 100644 index 00000000000..4e1018a0178 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash.go @@ -0,0 +1,21 @@ +// Package aeshash implements hashing functions derived from the Go runtime's +// internal hashing based on the support of AES encryption in CPU instructions. +// +// On architecture where the CPU does not provide instructions for AES +// encryption, the aeshash.Enabled function always returns false, and attempting +// to call any other function will trigger a panic. +package aeshash + +import "github.com/parquet-go/parquet-go/sparse" + +func MultiHash32(hashes []uintptr, values []uint32, seed uintptr) { + MultiHashUint32Array(hashes, sparse.MakeUint32Array(values), seed) +} + +func MultiHash64(hashes []uintptr, values []uint64, seed uintptr) { + MultiHashUint64Array(hashes, sparse.MakeUint64Array(values), seed) +} + +func MultiHash128(hashes []uintptr, values [][16]byte, seed uintptr) { + MultiHashUint128Array(hashes, sparse.MakeUint128Array(values), seed) +} diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_amd64.go b/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_amd64.go new file mode 100644 index 00000000000..8b6ee21290c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_amd64.go @@ -0,0 +1,60 @@ +//go:build !purego + +package aeshash + +import ( + "math/rand" + "unsafe" + + "golang.org/x/sys/cpu" + + "github.com/parquet-go/parquet-go/sparse" +) + +// hashRandomBytes is 48 since this is what the assembly code depends on. +const hashRandomBytes = 48 + +var aeskeysched [hashRandomBytes]byte + +func init() { + for _, v := range aeskeysched { + if v != 0 { + // aeskeysched was initialized somewhere else (e.g. tests), so we + // can skip initialization. 
No synchronization is needed since init + // functions are called sequentially in a single goroutine (see + // https://go.dev/ref/spec#Package_initialization). + return + } + } + + key := (*[hashRandomBytes / 8]uint64)(unsafe.Pointer(&aeskeysched)) + for i := range key { + key[i] = rand.Uint64() + } +} + +// Enabled returns true if AES hash is available on the system. +// +// The function uses the same logic than the Go runtime since we depend on +// the AES hash state being initialized. +// +// See https://go.dev/src/runtime/alg.go +func Enabled() bool { return cpu.X86.HasAES && cpu.X86.HasSSSE3 && cpu.X86.HasSSE41 } + +//go:noescape +func Hash32(value uint32, seed uintptr) uintptr + +//go:noescape +func Hash64(value uint64, seed uintptr) uintptr + +//go:noescape +func Hash128(value [16]byte, seed uintptr) uintptr + +//go:noescape +func MultiHashUint32Array(hashes []uintptr, values sparse.Uint32Array, seed uintptr) + +//go:noescape +func MultiHashUint64Array(hashes []uintptr, values sparse.Uint64Array, seed uintptr) + +//go:noescape +func MultiHashUint128Array(hashes []uintptr, values sparse.Uint128Array, seed uintptr) diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_amd64.s b/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_amd64.s new file mode 100644 index 00000000000..ad6812aeba4 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_amd64.s @@ -0,0 +1,155 @@ +//go:build !purego + +#include "textflag.h" + +// func Hash32(value uint32, seed uintptr) uintptr +TEXT ·Hash32(SB), NOSPLIT, $0-24 + MOVL value+0(FP), AX + MOVQ seed+8(FP), BX + + MOVOU ·aeskeysched+0(SB), X1 + MOVOU ·aeskeysched+16(SB), X2 + MOVOU ·aeskeysched+32(SB), X3 + + MOVQ BX, X0 + PINSRD $2, AX, X0 + + AESENC X1, X0 + AESENC X2, X0 + AESENC X3, X0 + + MOVQ X0, ret+16(FP) + RET + +// func Hash64(value uint64, seed uintptr) uintptr +TEXT ·Hash64(SB), NOSPLIT, $0-24 + MOVQ value+0(FP), AX + MOVQ seed+8(FP), BX + + MOVOU 
·aeskeysched+0(SB), X1 + MOVOU ·aeskeysched+16(SB), X2 + MOVOU ·aeskeysched+32(SB), X3 + + MOVQ BX, X0 + PINSRQ $1, AX, X0 + + AESENC X1, X0 + AESENC X2, X0 + AESENC X3, X0 + + MOVQ X0, ret+16(FP) + RET + +// func Hash128(value [16]byte, seed uintptr) uintptr +TEXT ·Hash128(SB), NOSPLIT, $0-32 + LEAQ value+0(FP), AX + MOVQ seed+16(FP), BX + MOVQ $16, CX + + MOVQ BX, X0 // 64 bits of per-table hash seed + PINSRW $4, CX, X0 // 16 bits of length + PSHUFHW $0, X0, X0 // repeat length 4 times total + PXOR ·aeskeysched(SB), X0 // xor in per-process seed + AESENC X0, X0 // scramble seed + + MOVOU (AX), X1 + PXOR X0, X1 + AESENC X1, X1 + AESENC X1, X1 + AESENC X1, X1 + + MOVQ X1, ret+24(FP) + RET + +// func MultiHashUint32Array(hashes []uintptr, values sparse.Uint32Array, seed uintptr) +TEXT ·MultiHashUint32Array(SB), NOSPLIT, $0-56 + MOVQ hashes_base+0(FP), AX + MOVQ values_array_ptr+24(FP), BX + MOVQ values_array_len+32(FP), CX + MOVQ values_array_off+40(FP), R8 + MOVQ seed+48(FP), DX + + MOVOU ·aeskeysched+0(SB), X1 + MOVOU ·aeskeysched+16(SB), X2 + MOVOU ·aeskeysched+32(SB), X3 + + XORQ SI, SI + JMP test +loop: + MOVQ DX, X0 + PINSRD $2, (BX), X0 + + AESENC X1, X0 + AESENC X2, X0 + AESENC X3, X0 + + MOVQ X0, (AX)(SI*8) + INCQ SI + ADDQ R8, BX +test: + CMPQ SI, CX + JNE loop + RET + +// func MultiHashUint64Array(hashes []uintptr, values sparse.Uint64Array, seed uintptr) +TEXT ·MultiHashUint64Array(SB), NOSPLIT, $0-56 + MOVQ hashes_base+0(FP), AX + MOVQ values_array_ptr+24(FP), BX + MOVQ values_array_len+32(FP), CX + MOVQ values_array_off+40(FP), R8 + MOVQ seed+48(FP), DX + + MOVOU ·aeskeysched+0(SB), X1 + MOVOU ·aeskeysched+16(SB), X2 + MOVOU ·aeskeysched+32(SB), X3 + + XORQ SI, SI + JMP test +loop: + MOVQ DX, X0 + PINSRQ $1, (BX), X0 + + AESENC X1, X0 + AESENC X2, X0 + AESENC X3, X0 + + MOVQ X0, (AX)(SI*8) + INCQ SI + ADDQ R8, BX +test: + CMPQ SI, CX + JNE loop + RET + +// func MultiHashUint128Array(hashes []uintptr, values sparse.Uint128Array, seed uintptr) +TEXT 
·MultiHashUint128Array(SB), NOSPLIT, $0-56 + MOVQ hashes_base+0(FP), AX + MOVQ values_array_ptr+24(FP), BX + MOVQ values_array_len+32(FP), CX + MOVQ values_array_off+40(FP), R8 + MOVQ seed+48(FP), DX + MOVQ $16, DI + + MOVQ DX, X0 + PINSRW $4, DI, X0 + PSHUFHW $0, X0, X0 + PXOR ·aeskeysched(SB), X0 + AESENC X0, X0 + + XORQ SI, SI + JMP test +loop: + MOVOU (BX), X1 + + PXOR X0, X1 + AESENC X1, X1 + AESENC X1, X1 + AESENC X1, X1 + + MOVQ X1, (AX)(SI*8) + INCQ SI + ADDQ R8, BX +test: + CMPQ SI, CX + JNE loop + RET diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_purego.go b/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_purego.go new file mode 100644 index 00000000000..42d367a0de4 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/aeshash/aeshash_purego.go @@ -0,0 +1,29 @@ +//go:build purego || !amd64 + +package aeshash + +import "github.com/parquet-go/parquet-go/sparse" + +// Enabled always returns false since we assume that AES instructions are not +// available by default. 
+func Enabled() bool { return false } + +const unsupported = "BUG: AES hash is not available on this platform" + +func Hash32(value uint32, seed uintptr) uintptr { panic(unsupported) } + +func Hash64(value uint64, seed uintptr) uintptr { panic(unsupported) } + +func Hash128(value [16]byte, seed uintptr) uintptr { panic(unsupported) } + +func MultiHashUint32Array(hashes []uintptr, values sparse.Uint32Array, seed uintptr) { + panic(unsupported) +} + +func MultiHashUint64Array(hashes []uintptr, values sparse.Uint64Array, seed uintptr) { + panic(unsupported) +} + +func MultiHashUint128Array(hashes []uintptr, values sparse.Uint128Array, seed uintptr) { + panic(unsupported) +} diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe.go b/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe.go new file mode 100644 index 00000000000..1c81b32856a --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe.go @@ -0,0 +1,783 @@ +// Package hashprobe provides implementations of probing tables for various +// data types. +// +// Probing tables are specialized hash tables supporting only a single +// "probing" operation which behave like a "lookup or insert". When a key +// is probed, either its value is retrieved if it already existed in the table, +// or it is inserted and assigned its index in the insert sequence as value. +// +// Values are represented as signed 32 bits integers, which means that probing +// tables defined in this package may contain at most 2^31-1 entries. +// +// Probing tables have a method named Probe with the following signature: +// +// func (t *Int64Table) Probe(keys []int64, values []int32) int { +// ... +// } +// +// The method takes an array of keys to probe as first argument, an array of +// values where the indexes of each key will be written as second argument, and +// returns the number of keys that were inserted during the call. 
+// +// Applications that need to determine which keys were inserted can capture the +// length of the probing table prior to the call, and scan the list of values +// looking for indexes greater or equal to the length of the table before the +// call. +package hashprobe + +import ( + cryptoRand "crypto/rand" + "encoding/binary" + "math" + "math/bits" + "math/rand" + "sync" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/hashprobe/aeshash" + "github.com/parquet-go/parquet-go/hashprobe/wyhash" + "github.com/parquet-go/parquet-go/sparse" +) + +const ( + // Number of probes tested per iteration. This parameter balances between + // the amount of memory allocated on the stack to hold the computed hashes + // of the keys being probed, and amortizing the baseline cost of the probing + // algorithm. + // + // The larger the value, the more memory is required, but lower the baseline + // cost will be. + // + // We chose a value that is somewhat large, resulting in reserving 2KiB of + // stack but mostly erasing the baseline cost. 
+ probesPerLoop = 256 +) + +var ( + prngSeed [8]byte + prngMutex sync.Mutex + prngSource rand.Source64 +) + +func init() { + _, err := cryptoRand.Read(prngSeed[:]) + if err != nil { + panic("cannot seed random number generator from system source: " + err.Error()) + } + seed := int64(binary.LittleEndian.Uint64(prngSeed[:])) + prngSource = rand.NewSource(seed).(rand.Source64) +} + +func tableSizeAndMaxLen(groupSize, numValues int, maxLoad float64) (size, maxLen int) { + n := int(math.Ceil((1 / maxLoad) * float64(numValues))) + size = nextPowerOf2((n + (groupSize - 1)) / groupSize) + maxLen = int(math.Ceil(maxLoad * float64(groupSize*size))) + return +} + +func nextPowerOf2(n int) int { + return 1 << (64 - bits.LeadingZeros64(uint64(n-1))) +} + +func randSeed() uintptr { + prngMutex.Lock() + defer prngMutex.Unlock() + return uintptr(prngSource.Uint64()) +} + +type Int32Table struct{ table32 } + +func NewInt32Table(cap int, maxLoad float64) *Int32Table { + return &Int32Table{makeTable32(cap, maxLoad)} +} + +func (t *Int32Table) Reset() { t.reset() } + +func (t *Int32Table) Len() int { return t.len } + +func (t *Int32Table) Cap() int { return t.size() } + +func (t *Int32Table) Probe(keys, values []int32) int { + return t.probe(unsafecast.Slice[uint32](keys), values) +} + +func (t *Int32Table) ProbeArray(keys sparse.Int32Array, values []int32) int { + return t.probeArray(keys.Uint32Array(), values) +} + +type Float32Table struct{ table32 } + +func NewFloat32Table(cap int, maxLoad float64) *Float32Table { + return &Float32Table{makeTable32(cap, maxLoad)} +} + +func (t *Float32Table) Reset() { t.reset() } + +func (t *Float32Table) Len() int { return t.len } + +func (t *Float32Table) Cap() int { return t.size() } + +func (t *Float32Table) Probe(keys []float32, values []int32) int { + return t.probe(unsafecast.Slice[uint32](keys), values) +} + +func (t *Float32Table) ProbeArray(keys sparse.Float32Array, values []int32) int { + return t.probeArray(keys.Uint32Array(), values) 
+} + +type Uint32Table struct{ table32 } + +func NewUint32Table(cap int, maxLoad float64) *Uint32Table { + return &Uint32Table{makeTable32(cap, maxLoad)} +} + +func (t *Uint32Table) Reset() { t.reset() } + +func (t *Uint32Table) Len() int { return t.len } + +func (t *Uint32Table) Cap() int { return t.size() } + +func (t *Uint32Table) Probe(keys []uint32, values []int32) int { + return t.probe(keys, values) +} + +func (t *Uint32Table) ProbeArray(keys sparse.Uint32Array, values []int32) int { + return t.probeArray(keys, values) +} + +// table32 is the generic implementation of probing tables for 32 bit types. +// +// The table uses the following memory layout: +// +// [group 0][group 1][...][group N] +// +// Each group contains up to 7 key/value pairs, and is exactly 64 bytes in size, +// which allows it to fit within a single cache line, and ensures that probes +// can be performed with a single memory load per key. +// +// Groups fill up by appending new entries to the keys and values arrays. When a +// group is full, the probe checks the next group. 
+// +// https://en.wikipedia.org/wiki/Linear_probing +type table32 struct { + len int + maxLen int + maxLoad float64 + seed uintptr + table []table32Group +} + +const table32GroupSize = 7 + +type table32Group struct { + keys [table32GroupSize]uint32 + values [table32GroupSize]uint32 + bits uint32 + _ uint32 +} + +func makeTable32(cap int, maxLoad float64) (t table32) { + if maxLoad < 0 || maxLoad > 1 { + panic("max load of probing table must be a value between 0 and 1") + } + if cap < table32GroupSize { + cap = table32GroupSize + } + t.init(cap, maxLoad) + return t +} + +func (t *table32) size() int { + return table32GroupSize * len(t.table) +} + +func (t *table32) init(cap int, maxLoad float64) { + size, maxLen := tableSizeAndMaxLen(table32GroupSize, cap, maxLoad) + *t = table32{ + maxLen: maxLen, + maxLoad: maxLoad, + seed: randSeed(), + table: make([]table32Group, size), + } +} + +func (t *table32) grow(totalValues int) { + tmp := table32{} + tmp.init(totalValues, t.maxLoad) + tmp.len = t.len + + hashes := make([]uintptr, table32GroupSize) + modulo := uintptr(len(tmp.table)) - 1 + + for i := range t.table { + g := &t.table[i] + n := bits.OnesCount32(g.bits) + + if aeshash.Enabled() { + aeshash.MultiHash32(hashes[:n], g.keys[:n], tmp.seed) + } else { + wyhash.MultiHash32(hashes[:n], g.keys[:n], tmp.seed) + } + + for j, hash := range hashes[:n] { + for { + group := &tmp.table[hash&modulo] + + if n := bits.OnesCount32(group.bits); n < table32GroupSize { + group.bits = (group.bits << 1) | 1 + group.keys[n] = g.keys[j] + group.values[n] = g.values[j] + break + } + + hash++ + } + } + } + + *t = tmp +} + +func (t *table32) reset() { + t.len = 0 + + for i := range t.table { + t.table[i] = table32Group{} + } +} + +func (t *table32) probe(keys []uint32, values []int32) int { + return t.probeArray(sparse.MakeUint32Array(keys), values) +} + +func (t *table32) probeArray(keys sparse.Uint32Array, values []int32) int { + numKeys := keys.Len() + + if totalValues := t.len + 
numKeys; totalValues > t.maxLen { + t.grow(totalValues) + } + + var hashes [probesPerLoop]uintptr + var baseLength = t.len + var useAesHash = aeshash.Enabled() + + _ = values[:numKeys] + + for i := 0; i < numKeys; { + j := len(hashes) + i + n := len(hashes) + + if j > numKeys { + j = numKeys + n = numKeys - i + } + + k := keys.Slice(i, j) + v := values[i:j:j] + h := hashes[:n:n] + + if useAesHash { + aeshash.MultiHashUint32Array(h, k, t.seed) + } else { + wyhash.MultiHashUint32Array(h, k, t.seed) + } + + t.len = multiProbe32(t.table, t.len, h, k, v) + i = j + } + + return t.len - baseLength +} + +func multiProbe32Default(table []table32Group, numKeys int, hashes []uintptr, keys sparse.Uint32Array, values []int32) int { + modulo := uintptr(len(table)) - 1 + + for i, hash := range hashes { + key := keys.Index(i) + for { + group := &table[hash&modulo] + index := table32GroupSize + value := int32(0) + + for j, k := range group.keys { + if k == key { + index = j + break + } + } + + if n := bits.OnesCount32(group.bits); index < n { + value = int32(group.values[index]) + } else { + if n == table32GroupSize { + hash++ + continue + } + + value = int32(numKeys) + group.bits = (group.bits << 1) | 1 + group.keys[n] = key + group.values[n] = uint32(value) + numKeys++ + } + + values[i] = value + break + } + } + + return numKeys +} + +type Int64Table struct{ table64 } + +func NewInt64Table(cap int, maxLoad float64) *Int64Table { + return &Int64Table{makeTable64(cap, maxLoad)} +} + +func (t *Int64Table) Reset() { t.reset() } + +func (t *Int64Table) Len() int { return t.len } + +func (t *Int64Table) Cap() int { return t.size() } + +func (t *Int64Table) Probe(keys []int64, values []int32) int { + return t.probe(unsafecast.Slice[uint64](keys), values) +} + +func (t *Int64Table) ProbeArray(keys sparse.Int64Array, values []int32) int { + return t.probeArray(keys.Uint64Array(), values) +} + +type Float64Table struct{ table64 } + +func NewFloat64Table(cap int, maxLoad float64) 
*Float64Table { + return &Float64Table{makeTable64(cap, maxLoad)} +} + +func (t *Float64Table) Reset() { t.reset() } + +func (t *Float64Table) Len() int { return t.len } + +func (t *Float64Table) Cap() int { return t.size() } + +func (t *Float64Table) Probe(keys []float64, values []int32) int { + return t.probe(unsafecast.Slice[uint64](keys), values) +} + +func (t *Float64Table) ProbeArray(keys sparse.Float64Array, values []int32) int { + return t.probeArray(keys.Uint64Array(), values) +} + +type Uint64Table struct{ table64 } + +func NewUint64Table(cap int, maxLoad float64) *Uint64Table { + return &Uint64Table{makeTable64(cap, maxLoad)} +} + +func (t *Uint64Table) Reset() { t.reset() } + +func (t *Uint64Table) Len() int { return t.len } + +func (t *Uint64Table) Cap() int { return t.size() } + +func (t *Uint64Table) Probe(keys []uint64, values []int32) int { + return t.probe(keys, values) +} + +func (t *Uint64Table) ProbeArray(keys sparse.Uint64Array, values []int32) int { + return t.probeArray(keys, values) +} + +// table64 is the generic implementation of probing tables for 64 bit types. +// +// The table uses a layout similar to the one documented on the table for 32 bit +// keys (see table32). Each group holds up to 4 key/value pairs (instead of 7 +// like table32) so that each group fits in a single CPU cache line. This table +// version has a bit lower memory density, with ~23% of table memory being used +// for padding. +// +// Technically we could hold up to 5 entries per group and still fit within the +// 64 bytes of a CPU cache line; on x86 platforms, AVX2 registers can only hold +// four 64 bit values, we would need twice as many instructions per probe if the +// groups were holding 5 values. The trade off of memory for compute efficiency +// appeared to be the right choice at the time. 
+type table64 struct { + len int + maxLen int + maxLoad float64 + seed uintptr + table []table64Group +} + +const table64GroupSize = 4 + +type table64Group struct { + keys [table64GroupSize]uint64 + values [table64GroupSize]uint32 + bits uint32 + _ uint32 + _ uint32 + _ uint32 +} + +func makeTable64(cap int, maxLoad float64) (t table64) { + if maxLoad < 0 || maxLoad > 1 { + panic("max load of probing table must be a value between 0 and 1") + } + if cap < table64GroupSize { + cap = table64GroupSize + } + t.init(cap, maxLoad) + return t +} + +func (t *table64) size() int { + return table64GroupSize * len(t.table) +} + +func (t *table64) init(cap int, maxLoad float64) { + size, maxLen := tableSizeAndMaxLen(table64GroupSize, cap, maxLoad) + *t = table64{ + maxLen: maxLen, + maxLoad: maxLoad, + seed: randSeed(), + table: make([]table64Group, size), + } +} + +func (t *table64) grow(totalValues int) { + tmp := table64{} + tmp.init(totalValues, t.maxLoad) + tmp.len = t.len + + hashes := make([]uintptr, table64GroupSize) + modulo := uintptr(len(tmp.table)) - 1 + + for i := range t.table { + g := &t.table[i] + n := bits.OnesCount32(g.bits) + + if aeshash.Enabled() { + aeshash.MultiHash64(hashes[:n], g.keys[:n], tmp.seed) + } else { + wyhash.MultiHash64(hashes[:n], g.keys[:n], tmp.seed) + } + + for j, hash := range hashes[:n] { + for { + group := &tmp.table[hash&modulo] + + if n := bits.OnesCount32(group.bits); n < table64GroupSize { + group.bits = (group.bits << 1) | 1 + group.keys[n] = g.keys[j] + group.values[n] = g.values[j] + break + } + + hash++ + } + } + } + + *t = tmp +} + +func (t *table64) reset() { + t.len = 0 + + for i := range t.table { + t.table[i] = table64Group{} + } +} + +func (t *table64) probe(keys []uint64, values []int32) int { + return t.probeArray(sparse.MakeUint64Array(keys), values) +} + +func (t *table64) probeArray(keys sparse.Uint64Array, values []int32) int { + numKeys := keys.Len() + + if totalValues := t.len + numKeys; totalValues > t.maxLen { + 
t.grow(totalValues) + } + + var hashes [probesPerLoop]uintptr + var baseLength = t.len + var useAesHash = aeshash.Enabled() + + _ = values[:numKeys] + + for i := 0; i < numKeys; { + j := len(hashes) + i + n := len(hashes) + + if j > numKeys { + j = numKeys + n = numKeys - i + } + + k := keys.Slice(i, j) + v := values[i:j:j] + h := hashes[:n:n] + + if useAesHash { + aeshash.MultiHashUint64Array(h, k, t.seed) + } else { + wyhash.MultiHashUint64Array(h, k, t.seed) + } + + t.len = multiProbe64(t.table, t.len, h, k, v) + i = j + } + + return t.len - baseLength +} + +func multiProbe64Default(table []table64Group, numKeys int, hashes []uintptr, keys sparse.Uint64Array, values []int32) int { + modulo := uintptr(len(table)) - 1 + + for i, hash := range hashes { + key := keys.Index(i) + for { + group := &table[hash&modulo] + index := table64GroupSize + value := int32(0) + + for i, k := range group.keys { + if k == key { + index = i + break + } + } + + if n := bits.OnesCount32(group.bits); index < n { + value = int32(group.values[index]) + } else { + if n == table64GroupSize { + hash++ + continue + } + + value = int32(numKeys) + group.bits = (group.bits << 1) | 1 + group.keys[n] = key + group.values[n] = uint32(value) + numKeys++ + } + + values[i] = value + break + } + } + + return numKeys +} + +type Uint128Table struct{ table128 } + +func NewUint128Table(cap int, maxLoad float64) *Uint128Table { + return &Uint128Table{makeTable128(cap, maxLoad)} +} + +func (t *Uint128Table) Reset() { t.reset() } + +func (t *Uint128Table) Len() int { return t.len } + +func (t *Uint128Table) Cap() int { return t.cap } + +func (t *Uint128Table) Probe(keys [][16]byte, values []int32) int { + return t.probe(keys, values) +} + +func (t *Uint128Table) ProbeArray(keys sparse.Uint128Array, values []int32) int { + return t.probeArray(keys, values) +} + +// table128 is the generic implementation of probing tables for 128 bit types. 
+// +// This table uses the following memory layout: +// +// [key A][key B][...][value A][value B][...] +// +// The table stores values as their actual value plus one, and uses zero as a +// sentinel to determine whether a slot is occupied. A linear probing strategy +// is used to resolve conflicts. This approach results in at most two memory +// loads for every four keys being tested, since the location of a key and its +// corresponding value will not be contiguous on the same CPU cache line, but +// a cache line can hold four 16 byte keys. +type table128 struct { + len int + cap int + maxLen int + maxLoad float64 + seed uintptr + table []byte +} + +func makeTable128(cap int, maxLoad float64) (t table128) { + if maxLoad < 0 || maxLoad > 1 { + panic("max load of probing table must be a value between 0 and 1") + } + if cap < 8 { + cap = 8 + } + t.init(cap, maxLoad) + return t +} + +func (t *table128) init(cap int, maxLoad float64) { + size, maxLen := tableSizeAndMaxLen(1, cap, maxLoad) + *t = table128{ + cap: size, + maxLen: maxLen, + maxLoad: maxLoad, + seed: randSeed(), + table: make([]byte, 16*size+4*size), + } +} + +func (t *table128) kv() (keys [][16]byte, values []int32) { + i := t.cap * 16 + return unsafecast.Slice[[16]byte](t.table[:i]), unsafecast.Slice[int32](t.table[i:]) +} + +func (t *table128) grow(totalValues int) { + tmp := table128{} + tmp.init(totalValues, t.maxLoad) + tmp.len = t.len + + keys, values := t.kv() + hashes := make([]uintptr, probesPerLoop) + useAesHash := aeshash.Enabled() + + _ = values[:len(keys)] + + for i := 0; i < len(keys); { + j := len(hashes) + i + n := len(hashes) + + if j > len(keys) { + j = len(keys) + n = len(keys) - i + } + + h := hashes[:n:n] + k := keys[i:j:j] + v := values[i:j:j] + + if useAesHash { + aeshash.MultiHash128(h, k, tmp.seed) + } else { + wyhash.MultiHash128(h, k, tmp.seed) + } + + tmp.insert(h, k, v) + i = j + } + + *t = tmp +} + +func (t *table128) insert(hashes []uintptr, keys [][16]byte, values []int32) 
{ + tableKeys, tableValues := t.kv() + modulo := uintptr(t.cap) - 1 + + for i, hash := range hashes { + for { + j := hash & modulo + v := tableValues[j] + + if v == 0 { + tableKeys[j] = keys[i] + tableValues[j] = values[i] + break + } + + hash++ + } + } +} + +func (t *table128) reset() { + t.len = 0 + + for i := range t.table { + t.table[i] = 0 + } +} + +func (t *table128) probe(keys [][16]byte, values []int32) int { + return t.probeArray(sparse.MakeUint128Array(keys), values) +} + +func (t *table128) probeArray(keys sparse.Uint128Array, values []int32) int { + numKeys := keys.Len() + + if totalValues := t.len + numKeys; totalValues > t.maxLen { + t.grow(totalValues) + } + + var hashes [probesPerLoop]uintptr + var baseLength = t.len + var useAesHash = aeshash.Enabled() + + _ = values[:numKeys] + + for i := 0; i < numKeys; { + j := len(hashes) + i + n := len(hashes) + + if j > numKeys { + j = numKeys + n = numKeys - i + } + + k := keys.Slice(i, j) + v := values[i:j:j] + h := hashes[:n:n] + + if useAesHash { + aeshash.MultiHashUint128Array(h, k, t.seed) + } else { + wyhash.MultiHashUint128Array(h, k, t.seed) + } + + t.len = multiProbe128(t.table, t.cap, t.len, h, k, v) + i = j + } + + return t.len - baseLength +} + +func multiProbe128Default(table []byte, tableCap, tableLen int, hashes []uintptr, keys sparse.Uint128Array, values []int32) int { + modulo := uintptr(tableCap) - 1 + offset := uintptr(tableCap) * 16 + tableKeys := unsafecast.Slice[[16]byte](table[:offset]) + tableValues := unsafecast.Slice[int32](table[offset:]) + + for i, hash := range hashes { + key := keys.Index(i) + for { + j := hash & modulo + v := tableValues[j] + + if v == 0 { + values[i] = int32(tableLen) + tableLen++ + tableKeys[j] = key + tableValues[j] = int32(tableLen) + break + } + + if key == tableKeys[j] { + values[i] = v - 1 + break + } + + hash++ + } + } + + return tableLen +} diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_amd64.go 
b/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_amd64.go new file mode 100644 index 00000000000..8802d151a6b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_amd64.go @@ -0,0 +1,38 @@
+//go:build !purego
+
+package hashprobe
+
+import (
+	"github.com/parquet-go/parquet-go/sparse"
+	"golang.org/x/sys/cpu"
+)
+
+// Assembly implementations; see hashprobe_amd64.s in this package.
+
+//go:noescape
+func multiProbe32AVX2(table []table32Group, numKeys int, hashes []uintptr, keys sparse.Uint32Array, values []int32) int
+
+//go:noescape
+func multiProbe64AVX2(table []table64Group, numKeys int, hashes []uintptr, keys sparse.Uint64Array, values []int32) int
+
+//go:noescape
+func multiProbe128SSE2(table []byte, tableCap, tableLen int, hashes []uintptr, keys sparse.Uint128Array, values []int32) int
+
+// multiProbe32 dispatches to the AVX2 assembly implementation when the CPU
+// supports it, and falls back to the portable Go implementation otherwise.
+func multiProbe32(table []table32Group, numKeys int, hashes []uintptr, keys sparse.Uint32Array, values []int32) int {
+	if cpu.X86.HasAVX2 {
+		return multiProbe32AVX2(table, numKeys, hashes, keys, values)
+	}
+	return multiProbe32Default(table, numKeys, hashes, keys, values)
+}
+
+// multiProbe64 dispatches to the AVX2 assembly implementation when the CPU
+// supports it, and falls back to the portable Go implementation otherwise.
+func multiProbe64(table []table64Group, numKeys int, hashes []uintptr, keys sparse.Uint64Array, values []int32) int {
+	if cpu.X86.HasAVX2 {
+		return multiProbe64AVX2(table, numKeys, hashes, keys, values)
+	}
+	return multiProbe64Default(table, numKeys, hashes, keys, values)
+}
+
+// multiProbe128 dispatches to the SSE2 assembly implementation when the CPU
+// supports it, and falls back to the portable Go implementation otherwise.
+func multiProbe128(table []byte, tableCap, tableLen int, hashes []uintptr, keys sparse.Uint128Array, values []int32) int {
+	if cpu.X86.HasSSE2 {
+		return multiProbe128SSE2(table, tableCap, tableLen, hashes, keys, values)
+	}
+	return multiProbe128Default(table, tableCap, tableLen, hashes, keys, values)
+}
diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_amd64.s b/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_amd64.s new file mode 100644 index 00000000000..5c5c0c02f7e --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_amd64.s @@ -0,0 +1,197 @@
+//go:build !purego
+
+#include
"textflag.h" + +// This version of the probing algorithm for 32 bit keys takes advantage of +// the memory layout of table groups and SIMD instructions to accelerate the +// probing operations. +// +// The first 32 bytes of a table group contain the bit mask indicating which +// slots are in use, and the array of keys, which fits into a single vector +// register (YMM) and can be loaded and tested with a single instruction. +// +// A first version of the table group used the number of keys held in the +// group instead of a bit mask, which required the probing operation to +// reconstruct the bit mask during the lookup operation in order to identify +// which elements of the VPCMPEQD result should be retained. The extra CPU +// instructions used to reconstruct the bit mask had a measurable overhead. +// By holding the bit mask in the data structure, we can determine the number +// of keys in a group using the POPCNT instruction, and avoid recomputing the +// mask during lookups. +// +// func multiProbe32AVX2(table []table32Group, numKeys int, hashes []uintptr, keys sparse.Uint32Array, values []int32) int +TEXT ·multiProbe32AVX2(SB), NOSPLIT, $0-112 + MOVQ table_base+0(FP), AX + MOVQ table_len+8(FP), BX + MOVQ numKeys+24(FP), CX + MOVQ hashes_base+32(FP), DX + MOVQ hashes_len+40(FP), DI + MOVQ keys_array_ptr+56(FP), R8 + MOVQ keys_array_off+72(FP), R15 + MOVQ values_base+80(FP), R9 + DECQ BX // modulo = len(table) - 1 + + XORQ SI, SI + JMP test +loop: + MOVQ (DX)(SI*8), R10 // hash + VPBROADCASTD (R8), Y0 // [key] +probe: + MOVQ R10, R11 + ANDQ BX, R11 // hash & modulo + SHLQ $6, R11 // x 64 (size of table32Group) + LEAQ (AX)(R11*1), R12 + + VMOVDQU (R12), Y1 + VPCMPEQD Y0, Y1, Y2 + VMOVMSKPS Y2, R11 + MOVL 56(R12), R13 + TESTL R11, R13 + JZ insert + + TZCNTL R11, R13 + MOVL 28(R12)(R13*4), R14 +next: + MOVL R14, (R9)(SI*4) + INCQ SI + ADDQ R15, R8 +test: + CMPQ SI, DI + JNE loop + MOVQ CX, ret+104(FP) + VZEROUPPER + RET +insert: + CMPL R13, $0b1111111 + JE 
probeNextGroup + + MOVL R13, R11 + POPCNTL R13, R13 + MOVQ X0, R14 // key + SHLL $1, R11 + ORL $1, R11 + MOVL R11, 56(R12) // group.len = (group.len << 1) | 1 + MOVL R14, (R12)(R13*4) // group.keys[i] = key + MOVL CX, 28(R12)(R13*4) // group.values[i] = value + MOVL CX, R14 + INCL CX + JMP next +probeNextGroup: + INCQ R10 + JMP probe + +// func multiProbe64AVX2(table []table64Group, numKeys int, hashes []uintptr, keys sparse.Uint64Array, values []int32) int +TEXT ·multiProbe64AVX2(SB), NOSPLIT, $0-112 + MOVQ table_base+0(FP), AX + MOVQ table_len+8(FP), BX + MOVQ numKeys+24(FP), CX + MOVQ hashes_base+32(FP), DX + MOVQ hashes_len+40(FP), DI + MOVQ keys_array_ptr+56(FP), R8 + MOVQ keys_array_off+72(FP), R15 + MOVQ values_base+80(FP), R9 + DECQ BX // modulo = len(table) - 1 + + XORQ SI, SI + JMP test +loop: + MOVQ (DX)(SI*8), R10 // hash + VPBROADCASTQ (R8), Y0 // [key] +probe: + MOVQ R10, R11 + ANDQ BX, R11 // hash & modulo + SHLQ $6, R11 // x 64 (size of table64Group) + LEAQ (AX)(R11*1), R12 + + VMOVDQU (R12), Y1 + VPCMPEQQ Y0, Y1, Y2 + VMOVMSKPD Y2, R11 + MOVL 48(R12), R13 + TESTL R11, R13 + JZ insert + + TZCNTL R11, R13 + MOVL 32(R12)(R13*4), R14 +next: + MOVL R14, (R9)(SI*4) + INCQ SI + ADDQ R15, R8 +test: + CMPQ SI, DI + JNE loop + MOVQ CX, ret+104(FP) + VZEROUPPER + RET +insert: + CMPL R13, $0b1111 + JE probeNextGroup + + MOVL R13, R11 + POPCNTL R13, R13 + SHLL $1, R11 + ORL $1, R11 + MOVL R11, 48(R12) // group.len = (group.len << 1) | 1 + MOVQ X0, (R12)(R13*8) // group.keys[i] = key + MOVL CX, 32(R12)(R13*4) // group.values[i] = value + MOVL CX, R14 + INCL CX + JMP next +probeNextGroup: + INCQ R10 + JMP probe + +// func multiProbe128SSE2(table []byte, tableCap, tableLen int, hashes []uintptr, keys sparse.Uint128Array, values []int32) int +TEXT ·multiProbe128SSE2(SB), NOSPLIT, $0-120 + MOVQ table_base+0(FP), AX + MOVQ tableCap+24(FP), BX + MOVQ tableLen+32(FP), CX + MOVQ hashes_base+40(FP), DX + MOVQ hashes_len+48(FP), DI + MOVQ keys_array_ptr+64(FP), R8 + MOVQ 
keys_array_off+80(FP), R15 + MOVQ values_base+88(FP), R9 + + MOVQ BX, R10 + SHLQ $4, R10 + LEAQ (AX)(R10*1), R10 + DECQ BX // modulo = tableCap - 1 + + XORQ SI, SI + JMP test +loop: + MOVQ (DX)(SI*8), R11 // hash + MOVOU (R8), X0 // key +probe: + MOVQ R11, R12 + ANDQ BX, R12 + + MOVL (R10)(R12*4), R14 + CMPL R14, $0 + JE insert + + SHLQ $4, R12 + MOVOU (AX)(R12*1), X1 + PCMPEQL X0, X1 + MOVMSKPS X1, R13 + CMPL R13, $0b1111 + JE next + + INCQ R11 + JMP probe +next: + DECL R14 + MOVL R14, (R9)(SI*4) + INCQ SI + ADDQ R15, R8 +test: + CMPQ SI, DI + JNE loop + MOVQ CX, ret+112(FP) + RET +insert: + INCL CX + MOVL CX, (R10)(R12*4) + MOVL CX, R14 + SHLQ $4, R12 + MOVOU X0, (AX)(R12*1) + JMP next diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_purego.go b/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_purego.go new file mode 100644 index 00000000000..5afb40c74e9 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/hashprobe_purego.go @@ -0,0 +1,19 @@ +//go:build purego || !amd64 + +package hashprobe + +import ( + "github.com/parquet-go/parquet-go/sparse" +) + +func multiProbe32(table []table32Group, numKeys int, hashes []uintptr, keys sparse.Uint32Array, values []int32) int { + return multiProbe32Default(table, numKeys, hashes, keys, values) +} + +func multiProbe64(table []table64Group, numKeys int, hashes []uintptr, keys sparse.Uint64Array, values []int32) int { + return multiProbe64Default(table, numKeys, hashes, keys, values) +} + +func multiProbe128(table []byte, tableCap, tableLen int, hashes []uintptr, keys sparse.Uint128Array, values []int32) int { + return multiProbe128Default(table, tableCap, tableLen, hashes, keys, values) +} diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash.go b/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash.go new file mode 100644 index 00000000000..457bd8e114d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash.go 
@@ -0,0 +1,49 @@
+// Package wyhash implements a hashing algorithm derived from the Go runtime's
+// internal hashing fallback, which uses a variation of the wyhash algorithm.
+package wyhash
+
+import (
+	"encoding/binary"
+	"math/bits"
+
+	"github.com/parquet-go/parquet-go/sparse"
+)
+
+const (
+	m1 = 0xa0761d6478bd642f
+	m2 = 0xe7037ed1a0b428db
+	m3 = 0x8ebc6af09c88c6e3
+	m4 = 0x589965cc75374cc3
+	m5 = 0x1d8e4e27c47d124f
+)
+
+// mix folds the full 128-bit product of a and b down to 64 bits by xoring the
+// high and low halves of the multiplication together.
+func mix(a, b uint64) uint64 {
+	hi, lo := bits.Mul64(a, b)
+	return hi ^ lo
+}
+
+// Hash32 hashes a 32-bit value with the given seed. The constant 4 xored into
+// m5 is the input size in bytes (compare Hash64's 8 and Hash128's 16).
+func Hash32(value uint32, seed uintptr) uintptr {
+	return uintptr(mix(m5^4, mix(uint64(value)^m2, uint64(value)^uint64(seed)^m1)))
+}
+
+// Hash64 hashes a 64-bit value with the given seed (size term m5^8).
+func Hash64(value uint64, seed uintptr) uintptr {
+	return uintptr(mix(m5^8, mix(value^m2, value^uint64(seed)^m1)))
+}
+
+// Hash128 hashes a 16-byte value with the given seed by splitting it into two
+// little-endian 64-bit words (size term m5^16).
+func Hash128(value [16]byte, seed uintptr) uintptr {
+	a := binary.LittleEndian.Uint64(value[:8])
+	b := binary.LittleEndian.Uint64(value[8:])
+	return uintptr(mix(m5^16, mix(a^m2, b^uint64(seed)^m1)))
+}
+
+// MultiHash32 hashes each element of values into the corresponding slot of
+// hashes; thin wrapper over the sparse-array variant.
+func MultiHash32(hashes []uintptr, values []uint32, seed uintptr) {
+	MultiHashUint32Array(hashes, sparse.MakeUint32Array(values), seed)
+}
+
+// MultiHash64 is the 64-bit counterpart of MultiHash32.
+func MultiHash64(hashes []uintptr, values []uint64, seed uintptr) {
+	MultiHashUint64Array(hashes, sparse.MakeUint64Array(values), seed)
+}
+
+// MultiHash128 is the 128-bit counterpart of MultiHash32.
+func MultiHash128(hashes []uintptr, values [][16]byte, seed uintptr) {
+	MultiHashUint128Array(hashes, sparse.MakeUint128Array(values), seed)
+}
diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_amd64.go b/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_amd64.go new file mode 100644 index 00000000000..55f499debcf --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_amd64.go @@ -0,0 +1,14 @@
+//go:build !purego
+
+package wyhash
+
+import "github.com/parquet-go/parquet-go/sparse"
+
+//go:noescape
+func MultiHashUint32Array(hashes []uintptr, values sparse.Uint32Array, seed uintptr)
+
+//go:noescape
+func MultiHashUint64Array(hashes []uintptr,
values sparse.Uint64Array, seed uintptr) + +//go:noescape +func MultiHashUint128Array(hashes []uintptr, values sparse.Uint128Array, seed uintptr) diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_amd64.s b/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_amd64.s new file mode 100644 index 00000000000..7b99879f02d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_amd64.s @@ -0,0 +1,118 @@ +//go:build !purego + +#include "textflag.h" + +#define m1 0xa0761d6478bd642f +#define m2 0xe7037ed1a0b428db +#define m3 0x8ebc6af09c88c6e3 +#define m4 0x589965cc75374cc3 +#define m5 0x1d8e4e27c47d124f + +// func MultiHashUint32Array(hashes []uintptr, values sparse.Uint32Array, seed uintptr) +TEXT ·MultiHashUint32Array(SB), NOSPLIT, $0-56 + MOVQ hashes_base+0(FP), R12 + MOVQ values_array_ptr+24(FP), R13 + MOVQ values_array_len+32(FP), R14 + MOVQ values_array_off+40(FP), R15 + MOVQ seed+48(FP), R11 + + MOVQ $m1, R8 + MOVQ $m2, R9 + MOVQ $m5^4, R10 + XORQ R11, R8 + + XORQ SI, SI + JMP test +loop: + MOVL (R13), AX + MOVQ R8, BX + + XORQ AX, BX + XORQ R9, AX + + MULQ BX + XORQ DX, AX + + MULQ R10 + XORQ DX, AX + + MOVQ AX, (R12)(SI*8) + INCQ SI + ADDQ R15, R13 +test: + CMPQ SI, R14 + JNE loop + RET + +// func MultiHashUint64Array(hashes []uintptr, values sparse.Uint64Array, seed uintptr) +TEXT ·MultiHashUint64Array(SB), NOSPLIT, $0-56 + MOVQ hashes_base+0(FP), R12 + MOVQ values_array_ptr+24(FP), R13 + MOVQ values_array_len+32(FP), R14 + MOVQ values_array_off+40(FP), R15 + MOVQ seed+48(FP), R11 + + MOVQ $m1, R8 + MOVQ $m2, R9 + MOVQ $m5^8, R10 + XORQ R11, R8 + + XORQ SI, SI + JMP test +loop: + MOVQ (R13), AX + MOVQ R8, BX + + XORQ AX, BX + XORQ R9, AX + + MULQ BX + XORQ DX, AX + + MULQ R10 + XORQ DX, AX + + MOVQ AX, (R12)(SI*8) + INCQ SI + ADDQ R15, R13 +test: + CMPQ SI, R14 + JNE loop + RET + +// func MultiHashUint128Array(hashes []uintptr, values sparse.Uint128Array, seed uintptr) +TEXT 
·MultiHashUint128Array(SB), NOSPLIT, $0-56 + MOVQ hashes_base+0(FP), R12 + MOVQ values_array_ptr+24(FP), R13 + MOVQ values_array_len+32(FP), R14 + MOVQ values_array_off+40(FP), R15 + MOVQ seed+48(FP), R11 + + MOVQ $m1, R8 + MOVQ $m2, R9 + MOVQ $m5^16, R10 + XORQ R11, R8 + + XORQ SI, SI + JMP test +loop: + MOVQ 0(R13), AX + MOVQ 8(R13), DX + MOVQ R8, BX + + XORQ DX, BX + XORQ R9, AX + + MULQ BX + XORQ DX, AX + + MULQ R10 + XORQ DX, AX + + MOVQ AX, (R12)(SI*8) + INCQ SI + ADDQ R15, R13 +test: + CMPQ SI, R14 + JNE loop + RET diff --git a/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_purego.go b/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_purego.go new file mode 100644 index 00000000000..b5760f7ec5c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/hashprobe/wyhash/wyhash_purego.go @@ -0,0 +1,23 @@ +//go:build purego || !amd64 + +package wyhash + +import "github.com/parquet-go/parquet-go/sparse" + +func MultiHashUint32Array(hashes []uintptr, values sparse.Uint32Array, seed uintptr) { + for i := range hashes { + hashes[i] = Hash32(values.Index(i), seed) + } +} + +func MultiHashUint64Array(hashes []uintptr, values sparse.Uint64Array, seed uintptr) { + for i := range hashes { + hashes[i] = Hash64(values.Index(i), seed) + } +} + +func MultiHashUint128Array(hashes []uintptr, values sparse.Uint128Array, seed uintptr) { + for i := range hashes { + hashes[i] = Hash128(values.Index(i), seed) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_amd64.go b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_amd64.go new file mode 100644 index 00000000000..407f1cfc528 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_amd64.go @@ -0,0 +1,17 @@ +//go:build !purego + +package bytealg + +//go:noescape +func broadcastAVX2(dst []byte, src byte) + +// Broadcast writes the src value to all bytes of dst. 
+func Broadcast(dst []byte, src byte) { + if len(dst) >= 8 && hasAVX2 { + broadcastAVX2(dst, src) + } else { + for i := range dst { + dst[i] = src + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_amd64.s b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_amd64.s new file mode 100644 index 00000000000..241a658e318 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_amd64.s @@ -0,0 +1,51 @@ +//go:build !purego + +#include "textflag.h" + +// func broadcastAVX2(dst []byte, src byte) +TEXT ·broadcastAVX2(SB), NOSPLIT, $0-25 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), BX + MOVBQZX src+24(FP), CX + + CMPQ BX, $8 + JBE test + + CMPQ BX, $64 + JB init8 + + XORQ SI, SI + MOVQ BX, DX + SHRQ $6, DX + SHLQ $6, DX + MOVQ CX, X0 + VPBROADCASTB X0, Y0 +loop64: + VMOVDQU Y0, (AX)(SI*1) + VMOVDQU Y0, 32(AX)(SI*1) + ADDQ $64, SI + CMPQ SI, DX + JNE loop64 + VMOVDQU Y0, -64(AX)(BX*1) + VMOVDQU Y0, -32(AX)(BX*1) + VZEROUPPER + RET + +init8: + MOVQ $0x0101010101010101, R8 + IMULQ R8, CX +loop8: + MOVQ CX, -8(AX)(BX*1) + SUBQ $8, BX + CMPQ BX, $8 + JAE loop8 + MOVQ CX, (AX) + RET + +loop: + MOVB CX, -1(AX)(BX*1) + DECQ BX +test: + CMPQ BX, $0 + JNE loop + RET diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_purego.go b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_purego.go new file mode 100644 index 00000000000..fd858e0a236 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/broadcast_purego.go @@ -0,0 +1,9 @@ +//go:build purego || !amd64 + +package bytealg + +func Broadcast(dst []byte, src byte) { + for i := range dst { + dst[i] = src + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bytealg/bytealg.go b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/bytealg.go new file mode 100644 index 00000000000..0698df22f5f --- /dev/null +++ 
b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/bytealg.go @@ -0,0 +1,2 @@
+// Package bytealg contains optimized algorithms operating on byte slices.
+package bytealg
diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bytealg/bytealg_amd64.go b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/bytealg_amd64.go new file mode 100644 index 00000000000..edfcb64c74a --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/bytealg_amd64.go @@ -0,0 +1,17 @@
+//go:build !purego
+
+package bytealg
+
+import "golang.org/x/sys/cpu"
+
+var (
+	hasAVX2 = cpu.X86.HasAVX2
+	// The AVX-512 path of the countByte algorithm relies on operations
+	// that are only available with the AVX512BW extension:
+	// * VPCMPUB
+	// * KMOVQ
+	//
+	// Note that the function will fall back to an AVX2 version if those
+	// instructions are not available.
+	// NOTE(review): AVX512VL is also checked here — confirm the exact
+	// instruction forms used against count_amd64.s.
+	hasAVX512Count = cpu.X86.HasAVX512VL && cpu.X86.HasAVX512BW
+)
diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_amd64.go b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_amd64.go new file mode 100644 index 00000000000..b41d3d8d979 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_amd64.go @@ -0,0 +1,26 @@
+//go:build !purego
+
+package bytealg
+
+// This function is similar to using the standard bytes.Count function with a
+// one-byte separator.
However, the implementation makes use of AVX-512 when +// possible, which yields measurable throughput improvements: +// +// name old time/op new time/op delta +// CountByte 82.5ns ± 0% 43.9ns ± 0% -46.74% (p=0.000 n=10+10) +// +// name old speed new speed delta +// CountByte 49.6GB/s ± 0% 93.2GB/s ± 0% +87.74% (p=0.000 n=10+10) +// +// On systems that do not have AVX-512, the AVX2 version of the code is also +// optimized to make use of multiple register lanes, which gives a bit better +// throughput than the standard library function: +// +// name old time/op new time/op delta +// CountByte 82.5ns ± 0% 61.0ns ± 0% -26.04% (p=0.000 n=10+10) +// +// name old speed new speed delta +// CountByte 49.6GB/s ± 0% 67.1GB/s ± 0% +35.21% (p=0.000 n=10+10) +// +//go:noescape +func Count(data []byte, value byte) int diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_amd64.s b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_amd64.s new file mode 100644 index 00000000000..1a75a0f798a --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_amd64.s @@ -0,0 +1,100 @@ +//go:build !purego + +#include "textflag.h" + +// func Count(data []byte, value byte) int +TEXT ·Count(SB), NOSPLIT, $0-40 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + MOVB value+24(FP), BX + MOVQ CX, DX // len + ADDQ AX, CX // end + XORQ SI, SI // count + + CMPQ DX, $256 + JB test + + CMPB ·hasAVX2(SB), $0 + JE test + + XORQ R12, R12 + XORQ R13, R13 + XORQ R14, R14 + XORQ DI, DI + + CMPB ·hasAVX512Count(SB), $0 + JE initAVX2 + + SHRQ $8, DX + SHLQ $8, DX + ADDQ AX, DX + VPBROADCASTB BX, Z0 +loopAVX512: + VMOVDQU64 (AX), Z1 + VMOVDQU64 64(AX), Z2 + VMOVDQU64 128(AX), Z3 + VMOVDQU64 192(AX), Z4 + VPCMPUB $0, Z0, Z1, K1 + VPCMPUB $0, Z0, Z2, K2 + VPCMPUB $0, Z0, Z3, K3 + VPCMPUB $0, Z0, Z4, K4 + KMOVQ K1, R8 + KMOVQ K2, R9 + KMOVQ K3, R10 + KMOVQ K4, R11 + POPCNTQ R8, R8 + POPCNTQ R9, R9 + POPCNTQ R10, R10 + POPCNTQ R11, R11 + ADDQ R8, R12 + 
ADDQ R9, R13 + ADDQ R10, R14 + ADDQ R11, DI + ADDQ $256, AX + CMPQ AX, DX + JNE loopAVX512 + ADDQ R12, R13 + ADDQ R14, DI + ADDQ R13, SI + ADDQ DI, SI + JMP doneAVX + +initAVX2: + SHRQ $6, DX + SHLQ $6, DX + ADDQ AX, DX + VPBROADCASTB value+24(FP), Y0 +loopAVX2: + VMOVDQU (AX), Y1 + VMOVDQU 32(AX), Y2 + VPCMPEQB Y0, Y1, Y1 + VPCMPEQB Y0, Y2, Y2 + VPMOVMSKB Y1, R12 + VPMOVMSKB Y2, R13 + POPCNTL R12, R12 + POPCNTL R13, R13 + ADDQ R12, R14 + ADDQ R13, DI + ADDQ $64, AX + CMPQ AX, DX + JNE loopAVX2 + ADDQ R14, SI + ADDQ DI, SI + +doneAVX: + VZEROUPPER + JMP test + +loop: + MOVQ SI, DI + INCQ DI + MOVB (AX), R8 + CMPB BX, R8 + CMOVQEQ DI, SI + INCQ AX +test: + CMPQ AX, CX + JNE loop +done: + MOVQ SI, ret+32(FP) + RET diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_purego.go b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_purego.go new file mode 100644 index 00000000000..de35ee44a39 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/bytealg/count_purego.go @@ -0,0 +1,9 @@ +//go:build purego || !amd64 + +package bytealg + +import "bytes" + +func Count(data []byte, value byte) int { + return bytes.Count(data, []byte{value}) +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/debug/debug.go b/vendor/github.com/parquet-go/parquet-go/internal/debug/debug.go new file mode 100644 index 00000000000..8acc28c4c79 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/debug/debug.go @@ -0,0 +1,95 @@ +package debug + +import ( + "encoding/hex" + "fmt" + "io" + "log" + "os" + "strconv" + "strings" +) + +func ReaderAt(reader io.ReaderAt, prefix string) io.ReaderAt { + return &ioReaderAt{ + reader: reader, + prefix: prefix, + } +} + +type ioReaderAt struct { + reader io.ReaderAt + prefix string +} + +func (d *ioReaderAt) ReadAt(b []byte, off int64) (int, error) { + n, err := d.reader.ReadAt(b, off) + fmt.Printf("%s: Read(%d) @%d => %d %v \n%s\n", d.prefix, len(b), off, n, err, hex.Dump(b[:n])) + 
return n, err +} + +func Reader(reader io.Reader, prefix string) io.Reader { + return &ioReader{ + reader: reader, + prefix: prefix, + } +} + +type ioReader struct { + reader io.Reader + prefix string + offset int64 +} + +func (d *ioReader) Read(b []byte) (int, error) { + n, err := d.reader.Read(b) + fmt.Printf("%s: Read(%d) @%d => %d %v \n%s\n", d.prefix, len(b), d.offset, n, err, hex.Dump(b[:n])) + d.offset += int64(n) + return n, err +} + +func Writer(writer io.Writer, prefix string) io.Writer { + return &ioWriter{ + writer: writer, + prefix: prefix, + } +} + +type ioWriter struct { + writer io.Writer + prefix string + offset int64 +} + +func (d *ioWriter) Write(b []byte) (int, error) { + n, err := d.writer.Write(b) + fmt.Printf("%s: Write(%d) @%d => %d %v \n %q\n", d.prefix, len(b), d.offset, n, err, b[:n]) + d.offset += int64(n) + return n, err +} + +var ( + TRACEBUF int +) + +func init() { + for arg := range strings.SplitSeq(os.Getenv("PARQUETGODEBUG"), ",") { + k := arg + v := "" + i := strings.IndexByte(arg, '=') + if i >= 0 { + k, v = arg[:i], arg[i+1:] + } + var err error + switch k { + case "": + // ignore empty entries + case "tracebuf": + if TRACEBUF, err = strconv.Atoi(v); err != nil { + log.Printf("PARQUETGODEBUG: invalid value for tracebuf: %q", v) + } + default: + log.Printf("PARQUETGODEBUG: unrecognized debug option: %q", k) + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/debug/finalizer_off.go b/vendor/github.com/parquet-go/parquet-go/internal/debug/finalizer_off.go new file mode 100644 index 00000000000..acabe9cdf69 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/debug/finalizer_off.go @@ -0,0 +1,6 @@ +//go:build debug + +package debug + +// SetFinalizer is a no-op when the debug tag is specified. 
+func SetFinalizer(interface{}, interface{}) {} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/debug/finalizer_on.go b/vendor/github.com/parquet-go/parquet-go/internal/debug/finalizer_on.go new file mode 100644 index 00000000000..40edbe481c9 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/debug/finalizer_on.go @@ -0,0 +1,7 @@ +//go:build !debug + +package debug + +import "runtime" + +func SetFinalizer(obj, finalizer any) { runtime.SetFinalizer(obj, finalizer) } diff --git a/vendor/github.com/parquet-go/parquet-go/internal/memory/buffer.go b/vendor/github.com/parquet-go/parquet-go/internal/memory/buffer.go new file mode 100644 index 00000000000..d4498179252 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/memory/buffer.go @@ -0,0 +1,128 @@ +package memory + +import ( + "fmt" + "io" +) + +// Buffer is a buffer that stores bytes in fixed-size chunks and implements +// io.ReadWriteSeeker. +// +// It uses ChunkBuffer[byte] internally for chunk management. 
+type Buffer struct { + data ChunkBuffer[byte] + seek int64 // absolute offset for read/write operations +} + +func NewBuffer(chunkSize int) *Buffer { + return &Buffer{data: ChunkBufferFor[byte](chunkSize)} +} + +func (b *Buffer) Reset() { + b.data.Reset() + b.seek = 0 +} + +func (b *Buffer) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if b.seek >= int64(b.data.Len()) { + return 0, io.EOF + } + + chunkSize := b.data.chunkSize + i := int(b.seek) / chunkSize + offset := int(b.seek) % chunkSize + + chunk := b.data.Chunk(i) + n = copy(p, chunk[offset:]) + b.seek += int64(n) + + return n, nil +} + +func (b *Buffer) Write(p []byte) (int, error) { + n := len(p) + if n == 0 { + return 0, nil + } + + chunkSize := b.data.chunkSize + for len(p) > 0 { + i := int(b.seek) / chunkSize + offset := int(b.seek) % chunkSize + + if i == b.data.NumChunks() { + // Append dummy byte to allocate chunk, we'll overwrite it + b.data.Append(0) + } + + chunk := b.data.Chunk(i) + chunk = chunk[:cap(chunk)] + written := copy(chunk[offset:], p) + + newSeek := b.seek + int64(written) + if int(newSeek) > b.data.length { + b.data.length = int(newSeek) + } + + b.seek = newSeek + p = p[written:] + } + + return n, nil +} + +func (b *Buffer) WriteTo(w io.Writer) (int64, error) { + var written int64 + var err error + chunkSize := b.data.chunkSize + + for err == nil && b.seek < int64(b.data.Len()) { + i := int(b.seek) / chunkSize + offset := int(b.seek) % chunkSize + + chunk := b.data.Chunk(i) + n, e := w.Write(chunk[offset:]) + written += int64(n) + b.seek += int64(n) + err = e + } + return written, err +} + +func (b *Buffer) Seek(offset int64, whence int) (int64, error) { + var position int64 + + switch whence { + case io.SeekStart: + position = offset + case io.SeekCurrent: + position = b.seek + offset + case io.SeekEnd: + position = int64(b.data.Len()) + offset + default: + return 0, fmt.Errorf("seek: invalid whence: %d", whence) + } + + if position < 0 { + return 0, 
fmt.Errorf("seek: negative offset: %d<0", position) + } + + end := int64(b.data.Len()) + if position > end { + position = end + } + + b.seek = position + return position, nil +} + +var ( + _ io.Writer = (*Buffer)(nil) + _ io.Reader = (*Buffer)(nil) + _ io.Seeker = (*Buffer)(nil) + _ io.WriterTo = (*Buffer)(nil) +) diff --git a/vendor/github.com/parquet-go/parquet-go/internal/memory/chunk_buffer.go b/vendor/github.com/parquet-go/parquet-go/internal/memory/chunk_buffer.go new file mode 100644 index 00000000000..c11a38f8c5d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/memory/chunk_buffer.go @@ -0,0 +1,116 @@ +package memory + +import ( + "math/bits" + "unsafe" +) + +// ChunkBuffer is a buffer that stores data in fixed-size chunks. +// Chunks are allocated lazily on demand and reused via slice pools. +// The chunk size is rounded up to the nearest power of two to utilize slice buffers fully. +// This design minimizes memory fragmentation and provides predictable memory usage. +type ChunkBuffer[T Datum] struct { + chunks []*slice[byte] + chunkSize int // in bytes, always a power of 2 + length int +} + +// ChunkBufferFor creates a new ChunkBuffer with the given chunk size (in bytes). +// The chunk size will be rounded up to the nearest power of two. +func ChunkBufferFor[T Datum](chunkSize int) ChunkBuffer[T] { + size := nextPowerOfTwo(uint(chunkSize)) + // Clamp to bucket system range + minSize := uint(minBucketSize) + maxSize := uint(bucketSize(numBuckets - 1)) + size = max(minSize, min(size, maxSize)) + return ChunkBuffer[T]{chunkSize: int(size)} +} + +func nextPowerOfTwo(n uint) uint { + if n == 0 { + return 1 + } + // bits.Len(x) returns the minimum number of bits required to represent x + return 1 << bits.Len(n-1) +} + +// Append adds data to the buffer, allocating new chunks as needed. 
+func (b *ChunkBuffer[T]) Append(data ...T) { + elemSize := int(unsafe.Sizeof(*new(T))) + capacity := b.chunkSize / elemSize + + for len(data) > 0 { + if len(b.chunks) == 0 || (b.length&(capacity-1)) == 0 { + bucketIndex := bucketIndexOfGet(b.chunkSize) + chunk := slicePools[bucketIndex].Get( + func() *slice[byte] { + return newSlice[byte](b.chunkSize) + }, + func(s *slice[byte]) { s.data = s.data[:0] }, + ) + b.chunks = append(b.chunks, chunk) + } + + currentChunk := b.chunks[len(b.chunks)-1] + chunkData := (*T)(unsafe.Pointer(unsafe.SliceData(currentChunk.data))) + typeChunk := unsafe.Slice(chunkData, capacity) + offset := b.length & (capacity - 1) + available := capacity - offset + toWrite := min(len(data), available) + + copy(typeChunk[offset:], data[:toWrite]) + b.length += toWrite + data = data[toWrite:] + } +} + +// Reset returns all chunks to the pool and resets the buffer to empty. +func (b *ChunkBuffer[T]) Reset() { + bucketIndex := bucketIndexOfGet(b.chunkSize) + for i := range b.chunks { + slicePools[bucketIndex].Put(b.chunks[i]) + b.chunks[i] = nil + } + b.chunks = b.chunks[:0] + b.length = 0 +} + +// Len returns the number of elements currently in the buffer. +func (b *ChunkBuffer[T]) Len() int { return b.length } + +// NumChunks returns the number of chunks currently allocated. +func (b *ChunkBuffer[T]) NumChunks() int { return len(b.chunks) } + +// Chunk returns the data for the chunk at the given index. +// The caller must ensure i < NumChunks(). +func (b *ChunkBuffer[T]) Chunk(i int) []T { + elemSize := int(unsafe.Sizeof(*new(T))) + capacity := b.chunkSize / elemSize + chunk := b.chunks[i] + chunkData := (*T)(unsafe.Pointer(unsafe.SliceData(chunk.data))) + // Return slice with valid length but full capacity + remaining := b.length - i*capacity + length := min(remaining, capacity) + s := unsafe.Slice(chunkData, capacity) + return s[:length] +} + +// Chunks returns an iterator over the chunks in the buffer. 
+// Each chunk is yielded as a slice containing the valid data. +func (b *ChunkBuffer[T]) Chunks(yield func([]T) bool) { + elemSize := int(unsafe.Sizeof(*new(T))) + capacity := b.chunkSize / elemSize + remaining := b.length + for _, c := range b.chunks { + if remaining == 0 { + break + } + chunkData := (*T)(unsafe.Pointer(unsafe.SliceData(c.data))) + chunkSize := min(remaining, capacity) + typeChunk := unsafe.Slice(chunkData, chunkSize) + if !yield(typeChunk) { + return + } + remaining -= chunkSize + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/memory/memory.go b/vendor/github.com/parquet-go/parquet-go/internal/memory/memory.go new file mode 100644 index 00000000000..8256d11dc04 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/memory/memory.go @@ -0,0 +1,7 @@ +package memory + +// Datum is a constraint for types that can be stored in chunk and slice buffers. +// It includes the common numeric types used in parquet files. +type Datum interface { + ~byte | ~int32 | ~int64 | ~uint32 | ~uint64 | ~float32 | ~float64 +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/memory/pool.go b/vendor/github.com/parquet-go/parquet-go/internal/memory/pool.go new file mode 100644 index 00000000000..7046c724b51 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/memory/pool.go @@ -0,0 +1,25 @@ +package memory + +import "sync" + +// Pool is a generic wrapper around sync.Pool that provides type-safe pooling +// of pointers to values of type T. 
+type Pool[T any] struct { + pool sync.Pool // *T +} + +func (p *Pool[T]) Get(newT func() *T, resetT func(*T)) *T { + v, _ := p.pool.Get().(*T) + if v == nil { + v = newT() + } else { + resetT(v) + } + return v +} + +func (p *Pool[T]) Put(v *T) { + if v != nil { + p.pool.Put(v) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/memory/slice_buffer.go b/vendor/github.com/parquet-go/parquet-go/internal/memory/slice_buffer.go new file mode 100644 index 00000000000..ba035fb26e3 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/memory/slice_buffer.go @@ -0,0 +1,352 @@ +package memory + +import ( + "unsafe" + + "github.com/parquet-go/parquet-go/internal/unsafecast" +) + +// slice is a wrapper around a slice to enable pooling. +type slice[T Datum] struct{ data []T } + +func newSlice[T Datum](cap int) *slice[T] { + return &slice[T]{data: make([]T, 0, cap)} +} + +// SliceBuffer is a buffer that stores data in a single contiguous slice. +// The slice grows by moving to larger size buckets from pools as needed. +// This design provides efficient sequential access and minimal overhead for small datasets. +type SliceBuffer[T Datum] struct { + slice *slice[T] // non-nil if data came from pool (used to return to pool on Reset) + data []T // the active slice (always used for access) +} + +const ( + numBuckets = 32 + minBucketSize = 4096 // 4 KiB (smallest bucket) + lastShortBucketSize = 262144 // 256 KiB (transition point for growth strategy) +) + +var slicePools [numBuckets]Pool[slice[byte]] + +// nextBucketSize computes the next bucket size using the hybrid growth strategy: +// - Below 256KB: grow by 2x (4K, 8K, 16K, 32K, 64K, 128K, 256K) +// - Above 256KB: grow by 1.5x (384K, 576K, 864K, ...) +func nextBucketSize(size int) int { + if size < lastShortBucketSize { + return size * 2 + } else { + return size + (size / 2) + } +} + +// SliceBufferFrom creates a SliceBuffer that wraps an existing slice without copying. 
+// The buffer takes ownership of the slice and will not return it to any pool. +func SliceBufferFrom[T Datum](data []T) SliceBuffer[T] { + return SliceBuffer[T]{data: data} +} + +// SliceBufferFor creates a SliceBuffer with pre-allocated capacity for the given number of elements. +// The buffer will be backed by a pooled slice large enough to hold cap elements. +func SliceBufferFor[T Datum](cap int) SliceBuffer[T] { + var buf SliceBuffer[T] + buf.Grow(cap) + return buf +} + +// reserveMore ensures the buffer has capacity for at least one more element. +// +// This is moved to a separate function to keep the complexity cost of AppendValue +// within the inlining budget. +// +//go:noinline +func (b *SliceBuffer[T]) reserveMore() { b.reserve(1) } + +// reserve ensures the buffer has capacity for at least count more elements. +// It handles transitioning from external data to pooled storage and growing when needed. +// Caller must check that len(b.data)+count > cap(b.data) before calling. +func (b *SliceBuffer[T]) reserve(count int) { + elemSize := int(unsafe.Sizeof(*new(T))) + requiredBytes := (len(b.data) + count) * elemSize + bucketIndex := bucketIndexOfGet(requiredBytes) + + if bucketIndex < 0 { + // Size exceeds all buckets, allocate directly without pooling + newCap := len(b.data) + count + newData := make([]T, len(b.data), newCap) + copy(newData, b.data) + if b.slice != nil { + putSliceToPool(b.slice, elemSize) + b.slice = nil + } + b.data = newData + return + } + + if b.slice == nil { + // Either empty or using external data + b.slice = getSliceFromPool[T](bucketIndex, elemSize) + if b.data != nil { + // Transition from external data to pooled storage + b.slice.data = append(b.slice.data, b.data...) + } + b.data = b.slice.data + } else { + // Already using pooled storage, grow to new bucket + oldSlice := b.slice + b.slice = getSliceFromPool[T](bucketIndex, elemSize) + b.slice.data = append(b.slice.data, b.data...) 
+ oldSlice.data = b.data // Sync before returning to pool + putSliceToPool(oldSlice, elemSize) + b.data = b.slice.data + } +} + +// Append adds data to the buffer, growing the slice as needed by promoting to +// larger pool buckets. +func (b *SliceBuffer[T]) Append(data ...T) { + if len(b.data)+len(data) > cap(b.data) { + b.reserve(len(data)) + } + b.data = append(b.data, data...) +} + +// AppendFunc calls fn with the internal buffer and updates the buffer with the +// returned slice. This is useful for functions that follow the append pattern +// (e.g., strconv.AppendInt, jsonlite.AppendQuote) where the function may +// reallocate the slice if capacity is insufficient. +// +// If fn reallocates the slice (returns a different backing array), the buffer +// transitions to using external data and releases any pooled storage. +func (b *SliceBuffer[T]) AppendFunc(fn func([]T) []T) { + const reserveSize = 1024 + + if (cap(b.data) - len(b.data)) < reserveSize { + // Ensure there's some extra capacity to reduce reallocations + // when fn appends small amounts of data. + b.reserve(reserveSize) + } + + oldData := b.data + newData := fn(b.data) + + if unsafe.SliceData(oldData) != unsafe.SliceData(newData) { + if b.slice != nil { + // Release pooled storage since fn allocated new memory + b.slice.data = oldData + putSliceToPool(b.slice, int(unsafe.Sizeof(*new(T)))) + b.slice = nil + } + b.slice = &slice[T]{data: newData} + } + + b.data = newData +} + +// AppendValue appends a single value to the buffer. +func (b *SliceBuffer[T]) AppendValue(value T) { + if len(b.data) == cap(b.data)-1 { + b.reserveMore() + } + b.data = append(b.data, value) +} + +// SliceWriter wraps a SliceBuffer[byte] to provide io.Writer and other +// byte-specific write methods without generics type parameter shadowing issues. +type SliceWriter struct { + Buffer *SliceBuffer[byte] +} + +// Write appends data to the buffer and returns the number of bytes written. +// Implements io.Writer. 
+func (w SliceWriter) Write(p []byte) (n int, err error) { + w.Buffer.Append(p...) + return len(p), nil +} + +// WriteString appends a string to the buffer by copying its bytes. +func (w SliceWriter) WriteString(s string) (n int, err error) { + if len(s) == 0 { + return 0, nil + } + oldLen := len(w.Buffer.data) + if oldLen+len(s) > cap(w.Buffer.data) { + w.Buffer.reserve(len(s)) + } + w.Buffer.data = w.Buffer.data[:oldLen+len(s)] + copy(w.Buffer.data[oldLen:], s) + return len(s), nil +} + +// WriteByte appends a single byte to the buffer. +func (w SliceWriter) WriteByte(c byte) error { + w.Buffer.AppendValue(c) + return nil +} + +// Reset returns the slice to its pool and resets the buffer to empty. +func (b *SliceBuffer[T]) Reset() { + if b.slice != nil { + b.slice.data = b.data // Sync before returning to pool + elemSize := int(unsafe.Sizeof(*new(T))) + putSliceToPool(b.slice, elemSize) + b.slice = nil + } + b.data = nil +} + +// Cap returns the current capacity. +func (b *SliceBuffer[T]) Cap() int { return cap(b.data) } + +// Len returns the number of elements currently in the buffer. +func (b *SliceBuffer[T]) Len() int { return len(b.data) } + +// Slice returns a view of the current data. +// The returned slice is only valid until the next call to Append or Reset. +func (b *SliceBuffer[T]) Slice() []T { return b.data } + +// Swap swaps the elements at indices i and j. +func (b *SliceBuffer[T]) Swap(i, j int) { + b.data[i], b.data[j] = b.data[j], b.data[i] +} + +// Less reports whether the element at index i is less than the element at index j. +func (b *SliceBuffer[T]) Less(i, j int) bool { + s := b.data + return s[i] < s[j] +} + +// Grow ensures the buffer has capacity for at least n more elements. +func (b *SliceBuffer[T]) Grow(n int) { + if n > 0 && len(b.data)+n > cap(b.data) { + b.reserve(n) + } +} + +// Clone creates a copy of the buffer with its own pooled allocation. +// The cloned buffer is allocated from the pool with exactly the right size. 
+func (b *SliceBuffer[T]) Clone() SliceBuffer[T] { + if len(b.data) == 0 { + return SliceBuffer[T]{} + } + + elemSize := int(unsafe.Sizeof(*new(T))) + requiredBytes := len(b.data) * elemSize + bucketIndex := bucketIndexOfGet(requiredBytes) + + if bucketIndex < 0 { + // Size exceeds all buckets, allocate directly without pooling + newData := make([]T, len(b.data)) + copy(newData, b.data) + return SliceBuffer[T]{data: newData} + } + + cloned := SliceBuffer[T]{ + slice: getSliceFromPool[T](bucketIndex, elemSize), + } + cloned.slice.data = append(cloned.slice.data, b.data...) + cloned.data = cloned.slice.data + return cloned +} + +// Resize changes the length of the buffer to size, growing capacity if needed. +// If size is larger than the current length, the new elements contain uninitialized data. +// If size is smaller, the buffer is truncated. +func (b *SliceBuffer[T]) Resize(size int) { + if size <= len(b.data) { + b.data = b.data[:size] + } else { + if size > cap(b.data) { + b.reserve(size - len(b.data)) + } + b.data = b.data[:size] + } +} + +func bucketIndexOfGet(requiredBytes int) int { + if requiredBytes <= 0 { + return 0 + } + + // Find the smallest bucket that can hold requiredBytes + size := minBucketSize + for i := range numBuckets { + if requiredBytes <= size { + return i + } + size = nextBucketSize(size) + } + + // If requiredBytes exceeds all buckets, return -1 to indicate + // the allocation should not use pooling + return -1 +} + +func bucketIndexOfPut(capacityBytes int) int { + // When releasing buffers, some may have a capacity that is not one of the + // bucket sizes (due to the use of append for example). In this case, we + // return the buffer to the highest bucket with a size less or equal + // to the buffer capacity. 
+ if capacityBytes < minBucketSize { + return -1 + } + + size := minBucketSize + for i := range numBuckets { + nextSize := nextBucketSize(size) + if capacityBytes < nextSize { + return i + } + size = nextSize + } + + // If we've gone through all buckets, return the last bucket + return numBuckets - 1 +} + +func bucketSize(bucketIndex int) int { + if bucketIndex < 0 || bucketIndex >= numBuckets { + return 0 + } + + size := minBucketSize + for range bucketIndex { + size = nextBucketSize(size) + } + return size +} + +func getSliceFromPool[T Datum](bucketIndex int, elemSize int) *slice[T] { + byteSlice := slicePools[bucketIndex].Get( + func() *slice[byte] { + return newSlice[byte](bucketSize(bucketIndex)) + }, + func(s *slice[byte]) { + s.data = s.data[:0] + }, + ) + + typeSlice := (*slice[T])(unsafe.Pointer(byteSlice)) + typeSlice.data = unsafecast.Slice[T](byteSlice.data) + return typeSlice +} + +func putSliceToPool[T Datum](s *slice[T], elemSize int) { + if s == nil || s.data == nil { + return + } + + byteLen := cap(s.data) * elemSize + bucketIndex := bucketIndexOfPut(byteLen) + + // If bucket index is -1, the buffer is too small to pool + if bucketIndex < 0 { + return + } + + byteSlice := (*slice[byte])(unsafe.Pointer(s)) + byteSlice.data = unsafecast.Slice[byte](s.data) + slicePools[bucketIndex].Put(byteSlice) +} + +var _ SliceBuffer[byte] diff --git a/vendor/github.com/parquet-go/parquet-go/internal/unsafecast/unsafecast.go b/vendor/github.com/parquet-go/parquet-go/internal/unsafecast/unsafecast.go new file mode 100644 index 00000000000..0838fd10e68 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/unsafecast/unsafecast.go @@ -0,0 +1,54 @@ +// Package unsafecast exposes functions to bypass the Go type system and perform +// conversions between types that would otherwise not be possible. 
+// +// The functions of this package are mostly useful as optimizations to avoid +// memory copies when converting between compatible memory layouts; for example, +// casting a [][16]byte to a []byte in order to use functions of the standard +// bytes package on the slices. +// +// With great power comes great responsibility. +package unsafecast + +import "unsafe" + +// The slice type represents the memory layout of slices in Go. It is similar to +// reflect.SliceHeader but uses a unsafe.Pointer instead of uintptr to for the +// backing array to allow the garbage collector to track track the reference. +type slice struct { + ptr unsafe.Pointer + len int + cap int +} + +// Slice converts the data slice of type []From to a slice of type []To sharing +// the same backing array. The length and capacity of the returned slice are +// scaled according to the size difference between the source and destination +// types. +// +// Note that the function does not perform any checks to ensure that the memory +// layouts of the types are compatible, it is possible to cause memory +// corruption if the layouts mismatch (e.g. the pointers in the From are +// different than the pointers in To). +func Slice[To, From any](data []From) []To { + // This function could use unsafe.Slice but it would drop the capacity + // information, so instead we implement the type conversion. + var zf From + var zt To + var s = slice{ + ptr: unsafe.Pointer(unsafe.SliceData(data)), + len: int((uintptr(len(data)) * unsafe.Sizeof(zf)) / unsafe.Sizeof(zt)), + cap: int((uintptr(cap(data)) * unsafe.Sizeof(zf)) / unsafe.Sizeof(zt)), + } + return *(*[]To)(unsafe.Pointer(&s)) +} + +// String converts a byte slice to a string value. The returned string shares +// the backing array of the byte slice. 
+// +// Programs using this function are responsible for ensuring that the data slice +// is not modified while the returned string is in use, otherwise the guarantee +// of immutability of Go string values will be violated, resulting in undefined +// behavior. +func String(data []byte) string { + return unsafe.String(unsafe.SliceData(data), len(data)) +} diff --git a/vendor/github.com/parquet-go/parquet-go/level.go b/vendor/github.com/parquet-go/parquet-go/level.go new file mode 100644 index 00000000000..43b76ae3093 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/level.go @@ -0,0 +1,33 @@ +package parquet + +import "github.com/parquet-go/parquet-go/internal/bytealg" + +type columnLevels struct { + repetitionDepth byte + repetitionLevel byte + definitionLevel byte +} + +func countLevelsEqual(levels []byte, value byte) int { + return bytealg.Count(levels, value) +} + +func countLevelsNotEqual(levels []byte, value byte) int { + return len(levels) - countLevelsEqual(levels, value) +} + +func appendLevel(levels []byte, value byte, count int) []byte { + i := len(levels) + n := len(levels) + count + + if cap(levels) < n { + newLevels := make([]byte, n, 2*n) + copy(newLevels, levels) + levels = newLevels + } else { + levels = levels[:n] + } + + bytealg.Broadcast(levels[i:], value) + return levels +} diff --git a/vendor/github.com/parquet-go/parquet-go/limits.go b/vendor/github.com/parquet-go/parquet-go/limits.go new file mode 100644 index 00000000000..bf877565c98 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/limits.go @@ -0,0 +1,64 @@ +package parquet + +import ( + "fmt" + "math" +) + +const ( + // MaxColumnDepth is the maximum column depth supported by this package. + MaxColumnDepth = math.MaxInt8 + + // MaxColumnIndex is the maximum column index supported by this package. + MaxColumnIndex = math.MaxInt16 + + // MaxRepetitionLevel is the maximum repetition level supported by this + // package. 
+ MaxRepetitionLevel = math.MaxUint8 + + // MaxDefinitionLevel is the maximum definition level supported by this + // package. + MaxDefinitionLevel = math.MaxUint8 + + // MaxRowGroups is the maximum number of row groups which can be contained + // in a single parquet file. + // + // This limit is enforced by the use of 16 bits signed integers in the file + // metadata footer of parquet files. It is part of the parquet specification + // and therefore cannot be changed. + MaxRowGroups = math.MaxInt16 +) + +const ( + estimatedSizeOfByteArrayValues = 20 +) + +func makeRepetitionLevel(i int) byte { + checkIndexRange("repetition level", i, 0, MaxRepetitionLevel) + return byte(i) +} + +func makeDefinitionLevel(i int) byte { + checkIndexRange("definition level", i, 0, MaxDefinitionLevel) + return byte(i) +} + +func makeColumnIndex(i int) int16 { + checkIndexRange("column index", i, 0, MaxColumnIndex) + return int16(i) +} + +func makeNumValues(i int) int32 { + checkIndexRange("number of values", i, 0, math.MaxInt32) + return int32(i) +} + +func checkIndexRange(typ string, i, min, max int) { + if i < min || i > max { + panic(errIndexOutOfRange(typ, i, min, max)) + } +} + +func errIndexOutOfRange(typ string, i, min, max int) error { + return fmt.Errorf("%s out of range: %d not in [%d:%d]", typ, i, min, max) +} diff --git a/vendor/github.com/parquet-go/parquet-go/merge.go b/vendor/github.com/parquet-go/parquet-go/merge.go new file mode 100644 index 00000000000..0c37f1c6995 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/merge.go @@ -0,0 +1,816 @@ +package parquet + +import ( + "cmp" + "fmt" + "io" + "iter" + "slices" + + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// MergeRowGroups constructs a row group which is a merged view of rowGroups. If +// rowGroups are sorted and the passed options include sorting, the merged row +// group will also be sorted. 
+// +// The function validates the input to ensure that the merge operation is +// possible, ensuring that the schemas match or can be converted to an +// optionally configured target schema passed as argument in the option list. +// +// The sorting columns of each row group are also consulted to determine whether +// the output can be represented. If sorting columns are configured on the merge +// they must be a prefix of sorting columns of all row groups being merged. +func MergeRowGroups(rowGroups []RowGroup, options ...RowGroupOption) (RowGroup, error) { + config, err := NewRowGroupConfig(options...) + if err != nil { + return nil, err + } + + schema := config.Schema + if len(rowGroups) == 0 { + if schema == nil { + return nil, fmt.Errorf("cannot merge empty row groups without a schema") + } + return newEmptyRowGroup(schema), nil + } + if schema == nil { + schemas := make([]Node, len(rowGroups)) + for i, rowGroup := range rowGroups { + schemas[i] = rowGroup.Schema() + } + schema = NewSchema(rowGroups[0].Schema().Name(), MergeNodes(schemas...)) + } + + mergedRowGroups := slices.Clone(rowGroups) + for i, rowGroup := range mergedRowGroups { + rowGroupSchema := rowGroup.Schema() + // Always apply conversion when merging multiple row groups to ensure + // column indices match the merged schema layout. The merge process can + // reorder fields even when schemas are otherwise identical. 
+ conv, err := Convert(schema, rowGroupSchema) + if err != nil { + return nil, fmt.Errorf("cannot merge row groups: %w", err) + } + mergedRowGroups[i] = ConvertRowGroup(rowGroup, conv) + } + + // Determine the effective sorting columns for the merge + mergedSortingColumns := slices.Clone(config.Sorting.SortingColumns) + if len(mergedSortingColumns) == 0 { + // Auto-detect common sorting columns from input row groups + sortingColumns := make([][]SortingColumn, len(mergedRowGroups)) + for i, rowGroup := range mergedRowGroups { + sortingColumns[i] = rowGroup.SortingColumns() + } + mergedSortingColumns = MergeSortingColumns(sortingColumns...) + } + + if len(mergedSortingColumns) == 0 { + // When there are no effective sorting columns, use a simpler version of the + // merger which simply concatenates rows from each of the row groups. + // This is preferable because it makes the output deterministic, the + // heap merge may otherwise reorder rows across groups. + // + // IMPORTANT: We need to ensure conversions are applied even in the simple + // concatenation path. Instead of returning the multiRowGroup directly + // (which bypasses row-level conversion), we create a simple concatenating + // row reader that preserves the conversion logic. 
+ return newMultiRowGroup(schema, nil, mergedRowGroups), nil + } + + mergedCompare := compareRowsFuncOf(schema, mergedSortingColumns) + // Optimization: detect non-overlapping row groups and create segments + rowGroupSegments := make([]RowGroup, 0) + for segment := range overlappingRowGroups(mergedRowGroups, schema, mergedSortingColumns, mergedCompare) { + if len(segment) == 1 { + rowGroupSegments = append(rowGroupSegments, segment[0]) + } else { + merged := &mergedRowGroup{compare: mergedCompare} + merged.init(schema, mergedSortingColumns, segment) + rowGroupSegments = append(rowGroupSegments, merged) + } + } + + if len(rowGroupSegments) == 1 { + return rowGroupSegments[0], nil + } + + return newMultiRowGroup(schema, mergedSortingColumns, rowGroupSegments), nil +} + +// overlappingRowGroups analyzes row groups to find non-overlapping segments +// Returns groups of row groups where each group either: +// 1. Contains a single non-overlapping row group (can be concatenated) +// 2. Contains multiple overlapping row groups (need to be merged) +func overlappingRowGroups(rowGroups []RowGroup, schema *Schema, sorting []SortingColumn, compare func(Row, Row) int) iter.Seq[[]RowGroup] { + return func(yield func([]RowGroup) bool) { + type rowGroupRange struct { + rowGroup RowGroup + minRow Row + maxRow Row + } + + rowGroupRanges := make([]rowGroupRange, 0, len(rowGroups)) + for _, rg := range rowGroups { + if rg.NumRows() == 0 { + continue + } + minRow, maxRow, err := rowGroupRangeOfSortedColumns(rg, schema, sorting) + if err != nil { + yield(rowGroups) + return + } + rowGroupRanges = append(rowGroupRanges, rowGroupRange{ + rowGroup: rg, + minRow: minRow, + maxRow: maxRow, + }) + } + if len(rowGroupRanges) == 0 { + return + } + if len(rowGroupRanges) == 1 { + yield([]RowGroup{rowGroupRanges[0].rowGroup}) + return + } + + // Sort row groups by their minimum values + slices.SortFunc(rowGroupRanges, func(a, b rowGroupRange) int { + return compare(a.minRow, b.minRow) + }) + + // 
Detect overlapping segments + currentSegment := []RowGroup{rowGroupRanges[0].rowGroup} + currentMax := rowGroupRanges[0].maxRow + + for _, rr := range rowGroupRanges[1:] { + if cmp := compare(rr.minRow, currentMax); cmp <= 0 { + // Overlapping - add to current segment and extend max if necessary + currentSegment = append(currentSegment, rr.rowGroup) + if cmp > 0 { + currentMax = rr.maxRow + } + } else { + // Non-overlapping - yield current segment + if !yield(currentSegment) { + return + } + currentSegment = []RowGroup{rr.rowGroup} + currentMax = rr.maxRow + } + } + + if len(currentSegment) > 0 { + yield(currentSegment) + } + } +} + +func rowGroupRangeOfSortedColumns(rg RowGroup, schema *Schema, sorting []SortingColumn) (minRow, maxRow Row, err error) { + // Extract min/max values from column indices + columnChunks := rg.ColumnChunks() + columns := schema.Columns() + minValues := make([]Value, len(columns)) + maxValues := make([]Value, len(columns)) + + // Fill in default null values for non-sorting columns + for i := range columns { + minValues[i] = Value{}.Level(0, 0, i) + maxValues[i] = Value{}.Level(0, 0, i) + } + + for _, sortingColumn := range sorting { + // Find column index + sortingColumnIndex := -1 + sortingColumnPath := columnPath(sortingColumn.Path()) + for columnIndex, columnPath := range columns { + if slices.Equal(columnPath, sortingColumnPath) { + sortingColumnIndex = columnIndex + break + } + } + if sortingColumnIndex < 0 { + return nil, nil, fmt.Errorf("sorting column %v not found in schema", sortingColumnPath) + } + + columnChunk := columnChunks[sortingColumnIndex] + columnIndex, err := columnChunk.ColumnIndex() + if err != nil || columnIndex == nil || columnIndex.NumPages() == 0 { + // No column index available - fall back to merging + return nil, nil, fmt.Errorf("column index not available for sorting column %s", sortingColumnPath) + } + + // Since data is sorted by sorting columns, we can efficiently get min/max: + // - Min value = min of 
first non-null page + // - Max value = max of last non-null page + numPages := columnIndex.NumPages() + + // Find first non-null page for min value + var globalMin Value + var found bool + for pageIdx := range numPages { + if !columnIndex.NullPage(pageIdx) { + if minValue := columnIndex.MinValue(pageIdx); !minValue.IsNull() { + globalMin, found = minValue, true + break + } + } + } + if !found { + return nil, nil, fmt.Errorf("no valid pages found in column index for column %s", sortingColumnPath) + } + + // Find last non-null page for max value + var globalMax Value + for pageIdx := numPages - 1; pageIdx >= 0; pageIdx-- { + if !columnIndex.NullPage(pageIdx) { + if maxValue := columnIndex.MaxValue(pageIdx); !maxValue.IsNull() { + globalMax = maxValue + break + } + } + } + + // Set the min/max values with proper levels + minValues[sortingColumnIndex] = globalMin.Level(0, 1, sortingColumnIndex) + maxValues[sortingColumnIndex] = globalMax.Level(0, 1, sortingColumnIndex) + } + + minRow = Row(minValues) + maxRow = Row(maxValues) + return +} + +// compareValues compares two parquet values, taking into account the descending flag +func compareValues(a, b Value, columnType Type, descending bool) int { + cmp := columnType.Compare(a, b) + if descending { + return -cmp + } + return cmp +} + +type mergedRowGroup struct { + multiRowGroup + compare func(Row, Row) int +} + +func (m *mergedRowGroup) Rows() Rows { + // The row group needs to respect a sorting order; the merged row reader + // uses a heap to merge rows from the row groups. 
+ rows := make([]Rows, len(m.rowGroups)) + for i := range rows { + rows[i] = m.rowGroups[i].Rows() + } + return &mergedRowGroupRows{ + merge: mergeRowReaders(rows, m.compare), + rows: rows, + schema: m.schema, + } +} + +type mergedRowGroupRows struct { + merge RowReader + rowIndex int64 + seekToRow int64 + rows []Rows + schema *Schema +} + +func (r *mergedRowGroupRows) Close() (lastErr error) { + r.rowIndex = -1 + r.seekToRow = 0 + + for _, rows := range r.rows { + if err := rows.Close(); err != nil { + lastErr = err + } + } + + return lastErr +} + +func (r *mergedRowGroupRows) ReadRows(rows []Row) (int, error) { + if r.rowIndex < 0 { + return 0, io.EOF + } + + for r.rowIndex < r.seekToRow { + n := min(int(r.seekToRow-r.rowIndex), len(rows)) + n, err := r.merge.ReadRows(rows[:n]) + if err != nil { + return 0, err + } + rows = rows[n:] + r.rowIndex += int64(n) + } + + n, err := r.merge.ReadRows(rows) + r.rowIndex += int64(n) + return n, err +} + +func (r *mergedRowGroupRows) SeekToRow(rowIndex int64) error { + if r.rowIndex < 0 { + return fmt.Errorf("SeekToRow: cannot seek to %d on closed merged row group rows", rowIndex) + } + if rowIndex >= r.rowIndex { + r.seekToRow = rowIndex + return nil + } + return fmt.Errorf("SeekToRow: merged row reader cannot seek backward from row %d to %d", r.rowIndex, rowIndex) +} + +func (r *mergedRowGroupRows) Schema() *Schema { + return r.schema +} + +// MergeRowReader constructs a RowReader which creates an ordered sequence of +// all the readers using the given compare function as the ordering predicate. 
+func MergeRowReaders(rows []RowReader, compare func(Row, Row) int) RowReader { + return mergeRowReaders(rows, compare) +} + +func mergeRowReaders[T RowReader](rows []T, compare func(Row, Row) int) RowReader { + switch len(rows) { + case 0: + return emptyRows{} + case 1: + return rows[0] + case 2: + return &mergedRowReader2{ + compare: compare, + buffers: [2]bufferedRowReader{ + {rows: rows[0]}, + {rows: rows[1]}, + }, + } + default: + buffers := make([]bufferedRowReader, len(rows)) + readers := make([]*bufferedRowReader, len(rows)) + for i, r := range rows { + buffers[i].rows = r + readers[i] = &buffers[i] + } + return &mergedRowReader{ + compare: compare, + readers: readers, + } + } +} + +// mergedRowReader2 is a specialized implementation for merging exactly 2 readers +// that avoids heap overhead by doing direct comparisons +type mergedRowReader2 struct { + compare func(Row, Row) int + readers [2]*bufferedRowReader + buffers [2]bufferedRowReader + initialized bool +} + +func (m *mergedRowReader2) initialize() error { + for i := range m.buffers { + r := &m.buffers[i] + switch err := r.read(); err { + case nil: + m.readers[i] = r + case io.EOF: + m.readers[i] = nil + default: + return err + } + } + return nil +} + +func (m *mergedRowReader2) ReadRows(rows []Row) (n int, err error) { + if !m.initialized { + m.initialized = true + if err := m.initialize(); err != nil { + return 0, err + } + } + + r0 := m.readers[0] + r1 := m.readers[1] + + if r0 != nil && r0.empty() { + if err := r0.read(); err != nil { + if err != io.EOF { + return 0, err + } + r0, m.readers[0] = nil, nil + } + } + + if r1 != nil && r1.empty() { + if err := r1.read(); err != nil { + if err != io.EOF { + return 0, err + } + r1, m.readers[1] = nil, nil + } + } + + if r0 == nil && r1 == nil { + return 0, io.EOF + } + + switch { + case r0 == nil: + for n < len(rows) { + rows[n] = append(rows[n][:0], r1.head()...) 
+ n++ + if !r1.next() { + break + } + } + + case r1 == nil: + for n < len(rows) { + rows[n] = append(rows[n][:0], r0.head()...) + n++ + if !r0.next() { + break + } + } + + default: + var hasNext0 bool + var hasNext1 bool + + for n < len(rows) { + switch cmp := m.compare(r0.head(), r1.head()); { + case cmp < 0: + rows[n] = append(rows[n][:0], r0.head()...) + n++ + hasNext0 = r0.next() + hasNext1 = true + case cmp > 0: + rows[n] = append(rows[n][:0], r1.head()...) + n++ + hasNext0 = true + hasNext1 = r1.next() + default: + rows[n] = append(rows[n][:0], r0.head()...) + n++ + hasNext0 = r0.next() + if n < len(rows) { + rows[n] = append(rows[n][:0], r1.head()...) + n++ + hasNext1 = r1.next() + } + } + if !hasNext0 || !hasNext1 { + break + } + } + } + + return n, nil +} + +type mergedRowReader struct { + compare func(Row, Row) int + readers []*bufferedRowReader + initialized bool +} + +func (m *mergedRowReader) initialize() error { + for i, r := range m.readers { + switch err := r.read(); err { + case nil: + case io.EOF: + m.readers[i] = nil + default: + m.readers = nil + return err + } + } + + n := 0 + for _, r := range m.readers { + if r != nil { + m.readers[n] = r + n++ + } + } + + clear := m.readers[n:] + for i := range clear { + clear[i] = nil + } + + m.readers = m.readers[:n] + m.heapInit() + return nil +} + +func (m *mergedRowReader) ReadRows(rows []Row) (n int, err error) { + if !m.initialized { + m.initialized = true + + if err := m.initialize(); err != nil { + return 0, err + } + } + + for n < len(rows) && len(m.readers) != 0 { + r := m.readers[0] + if r.empty() { // This readers buffer has been exhausted, repopulate it. + if err := r.read(); err != nil { + if err == io.EOF { + m.heapPop() + continue + } + return n, err + } else { + if !m.heapDown(0, len(m.readers)) { // heap.Fix + m.heapUp(0) + } + continue + } + } + + rows[n] = append(rows[n][:0], r.head()...) 
+ n++ + + if !r.next() { + return n, nil + } + if !m.heapDown(0, len(m.readers)) { // heap.Fix + m.heapUp(0) + } + } + + if len(m.readers) == 0 { + err = io.EOF + } + + return n, err +} + +func (m *mergedRowReader) heapInit() { + n := len(m.readers) + for i := n/2 - 1; i >= 0; i-- { + m.heapDown(i, n) + } +} + +func (m *mergedRowReader) heapPop() { + n := len(m.readers) - 1 + m.heapSwap(0, n) + m.heapDown(0, n) + m.readers = m.readers[:n] +} + +func (m *mergedRowReader) heapUp(j int) { + for { + i := (j - 1) / 2 // parent + if i == j || !(m.compare(m.readers[j].head(), m.readers[i].head()) < 0) { + break + } + m.heapSwap(i, j) + j = i + } +} + +func (m *mergedRowReader) heapDown(i0, n int) bool { + i := i0 + for { + j1 := 2*i + 1 + if j1 >= n || j1 < 0 { // j1 < 0 after int overflow + break + } + j := j1 // left child + if j2 := j1 + 1; j2 < n && m.compare(m.readers[j2].head(), m.readers[j1].head()) < 0 { + j = j2 // = 2*i + 2 // right child + } + if !(m.compare(m.readers[j].head(), m.readers[i].head()) < 0) { + break + } + m.heapSwap(i, j) + i = j + } + return i > i0 +} + +func (m *mergedRowReader) heapSwap(i, j int) { + m.readers[i], m.readers[j] = m.readers[j], m.readers[i] +} + +type bufferedRowReader struct { + rows RowReader + off int32 + end int32 + buf [24]Row +} + +func (r *bufferedRowReader) empty() bool { + return r.end == r.off +} + +func (r *bufferedRowReader) head() Row { + return r.buf[r.off] +} + +func (r *bufferedRowReader) next() bool { + r.off++ + hasNext := r.off < r.end + if !hasNext { + // We need to read more rows, however it is unsafe to do so here because we haven't + // returned the current rows to the caller yet which may cause buffer corruption. 
+ r.off = 0 + r.end = 0 + } + return hasNext +} + +func (r *bufferedRowReader) read() error { + n, err := r.rows.ReadRows(r.buf[r.end:]) + if err != nil && n == 0 { + return err + } + r.end += int32(n) + return nil +} + +var ( + _ RowReaderWithSchema = (*mergedRowGroupRows)(nil) +) + +// MergeNodes takes a list of nodes and greedily retains properties of the schemas: +// - keeps last compression that is not nil +// - keeps last non-plain encoding that is not nil +// - keeps last non-zero field id +// - union of all columns for group nodes +// - retains the most permissive repetition (required < optional < repeated) +func MergeNodes(nodes ...Node) Node { + switch len(nodes) { + case 0: + return nil + case 1: + return nodes[0] + default: + merged := nodes[0] + for _, node := range nodes[1:] { + merged = mergeTwoNodes(merged, node) + } + return merged + } +} + +// mergeTwoNodes merges two nodes using greedy property retention +func mergeTwoNodes(a, b Node) Node { + leaf1 := a.Leaf() + leaf2 := b.Leaf() + // Both must be either leaf or group nodes + if leaf1 != leaf2 { + // Cannot merge leaf with group - return the last one + return b + } + + var merged Node + if leaf1 { + // Prefer the type with a logical type annotation if one exists. + // This ensures that logical types like JSON are preserved when merging + // a typed node (from an authoritative schema) with a plain node (from + // reflection-based schema generation). 
+ merged = Leaf(selectLogicalType(b.Type(), a.Type())) + + // Apply compression (keep last non-nil) + compression1 := a.Compression() + compression2 := b.Compression() + compression := cmp.Or(compression2, compression1) + if compression != nil { + merged = Compressed(merged, compression) + } + + // Apply encoding (keep last non-plain, non-nil) + encoding := encoding.Encoding(&Plain) + encoding1 := a.Encoding() + encoding2 := b.Encoding() + if !isPlainEncoding(encoding1) { + encoding = encoding1 + } + if !isPlainEncoding(encoding2) { + encoding = encoding2 + } + if encoding != nil { + merged = Encoded(merged, encoding) + } + } else { + fields1 := slices.Clone(a.Fields()) + fields2 := slices.Clone(b.Fields()) + sortFields(fields1) + sortFields(fields2) + + group := make(Group, len(fields1)) + i1 := 0 + i2 := 0 + for i1 < len(fields1) && i2 < len(fields2) { + name1 := fields1[i1].Name() + name2 := fields2[i2].Name() + switch { + case name1 < name2: + group[name1] = nullable(fields1[i1]) + i1++ + case name1 > name2: + group[name2] = nullable(fields2[i2]) + i2++ + default: + group[name1] = mergeTwoNodes(fields1[i1], fields2[i2]) + i1++ + i2++ + } + } + + for _, field := range fields1[i1:] { + group[field.Name()] = nullable(field) + } + + for _, field := range fields2[i2:] { + group[field.Name()] = nullable(field) + } + + merged = group + + if logicalType := b.Type().LogicalType(); logicalType != nil { + switch { + case logicalType.List != nil: + merged = &listNode{group} + case logicalType.Map != nil: + merged = &mapNode{group} + } + } + } + + // Apply repetition (most permissive: required < optional < repeated) + if a.Repeated() || b.Repeated() { + merged = Repeated(merged) + } else if a.Optional() || b.Optional() { + merged = Optional(merged) + } else { + merged = Required(merged) + } + + // Apply field ID (keep last non-zero) + return FieldID(merged, cmp.Or(b.ID(), a.ID())) +} + +// isPlainEncoding checks if the encoding is plain encoding +func isPlainEncoding(enc 
encoding.Encoding) bool { + return enc == nil || enc.Encoding() == format.Plain +} + +func nullable(n Node) Node { + if !n.Repeated() { + return Optional(n) + } + return n +} + +func selectLogicalType(t1, t2 Type) Type { + if t1.LogicalType() != nil { + return t1 + } + return t2 +} + +// MergeSortingColumns returns the common prefix of all sorting columns passed as arguments. +// This function is used to determine the resulting sorting columns when merging multiple +// row groups that each have their own sorting columns. +// +// The function returns the longest common prefix where all sorting columns match exactly +// (same path, same descending flag, same nulls first flag). If any row group has no +// sorting columns, or if there's no common prefix, an empty slice is returned. +// +// Example: +// +// columns1 := []SortingColumn{Ascending("A"), Ascending("B"), Descending("C")} +// columns2 := []SortingColumn{Ascending("A"), Ascending("B"), Ascending("D")} +// result := MergeSortingColumns(columns1, columns2) +// // result will be []SortingColumn{Ascending("A"), Ascending("B")} +func MergeSortingColumns(sortingColumns ...[]SortingColumn) []SortingColumn { + if len(sortingColumns) == 0 { + return nil + } + merged := slices.Clone(sortingColumns[0]) + for _, columns := range sortingColumns[1:] { + merged = commonSortingPrefix(merged, columns) + } + return merged +} + +// commonSortingPrefix returns the common prefix of two sorting column slices +func commonSortingPrefix(a, b []SortingColumn) []SortingColumn { + minLen := min(len(a), len(b)) + for i := range minLen { + if !equalSortingColumn(a[i], b[i]) { + return a[:i] + } + } + return a[:minLen] +} diff --git a/vendor/github.com/parquet-go/parquet-go/multi_row_group.go b/vendor/github.com/parquet-go/parquet-go/multi_row_group.go new file mode 100644 index 00000000000..10605966740 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/multi_row_group.go @@ -0,0 +1,537 @@ +package parquet + +import ( + "io" + 
"slices" +) + +// MultiRowGroup wraps multiple row groups to appear as if it was a single +// RowGroup. RowGroups must have the same schema or it will error. +func MultiRowGroup(rowGroups ...RowGroup) RowGroup { + if len(rowGroups) == 0 { + return &emptyRowGroup{} + } + if len(rowGroups) == 1 { + return rowGroups[0] + } + schema, err := compatibleSchemaOf(rowGroups) + if err != nil { + panic(err) + } + return newMultiRowGroup(schema, nil, slices.Clone(rowGroups)) +} + +func newMultiRowGroup(schema *Schema, sorting []SortingColumn, rowGroups []RowGroup) *multiRowGroup { + m := new(multiRowGroup) + m.init(schema, sorting, rowGroups) + return m +} + +func (m *multiRowGroup) init(schema *Schema, sorting []SortingColumn, rowGroups []RowGroup) *multiRowGroup { + columns := make([]multiColumnChunk, len(schema.Columns())) + columnChunks := make([][]ColumnChunk, len(rowGroups)) + + for i, rowGroup := range rowGroups { + columnChunks[i] = rowGroup.ColumnChunks() + } + + for i := range columns { + columns[i].rowGroup = m + columns[i].column = i + columns[i].chunks = make([]ColumnChunk, len(columnChunks)) + + for j, chunks := range columnChunks { + columns[i].chunks[j] = chunks[i] + } + } + + m.schema = schema + m.sorting = sorting + m.rowGroups = rowGroups + m.columns = make([]ColumnChunk, len(columns)) + + for i := range columns { + m.columns[i] = &columns[i] + } + + return m +} + +func compatibleSchemaOf(rowGroups []RowGroup) (*Schema, error) { + schema := rowGroups[0].Schema() + + // Fast path: Many times all row groups have the exact same schema so a + // pointer comparison is cheaper. + samePointer := true + for _, rowGroup := range rowGroups[1:] { + if rowGroup.Schema() != schema { + samePointer = false + break + } + } + if samePointer { + return schema, nil + } + + // Slow path: The schema pointers are not the same, but they still have to + // be compatible. 
+ for _, rowGroup := range rowGroups[1:] { + if !EqualNodes(schema, rowGroup.Schema()) { + return nil, ErrRowGroupSchemaMismatch + } + } + + return schema, nil +} + +type multiRowGroup struct { + schema *Schema + rowGroups []RowGroup + columns []ColumnChunk + sorting []SortingColumn +} + +func (m *multiRowGroup) NumRows() (numRows int64) { + for _, rowGroup := range m.rowGroups { + numRows += rowGroup.NumRows() + } + return numRows +} + +func (m *multiRowGroup) ColumnChunks() []ColumnChunk { return m.columns } + +func (m *multiRowGroup) SortingColumns() []SortingColumn { return m.sorting } + +func (m *multiRowGroup) Schema() *Schema { return m.schema } + +func (m *multiRowGroup) Rows() Rows { return NewRowGroupRowReader(m) } + +type multiColumnChunk struct { + rowGroup *multiRowGroup + column int + chunks []ColumnChunk +} + +func (c *multiColumnChunk) Type() Type { + if len(c.chunks) != 0 { + return c.chunks[0].Type() // all chunks should be of the same type + } + return nil +} + +func (c *multiColumnChunk) NumValues() int64 { + n := int64(0) + for i := range c.chunks { + n += c.chunks[i].NumValues() + } + return n +} + +func (c *multiColumnChunk) Column() int { + return c.column +} + +func (c *multiColumnChunk) Pages() Pages { + return &multiPages{column: c} +} + +func (c *multiColumnChunk) ColumnIndex() (ColumnIndex, error) { + if len(c.chunks) == 0 { + return emptyColumnIndex{}, nil + } + + // Collect indexes from all chunks + indexes := make([]ColumnIndex, len(c.chunks)) + numPages := 0 + + for i, chunk := range c.chunks { + index, err := chunk.ColumnIndex() + if err != nil { + return nil, err + } + indexes[i] = index + numPages += index.NumPages() + } + + // Build cumulative offsets for page index mapping + offsets := make([]int, len(indexes)+1) + for i, index := range indexes { + offsets[i+1] = offsets[i] + index.NumPages() + } + + return &multiColumnIndex{ + column: c, + indexes: indexes, + offsets: offsets, + numPages: numPages, + typ: c.Type(), + }, nil +} 
+ +func (c *multiColumnChunk) OffsetIndex() (OffsetIndex, error) { + if len(c.chunks) == 0 { + return emptyOffsetIndex{}, nil + } + + // Collect indexes from all chunks + indexes := make([]OffsetIndex, len(c.chunks)) + numPages := 0 + + for i, chunk := range c.chunks { + index, err := chunk.OffsetIndex() + if err != nil { + return nil, err + } + indexes[i] = index + numPages += index.NumPages() + } + + // Build cumulative page offsets + offsets := make([]int, len(indexes)+1) + for i, index := range indexes { + offsets[i+1] = offsets[i] + index.NumPages() + } + + // Build cumulative row offsets for each chunk + rowOffsets := make([]int64, len(c.chunks)+1) + for i := range c.chunks { + rowOffsets[i+1] = rowOffsets[i] + c.rowGroup.rowGroups[i].NumRows() + } + + return &multiOffsetIndex{ + column: c, + indexes: indexes, + offsets: offsets, + rowOffsets: rowOffsets, + numPages: numPages, + }, nil +} + +func (c *multiColumnChunk) BloomFilter() BloomFilter { + return multiBloomFilter{c} +} + +type multiBloomFilter struct{ *multiColumnChunk } + +func (f multiBloomFilter) ReadAt(b []byte, off int64) (int, error) { + i := 0 + + for i < len(f.chunks) { + if r := f.chunks[i].BloomFilter(); r != nil { + size := r.Size() + if off < size { + break + } + off -= size + } + i++ + } + + if i == len(f.chunks) { + return 0, io.EOF + } + + rn := int(0) + for len(b) > 0 { + if i >= len(f.chunks) { + return rn, io.EOF + } + if r := f.chunks[i].BloomFilter(); r != nil { + n, err := r.ReadAt(b, off) + rn += n + if err != nil && err != io.EOF { + return rn, err + } + if b = b[n:]; len(b) == 0 { + return rn, nil + } + // When moving to next chunk, reset offset to 0 + off = 0 + } + i++ + } + + return rn, nil +} + +func (f multiBloomFilter) Size() int64 { + size := int64(0) + for _, c := range f.chunks { + if b := c.BloomFilter(); b != nil { + size += b.Size() + } + } + return size +} + +func (f multiBloomFilter) Check(v Value) (bool, error) { + for _, c := range f.chunks { + if b := 
c.BloomFilter(); b != nil { + if ok, err := b.Check(v); ok || err != nil { + return ok, err + } + } + } + return false, nil +} + +type multiColumnIndex struct { + column *multiColumnChunk + indexes []ColumnIndex + offsets []int + numPages int + typ Type +} + +func (m *multiColumnIndex) NumPages() int { + return m.numPages +} + +func (m *multiColumnIndex) mapPageIndex(pageIndex int) (chunkIndex, localIndex int) { + for i := range len(m.offsets) - 1 { + if pageIndex >= m.offsets[i] && pageIndex < m.offsets[i+1] { + return i, pageIndex - m.offsets[i] + } + } + // Out of bounds - return last valid position + if len(m.indexes) > 0 { + lastIndex := len(m.indexes) - 1 + lastNumPages := m.indexes[lastIndex].NumPages() + if lastNumPages > 0 { + return lastIndex, lastNumPages - 1 + } + } + return 0, 0 +} + +func (m *multiColumnIndex) NullCount(pageIndex int) int64 { + chunkIndex, localIndex := m.mapPageIndex(pageIndex) + return m.indexes[chunkIndex].NullCount(localIndex) +} + +func (m *multiColumnIndex) NullPage(pageIndex int) bool { + chunkIndex, localIndex := m.mapPageIndex(pageIndex) + return m.indexes[chunkIndex].NullPage(localIndex) +} + +func (m *multiColumnIndex) MinValue(pageIndex int) Value { + chunkIndex, localIndex := m.mapPageIndex(pageIndex) + return m.indexes[chunkIndex].MinValue(localIndex) +} + +func (m *multiColumnIndex) MaxValue(pageIndex int) Value { + chunkIndex, localIndex := m.mapPageIndex(pageIndex) + return m.indexes[chunkIndex].MaxValue(localIndex) +} + +func (m *multiColumnIndex) IsAscending() bool { + if len(m.indexes) == 0 { + return false + } + + // All indexes must be ascending + for _, index := range m.indexes { + if !index.IsAscending() { + return false + } + } + + // Additionally, check boundary between chunks: + // max of chunk[i] must be <= min of chunk[i+1] + if m.typ == nil { + return true + } + cmp := m.typ.Compare + for i := range len(m.indexes) - 1 { + currIndex := m.indexes[i] + nextIndex := m.indexes[i+1] + + // Find last non-null 
page in current chunk + lastPage := currIndex.NumPages() - 1 + for lastPage >= 0 && currIndex.NullPage(lastPage) { + lastPage-- + } + + // Find first non-null page in next chunk + firstPage := 0 + numPages := nextIndex.NumPages() + for firstPage < numPages && nextIndex.NullPage(firstPage) { + firstPage++ + } + + // If both have valid pages, compare max of current to min of next + if lastPage >= 0 && firstPage < numPages { + currMax := currIndex.MaxValue(lastPage) + nextMin := nextIndex.MinValue(firstPage) + if cmp(currMax, nextMin) > 0 { + return false // Not ascending across chunk boundary + } + } + } + + return true +} + +func (m *multiColumnIndex) IsDescending() bool { + if len(m.indexes) == 0 { + return false + } + + // All indexes must be descending + for _, index := range m.indexes { + if !index.IsDescending() { + return false + } + } + + // Additionally, check boundary between chunks: + // min of chunk[i] must be >= max of chunk[i+1] + if m.typ == nil { + return true + } + cmp := m.typ.Compare + for i := range len(m.indexes) - 1 { + currIndex := m.indexes[i] + nextIndex := m.indexes[i+1] + + // Find first non-null page in current chunk + firstPage := 0 + numPages := currIndex.NumPages() + for firstPage < numPages && currIndex.NullPage(firstPage) { + firstPage++ + } + + // Find last non-null page in next chunk + lastPage := nextIndex.NumPages() - 1 + for lastPage >= 0 && nextIndex.NullPage(lastPage) { + lastPage-- + } + + // If both have valid pages, compare min of current to max of next + if firstPage < numPages && lastPage >= 0 { + currMin := currIndex.MinValue(firstPage) + nextMax := nextIndex.MaxValue(lastPage) + if cmp(currMin, nextMax) < 0 { + return false // Not descending across chunk boundary + } + } + } + + return true +} + +type multiOffsetIndex struct { + column *multiColumnChunk + indexes []OffsetIndex + offsets []int + rowOffsets []int64 + numPages int +} + +func (m *multiOffsetIndex) NumPages() int { + return m.numPages +} + +func (m 
*multiOffsetIndex) mapPageIndex(pageIndex int) (chunkIndex int, localIndex int) { + for i := range len(m.offsets) - 1 { + if pageIndex >= m.offsets[i] && pageIndex < m.offsets[i+1] { + return i, pageIndex - m.offsets[i] + } + } + // Out of bounds - return last valid position + if len(m.indexes) > 0 { + lastIndex := len(m.indexes) - 1 + lastNumPages := m.indexes[lastIndex].NumPages() + if lastNumPages > 0 { + return lastIndex, lastNumPages - 1 + } + } + return 0, 0 +} + +func (m *multiOffsetIndex) Offset(pageIndex int) int64 { + chunkIndex, localIndex := m.mapPageIndex(pageIndex) + return m.indexes[chunkIndex].Offset(localIndex) +} + +func (m *multiOffsetIndex) CompressedPageSize(pageIndex int) int64 { + chunkIndex, localIndex := m.mapPageIndex(pageIndex) + return m.indexes[chunkIndex].CompressedPageSize(localIndex) +} + +func (m *multiOffsetIndex) FirstRowIndex(pageIndex int) int64 { + chunkIndex, localIndex := m.mapPageIndex(pageIndex) + localRowIndex := m.indexes[chunkIndex].FirstRowIndex(localIndex) + return m.rowOffsets[chunkIndex] + localRowIndex +} + +type multiPages struct { + pages Pages + index int + column *multiColumnChunk +} + +func (m *multiPages) ReadPage() (Page, error) { + for { + if m.pages != nil { + p, err := m.pages.ReadPage() + if err == nil || err != io.EOF { + return p, err + } + if err := m.pages.Close(); err != nil { + return nil, err + } + m.pages = nil + } + + if m.column == nil || m.index == len(m.column.chunks) { + return nil, io.EOF + } + + m.pages = m.column.chunks[m.index].Pages() + m.index++ + } +} + +func (m *multiPages) SeekToRow(rowIndex int64) error { + if m.column == nil { + return io.ErrClosedPipe + } + + if m.pages != nil { + if err := m.pages.Close(); err != nil { + return err + } + } + + rowGroups := m.column.rowGroup.rowGroups + numRows := int64(0) + m.pages = nil + m.index = 0 + + for m.index < len(rowGroups) { + numRows = rowGroups[m.index].NumRows() + if rowIndex < numRows { + break + } + rowIndex -= numRows + m.index++ 
+ } + + if m.index < len(rowGroups) { + m.pages = m.column.chunks[m.index].Pages() + m.index++ + return m.pages.SeekToRow(rowIndex) + } + return nil +} + +func (m *multiPages) Close() (err error) { + if m.pages != nil { + err = m.pages.Close() + } + m.pages = nil + m.index = 0 + m.column = nil + return err +} diff --git a/vendor/github.com/parquet-go/parquet-go/node.go b/vendor/github.com/parquet-go/parquet-go/node.go new file mode 100644 index 00000000000..d2460c9d568 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/node.go @@ -0,0 +1,618 @@ +package parquet + +import ( + "reflect" + "slices" + "strings" + "unicode" + "unicode/utf8" + + "github.com/parquet-go/parquet-go/compress" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// Node values represent nodes of a parquet schema. +// +// Nodes carry the type of values, as well as properties like whether the values +// are optional or repeat. Nodes with one or more children represent parquet +// groups and therefore do not have a logical type. +// +// Nodes are immutable values and therefore safe to use concurrently from +// multiple goroutines. +type Node interface { + // The id of this node in its parent node. Zero value is treated as id is not + // set. ID only needs to be unique within its parent context. + // + // This is the same as parquet field_id + ID() int + + // Returns a human-readable representation of the parquet node. + String() string + + // For leaf nodes, returns the type of values of the parquet column. + // + // Calling this method on non-leaf nodes will panic. + Type() Type + + // Returns whether the parquet column is optional. + Optional() bool + + // Returns whether the parquet column is repeated. + Repeated() bool + + // Returns whether the parquet column is required. + Required() bool + + // Returns true if this a leaf node. + Leaf() bool + + // Returns a mapping of the node's fields. 
+ // + // As an optimization, the same slices may be returned by multiple calls to + // this method, programs must treat the returned values as immutable. + // + // This method returns an empty mapping when called on leaf nodes. + Fields() []Field + + // Returns the encoding used by the node. + // + // The method may return nil to indicate that no specific encoding was + // configured on the node, in which case a default encoding might be used. + Encoding() encoding.Encoding + + // Returns compression codec used by the node. + // + // The method may return nil to indicate that no specific compression codec + // was configured on the node, in which case a default compression might be + // used. + Compression() compress.Codec + + // Returns the Go type that best represents the parquet node. + // + // For leaf nodes, this will be one of bool, int32, int64, deprecated.Int96, + // float32, float64, string, []byte, or [N]byte. + // + // For groups, the method returns a struct type. + // + // If the method is called on a repeated node, the method returns a slice of + // the underlying type. + // + // For optional nodes, the method returns a pointer of the underlying type. + // + // For nodes that were constructed from Go values (e.g. using SchemaOf), the + // method returns the original Go type. + GoType() reflect.Type +} + +// Field instances represent fields of a parquet node, which associate a node to +// their name in their parent node. +type Field interface { + Node + + // Returns the name of this field in its parent node. + Name() string + + // Given a reference to the Go value matching the structure of the parent + // node, returns the Go value of the field. + Value(base reflect.Value) reflect.Value +} + +// Encoded wraps the node passed as argument to use the given encoding. +// +// The function panics if it is called on a non-leaf node, or if the +// encoding does not support the node type. 
+func Encoded(node Node, encoding encoding.Encoding) Node { + if !node.Leaf() { + panic("cannot add encoding to a non-leaf node") + } + if encoding != nil { + kind := node.Type().Kind() + if !canEncode(encoding, kind) { + panic("cannot apply " + encoding.Encoding().String() + " to node of type " + kind.String()) + } + } + return &encodedNode{ + Node: node, + encoding: encoding, + } +} + +type encodedNode struct { + Node + encoding encoding.Encoding +} + +func (n *encodedNode) Encoding() encoding.Encoding { + return n.encoding +} + +// Compressed wraps the node passed as argument to use the given compression +// codec. +// +// If the codec is nil, the node's compression is left unchanged. +// +// The function panics if it is called on a non-leaf node. +func Compressed(node Node, codec compress.Codec) Node { + if !node.Leaf() { + panic("cannot add compression codec to a non-leaf node") + } + return &compressedNode{ + Node: node, + codec: codec, + } +} + +type compressedNode struct { + Node + codec compress.Codec +} + +func (n *compressedNode) Compression() compress.Codec { + return n.codec +} + +// Optional wraps the given node to make it optional. +func Optional(node Node) Node { return &optionalNode{node} } + +type optionalNode struct{ Node } + +func (opt *optionalNode) Optional() bool { return true } +func (opt *optionalNode) Repeated() bool { return false } +func (opt *optionalNode) Required() bool { return false } +func (opt *optionalNode) GoType() reflect.Type { return reflect.PtrTo(opt.Node.GoType()) } + +// FieldID wraps a node to provide node field id +func FieldID(node Node, id int) Node { return &fieldIDNode{Node: node, id: id} } + +type fieldIDNode struct { + Node + id int +} + +func (f *fieldIDNode) ID() int { return f.id } + +// Repeated wraps the given node to make it repeated. 
+func Repeated(node Node) Node { return &repeatedNode{node} } + +type repeatedNode struct{ Node } + +func (rep *repeatedNode) Optional() bool { return false } +func (rep *repeatedNode) Repeated() bool { return true } +func (rep *repeatedNode) Required() bool { return false } +func (rep *repeatedNode) GoType() reflect.Type { return reflect.SliceOf(rep.Node.GoType()) } + +// Required wraps the given node to make it required. +func Required(node Node) Node { return &requiredNode{node} } + +type requiredNode struct{ Node } + +func (req *requiredNode) Optional() bool { return false } +func (req *requiredNode) Repeated() bool { return false } +func (req *requiredNode) Required() bool { return true } +func (req *requiredNode) GoType() reflect.Type { return req.Node.GoType() } + +type node struct{} + +// Leaf returns a leaf node of the given type. +func Leaf(typ Type) Node { + return &leafNode{typ: typ} +} + +type leafNode struct{ typ Type } + +func (n *leafNode) ID() int { return 0 } + +func (n *leafNode) String() string { return sprint("", n) } + +func (n *leafNode) Type() Type { return n.typ } + +func (n *leafNode) Optional() bool { return false } + +func (n *leafNode) Repeated() bool { return false } + +func (n *leafNode) Required() bool { return true } + +func (n *leafNode) Leaf() bool { return true } + +func (n *leafNode) Fields() []Field { return nil } + +func (n *leafNode) Encoding() encoding.Encoding { return nil } + +func (n *leafNode) Compression() compress.Codec { return nil } + +func (n *leafNode) GoType() reflect.Type { return goTypeOfLeaf(n) } + +var repetitionTypes = [...]format.FieldRepetitionType{ + 0: format.Required, + 1: format.Optional, + 2: format.Repeated, +} + +func fieldRepetitionTypePtrOf(node Node) *format.FieldRepetitionType { + switch { + case node.Required(): + return &repetitionTypes[format.Required] + case node.Optional(): + return &repetitionTypes[format.Optional] + case node.Repeated(): + return &repetitionTypes[format.Repeated] + 
default: + return nil + } +} + +func fieldRepetitionTypeOf(node Node) format.FieldRepetitionType { + switch { + case node.Optional(): + return format.Optional + case node.Repeated(): + return format.Repeated + default: + return format.Required + } +} + +func applyFieldRepetitionType(t format.FieldRepetitionType, repetitionLevel, definitionLevel byte) (byte, byte) { + switch t { + case format.Optional: + definitionLevel++ + case format.Repeated: + repetitionLevel++ + definitionLevel++ + } + return repetitionLevel, definitionLevel +} + +type Group map[string]Node + +func (g Group) ID() int { return 0 } + +func (g Group) String() string { return sprint("", g) } + +func (g Group) Type() Type { return groupType{} } + +func (g Group) Optional() bool { return false } + +func (g Group) Repeated() bool { return false } + +func (g Group) Required() bool { return true } + +func (g Group) Leaf() bool { return false } + +func (g Group) Fields() []Field { + groupFields := make([]groupField, 0, len(g)) + for name, node := range g { + groupFields = append(groupFields, groupField{ + Node: node, + name: name, + }) + } + slices.SortFunc(groupFields, func(a, b groupField) int { + return strings.Compare(a.name, b.name) + }) + fields := make([]Field, len(groupFields)) + for i := range groupFields { + fields[i] = &groupFields[i] + } + return fields +} + +func (g Group) Encoding() encoding.Encoding { return nil } + +func (g Group) Compression() compress.Codec { return nil } + +func (g Group) GoType() reflect.Type { return goTypeOfGroup(g) } + +type groupField struct { + Node + name string +} + +func (f *groupField) Name() string { return f.name } + +func (f *groupField) Value(base reflect.Value) reflect.Value { + if base.Kind() == reflect.Interface { + if base.IsNil() { + return reflect.ValueOf(nil) + } + if base = base.Elem(); base.Kind() == reflect.Pointer && base.IsNil() { + return reflect.ValueOf(nil) + } + } + return base.MapIndex(reflect.ValueOf(&f.name).Elem()) +} + +func 
goTypeOf(node Node) reflect.Type { + switch { + case node.Optional(): + return goTypeOfOptional(node) + case node.Repeated(): + return goTypeOfRepeated(node) + default: + return goTypeOfRequired(node) + } +} + +func goTypeOfOptional(node Node) reflect.Type { + return reflect.PtrTo(goTypeOfRequired(node)) +} + +func goTypeOfRepeated(node Node) reflect.Type { + return reflect.SliceOf(goTypeOfRequired(node)) +} + +func goTypeOfRequired(node Node) reflect.Type { + if node.Leaf() { + return goTypeOfLeaf(node) + } else { + return goTypeOfGroup(node) + } +} + +func goTypeOfLeaf(node Node) reflect.Type { + t := node.Type() + if convertibleType, ok := t.(interface{ GoType() reflect.Type }); ok { + return convertibleType.GoType() + } + switch t.Kind() { + case Boolean: + return reflect.TypeOf(false) + case Int32: + return reflect.TypeOf(int32(0)) + case Int64: + return reflect.TypeOf(int64(0)) + case Int96: + return reflect.TypeOf(deprecated.Int96{}) + case Float: + return reflect.TypeOf(float32(0)) + case Double: + return reflect.TypeOf(float64(0)) + case ByteArray: + return reflect.TypeOf(([]byte)(nil)) + case FixedLenByteArray: + return reflect.ArrayOf(t.Length(), reflect.TypeOf(byte(0))) + default: + panic("BUG: parquet type returned an unsupported kind") + } +} + +func goTypeOfGroup(node Node) reflect.Type { + fields := node.Fields() + structFields := make([]reflect.StructField, len(fields)) + for i, field := range fields { + structFields[i].Name = exportedStructFieldName(field.Name()) + structFields[i].Type = field.GoType() + // TODO: can we reconstruct a struct tag that would be valid if a value + // of this type were passed to SchemaOf? 
+ } + return reflect.StructOf(structFields) +} + +func exportedStructFieldName(name string) string { + firstRune, size := utf8.DecodeRuneInString(name) + return string([]rune{unicode.ToUpper(firstRune)}) + name[size:] +} + +func isList(node Node) bool { + logicalType := node.Type().LogicalType() + return logicalType != nil && logicalType.List != nil +} + +func isMap(node Node) bool { + logicalType := node.Type().LogicalType() + return logicalType != nil && logicalType.Map != nil +} + +func numLeafColumnsOf(node Node) int16 { + return makeColumnIndex(numLeafColumns(node, 0)) +} + +func numLeafColumns(node Node, columnIndex int) int { + if node.Leaf() { + return columnIndex + 1 + } + for _, field := range node.Fields() { + columnIndex = numLeafColumns(field, columnIndex) + } + return columnIndex +} + +func listElementOf(node Node) Node { + if !node.Leaf() { + if list := fieldByName(node, "list"); list != nil { + if elem := fieldByName(list, "element"); elem != nil { + return elem + } + // TODO: It should not be named "item", but some versions of pyarrow + // and some versions of polars used that instead of "element". 
+ // https://issues.apache.org/jira/browse/ARROW-11497 + // https://github.com/pola-rs/polars/issues/17100 + if elem := fieldByName(list, "item"); elem != nil { + return elem + } + } + } + panic("node with logical type LIST is not composed of a repeated .list.element") +} + +func mapKeyValueOf(node Node) Node { + if !node.Leaf() && (node.Required() || node.Optional()) { + for _, kv_name := range []string{"key_value", "map"} { + if keyValue := fieldByName(node, kv_name); keyValue != nil && !keyValue.Leaf() && keyValue.Repeated() { + k := fieldByName(keyValue, "key") + v := fieldByName(keyValue, "value") + if k != nil && v != nil && k.Required() { + return keyValue + } + } + } + } + panic("node with logical type MAP is not composed of a repeated .key_value group (or .map group) with key and value fields") +} + +func encodingOf(node Node, defaultEncodings map[Kind]encoding.Encoding) encoding.Encoding { + encoding := node.Encoding() + kind := node.Type().Kind() + if encoding == nil && defaultEncodings != nil { + encoding = defaultEncodings[kind] + } + // The parquet-format documentation states that the + // DELTA_LENGTH_BYTE_ARRAY is always preferred to PLAIN when + // encoding BYTE_ARRAY values. We apply it as a default if + // none were explicitly specified, which gives the application + // the opportunity to override this behavior if needed. 
+ // + // https://github.com/apache/parquet-format/blob/master/Encodings.md#delta-length-byte-array-delta_length_byte_array--6 + if kind == ByteArray && encoding == nil { + encoding = &DeltaLengthByteArray + } + if encoding == nil { + encoding = &Plain + } + return encoding +} + +func forEachNodeOf(name string, node Node, do func(string, Node)) { + do(name, node) + + for _, f := range node.Fields() { + forEachNodeOf(f.Name(), f, do) + } +} + +func fieldByName(node Node, name string) Field { + for _, f := range node.Fields() { + if f.Name() == name { + return f + } + } + return nil +} + +// findByPath navigates the node tree to find the node at the given path. +// Returns nil if the path doesn't exist. +// The path is a sequence of field names to traverse. +func findByPath(node Node, path []string) Node { + for _, name := range path { + field := fieldByName(node, name) + if field == nil { + return nil + } + node = field + } + return node +} + +// EqualNodes returns true if node1 and node2 are equal. +// +// Nodes that are not of the same repetition type (optional, required, repeated) +// or of the same hierarchical type (leaf, group) are considered not equal. +// Leaf nodes are considered equal if they are of the same data type. +// +// Groups are compared recursively, taking the order of fields into account +// (because it influences the column index of each leaf node), and comparing +// their logical types: for example, a MAP node is not equal to a GROUP node +// with the same fields, because MAP nodes have a specific logical type. +// +// Note that the encoding and compression of the nodes are not considered by this +// function. +func EqualNodes(node1, node2 Node) bool { + if node1.Leaf() { + return node2.Leaf() && leafNodesAreEqual(node1, node2) + } else { + return !node2.Leaf() && groupNodesAreEqual(node1, node2) + } +} + +// SameNodes returns true if node1 and node2 are equivalent, ignoring field order. 
+// +// Unlike EqualNodes, this function considers nodes with the same fields in different +// orders as equivalent. This is useful when comparing schemas that may have been +// reordered by operations like MergeNodes. +// +// For leaf nodes, this behaves identically to EqualNodes. +// For group nodes, this compares fields by name rather than position. +func SameNodes(node1, node2 Node) bool { + if node1.Leaf() { + return node2.Leaf() && leafNodesAreEqual(node1, node2) + } else { + return !node2.Leaf() && groupNodesAreSame(node1, node2) + } +} + +func repetitionsAreEqual(node1, node2 Node) bool { + return node1.Optional() == node2.Optional() && node1.Repeated() == node2.Repeated() +} + +func leafNodesAreEqual(node1, node2 Node) bool { + return EqualTypes(node1.Type(), node2.Type()) && repetitionsAreEqual(node1, node2) +} + +func groupNodesAreEqual(node1, node2 Node) bool { + fields1 := node1.Fields() + fields2 := node2.Fields() + if len(fields1) != len(fields2) { + return false + } + if !repetitionsAreEqual(node1, node2) { + return false + } + if !fieldsAreEqual(fields1, fields2, EqualNodes) { + return false + } + return equalLogicalTypes(node1.Type(), node2.Type()) +} + +func groupNodesAreSame(node1, node2 Node) bool { + fields1 := node1.Fields() + fields2 := node2.Fields() + if len(fields1) != len(fields2) { + return false + } + if !repetitionsAreEqual(node1, node2) { + return false + } + if !fieldsAreSorted(fields1) { + fields1 = slices.Clone(fields1) + sortFields(fields1) + } + if !fieldsAreSorted(fields2) { + fields2 = slices.Clone(fields2) + sortFields(fields2) + } + if !fieldsAreEqual(fields1, fields2, SameNodes) { + return false + } + return equalLogicalTypes(node1.Type(), node2.Type()) +} + +func fieldsAreEqual(fields1, fields2 []Field, equal func(Node, Node) bool) bool { + if len(fields1) != len(fields2) { + return false + } + for i := range fields1 { + if fields1[i].Name() != fields2[i].Name() { + return false + } + } + for i := range fields1 { + if 
!equal(fields1[i], fields2[i]) { + return false + } + } + return true +} + +func fieldsAreSorted(fields []Field) bool { + return slices.IsSortedFunc(fields, compareFields) +} + +func sortFields(fields []Field) { + slices.SortFunc(fields, compareFields) +} + +func compareFields(a, b Field) int { + return strings.Compare(a.Name(), b.Name()) +} diff --git a/vendor/github.com/parquet-go/parquet-go/null.go b/vendor/github.com/parquet-go/parquet-go/null.go new file mode 100644 index 00000000000..35806b3ecba --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/null.go @@ -0,0 +1,127 @@ +package parquet + +import ( + "reflect" + "time" + "unsafe" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/bytealg" + "github.com/parquet-go/parquet-go/sparse" +) + +// nullIndexFunc is the type of functions used to detect null values in rows. +// +// For each value of the rows array, the bitmap passed as first argument is +// populated to indicate whether the values were null (0) or not (1). +// +// The function writes one bit to the output buffer for each row in the input, +// the buffer must be sized accordingly. 
+type nullIndexFunc func(bits []uint64, rows sparse.Array) + +func nullIndex[T comparable](bits []uint64, rows sparse.Array) { + var zero T + for i := range rows.Len() { + v := *(*T)(rows.Index(i)) + if v != zero { + x := uint(i) / 64 + y := uint(i) % 64 + bits[x] |= 1 << y + } + } +} + +func nullIndexStruct(bits []uint64, rows sparse.Array) { + bytealg.Broadcast(unsafecast.Slice[byte](bits), 0xFF) +} + +func nullIndexTime(bits []uint64, rows sparse.Array) { + for i := range rows.Len() { + t := (*time.Time)(rows.Index(i)) + if !t.IsZero() { + x := uint(i) / 64 + y := uint(i) % 64 + bits[x] |= 1 << y + } + } +} + +func nullIndexFuncOf(t reflect.Type) nullIndexFunc { + switch t { + case reflect.TypeOf(deprecated.Int96{}): + return nullIndex[deprecated.Int96] + case reflect.TypeOf(time.Time{}): + return nullIndexTime + } + + switch t.Kind() { + case reflect.Bool: + return nullIndexBool + + case reflect.Int: + return nullIndexInt + + case reflect.Int32: + return nullIndexInt32 + + case reflect.Int64: + return nullIndexInt64 + + case reflect.Uint: + return nullIndexUint + + case reflect.Uint32: + return nullIndexUint32 + + case reflect.Uint64: + return nullIndexUint64 + + case reflect.Float32: + return nullIndexFloat32 + + case reflect.Float64: + return nullIndexFloat64 + + case reflect.String: + return nullIndexString + + case reflect.Slice: + return nullIndexSlice + + case reflect.Map: + return nullIndexPointer + + case reflect.Array: + if t.Elem().Kind() == reflect.Uint8 { + switch size := t.Len(); size { + case 16: + return nullIndexUint128 + default: + return nullIndexFuncOfByteArray(size) + } + } + + case reflect.Pointer: + return nullIndexPointer + + case reflect.Struct: + return nullIndexStruct + } + + panic("cannot convert Go values of type " + typeNameOf(t) + " to parquet value") +} + +func nullIndexFuncOfByteArray(n int) nullIndexFunc { + return func(bits []uint64, rows sparse.Array) { + for i := range rows.Len() { + p := (*byte)(rows.Index(i)) + b := 
unsafe.Slice(p, n) + if !isZero(b) { + x := uint(i) / 64 + y := uint(i) % 64 + bits[x] |= 1 << y + } + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/null_amd64.go b/vendor/github.com/parquet-go/parquet-go/null_amd64.go new file mode 100644 index 00000000000..aa0cbffface --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/null_amd64.go @@ -0,0 +1,74 @@ +//go:build !purego + +package parquet + +import "github.com/parquet-go/parquet-go/sparse" + +//go:noescape +func nullIndex8(bits *uint64, rows sparse.Array) + +//go:noescape +func nullIndex32(bits *uint64, rows sparse.Array) + +//go:noescape +func nullIndex64(bits *uint64, rows sparse.Array) + +//go:noescape +func nullIndex128(bits *uint64, rows sparse.Array) + +func nullIndexBool(bits []uint64, rows sparse.Array) { + nullIndex8(&bits[0], rows) +} + +func nullIndexInt(bits []uint64, rows sparse.Array) { + nullIndex64(&bits[0], rows) +} + +func nullIndexInt32(bits []uint64, rows sparse.Array) { + nullIndex32(&bits[0], rows) +} + +func nullIndexInt64(bits []uint64, rows sparse.Array) { + nullIndex64(&bits[0], rows) +} + +func nullIndexUint(bits []uint64, rows sparse.Array) { + nullIndex64(&bits[0], rows) +} + +func nullIndexUint32(bits []uint64, rows sparse.Array) { + nullIndex32(&bits[0], rows) +} + +func nullIndexUint64(bits []uint64, rows sparse.Array) { + nullIndex64(&bits[0], rows) +} + +func nullIndexUint128(bits []uint64, rows sparse.Array) { + nullIndex128(&bits[0], rows) +} + +func nullIndexFloat32(bits []uint64, rows sparse.Array) { + nullIndex32(&bits[0], rows) +} + +func nullIndexFloat64(bits []uint64, rows sparse.Array) { + nullIndex64(&bits[0], rows) +} + +func nullIndexString(bits []uint64, rows sparse.Array) { + // We offset by an extra 8 bytes to test the lengths of string values where + // the first field is the pointer and the second is the length which we want + // to test. 
+ nullIndex64(&bits[0], rows.Offset(8)) +} + +func nullIndexSlice(bits []uint64, rows sparse.Array) { + // Slice values are null if their pointer is nil, which is held in the first + // 8 bytes of the object so we can simply test 64 bits words. + nullIndex64(&bits[0], rows) +} + +func nullIndexPointer(bits []uint64, rows sparse.Array) { + nullIndex64(&bits[0], rows) +} diff --git a/vendor/github.com/parquet-go/parquet-go/null_amd64.s b/vendor/github.com/parquet-go/parquet-go/null_amd64.s new file mode 100644 index 00000000000..52804d3f12b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/null_amd64.s @@ -0,0 +1,227 @@ +//go:build !purego + +#include "textflag.h" + +// func nullIndex8(bits *uint64, rows sparse.Array) +TEXT ·nullIndex8(SB), NOSPLIT, $0-32 + MOVQ bits+0(FP), AX + MOVQ rows_array_ptr+8(FP), BX + MOVQ rows_array_len+16(FP), DI + MOVQ rows_array_off+24(FP), DX + + MOVQ $1, CX + XORQ SI, SI + + CMPQ DI, $0 + JE done +loop1x1: + XORQ R8, R8 + MOVB (BX), R9 + CMPB R9, $0 + JE next1x1 + + MOVQ SI, R10 + SHRQ $6, R10 + ORQ CX, (AX)(R10*8) +next1x1: + ADDQ DX, BX + ROLQ $1, CX + INCQ SI + CMPQ SI, DI + JNE loop1x1 +done: + RET + +// func nullIndex32(bits *uint64, rows sparse.Array) +TEXT ·nullIndex32(SB), NOSPLIT, $0-32 + MOVQ bits+0(FP), AX + MOVQ rows_array_ptr+8(FP), BX + MOVQ rows_array_len+16(FP), DI + MOVQ rows_array_off+24(FP), DX + + MOVQ $1, CX + XORQ SI, SI + + CMPQ DI, $0 + JE done + + CMPQ DI, $8 + JB loop1x4 + + CMPB ·hasAVX2(SB), $0 + JE loop1x4 + + MOVQ DI, R8 + SHRQ $3, R8 + SHLQ $3, R8 + + VPBROADCASTD rows_array_off+24(FP), Y0 + VPMULLD ·range0n8(SB), Y0, Y0 + VPCMPEQD Y1, Y1, Y1 + VPCMPEQD Y2, Y2, Y2 + VPXOR Y3, Y3, Y3 +loop8x4: + VPGATHERDD Y1, (BX)(Y0*1), Y4 + VPCMPEQD Y3, Y4, Y4 + VMOVMSKPS Y4, R9 + VMOVDQU Y2, Y1 + + NOTQ R9 + ANDQ $0b11111111, R9 + + MOVQ SI, CX + ANDQ $0b111111, CX + + MOVQ SI, R10 + SHRQ $6, R10 + + SHLQ CX, R9 + ORQ R9, (AX)(R10*8) + + LEAQ (BX)(DX*8), BX + ADDQ $8, SI + CMPQ SI, R8 + JNE loop8x4 + 
VZEROUPPER + + CMPQ SI, DI + JE done + + MOVQ $1, R8 + MOVQ SI, CX + ANDQ $0b111111, R8 + SHLQ CX, R8 + MOVQ R8, CX + +loop1x4: + MOVL (BX), R8 + CMPL R8, $0 + JE next1x4 + + MOVQ SI, R9 + SHRQ $6, R9 + ORQ CX, (AX)(R9*8) +next1x4: + ADDQ DX, BX + ROLQ $1, CX + INCQ SI + CMPQ SI, DI + JNE loop1x4 +done: + RET + +// func nullIndex64(bits *uint64, rows sparse.Array) +TEXT ·nullIndex64(SB), NOSPLIT, $0-32 + MOVQ bits+0(FP), AX + MOVQ rows_array_ptr+8(FP), BX + MOVQ rows_array_len+16(FP), DI + MOVQ rows_array_off+24(FP), DX + + MOVQ $1, CX + XORQ SI, SI + + CMPQ DI, $0 + JE done + + CMPQ DI, $4 + JB loop1x8 + + CMPB ·hasAVX2(SB), $0 + JE loop1x8 + + MOVQ DI, R8 + SHRQ $2, R8 + SHLQ $2, R8 + + VPBROADCASTQ rows_array_off+24(FP), Y0 + VPMULLD scale4x8<>(SB), Y0, Y0 + VPCMPEQQ Y1, Y1, Y1 + VPCMPEQQ Y2, Y2, Y2 + VPXOR Y3, Y3, Y3 +loop4x8: + VPGATHERQQ Y1, (BX)(Y0*1), Y4 + VPCMPEQQ Y3, Y4, Y4 + VMOVMSKPD Y4, R9 + VMOVDQU Y2, Y1 + + NOTQ R9 + ANDQ $0b1111, R9 + + MOVQ SI, CX + ANDQ $0b111111, CX + + MOVQ SI, R10 + SHRQ $6, R10 + + SHLQ CX, R9 + ORQ R9, (AX)(R10*8) + + LEAQ (BX)(DX*4), BX + ADDQ $4, SI + CMPQ SI, R8 + JNE loop4x8 + VZEROUPPER + + CMPQ SI, DI + JE done + + MOVQ $1, R8 + MOVQ SI, CX + ANDQ $0b111111, R8 + SHLQ CX, R8 + MOVQ R8, CX + +loop1x8: + MOVQ (BX), R8 + CMPQ R8, $0 + JE next1x8 + + MOVQ SI, R9 + SHRQ $6, R9 + ORQ CX, (AX)(R9*8) +next1x8: + ADDQ DX, BX + ROLQ $1, CX + INCQ SI + CMPQ SI, DI + JNE loop1x8 +done: + RET + +GLOBL scale4x8<>(SB), RODATA|NOPTR, $32 +DATA scale4x8<>+0(SB)/8, $0 +DATA scale4x8<>+8(SB)/8, $1 +DATA scale4x8<>+16(SB)/8, $2 +DATA scale4x8<>+24(SB)/8, $3 + +// func nullIndex128(bits *uint64, rows sparse.Array) +TEXT ·nullIndex128(SB), NOSPLIT, $0-32 + MOVQ bits+0(FP), AX + MOVQ rows_array_ptr+8(FP), BX + MOVQ rows_array_len+16(FP), DI + MOVQ rows_array_off+24(FP), DX + + CMPQ DI, $0 + JE done + + MOVQ $1, CX + XORQ SI, SI + PXOR X0, X0 +loop1x16: + MOVOU (BX), X1 + PCMPEQQ X0, X1 + MOVMSKPD X1, R8 + CMPB R8, $0b11 + JE next1x16 + + 
MOVQ SI, R9 + SHRQ $6, R9 + ORQ CX, (AX)(R9*8) +next1x16: + ADDQ DX, BX + ROLQ $1, CX + INCQ SI + CMPQ SI, DI + JNE loop1x16 +done: + RET diff --git a/vendor/github.com/parquet-go/parquet-go/null_purego.go b/vendor/github.com/parquet-go/parquet-go/null_purego.go new file mode 100644 index 00000000000..fd6c0e52f5b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/null_purego.go @@ -0,0 +1,64 @@ +//go:build purego || !amd64 + +package parquet + +import "github.com/parquet-go/parquet-go/sparse" + +func nullIndexBool(bits []uint64, rows sparse.Array) { + nullIndex[bool](bits, rows) +} + +func nullIndexInt(bits []uint64, rows sparse.Array) { + nullIndex[int](bits, rows) +} + +func nullIndexInt32(bits []uint64, rows sparse.Array) { + nullIndex[int32](bits, rows) +} + +func nullIndexInt64(bits []uint64, rows sparse.Array) { + nullIndex[int64](bits, rows) +} + +func nullIndexUint(bits []uint64, rows sparse.Array) { + nullIndex[uint](bits, rows) +} + +func nullIndexUint32(bits []uint64, rows sparse.Array) { + nullIndex[uint32](bits, rows) +} + +func nullIndexUint64(bits []uint64, rows sparse.Array) { + nullIndex[uint64](bits, rows) +} + +func nullIndexUint128(bits []uint64, rows sparse.Array) { + nullIndex[[16]byte](bits, rows) +} + +func nullIndexFloat32(bits []uint64, rows sparse.Array) { + nullIndex[float32](bits, rows) +} + +func nullIndexFloat64(bits []uint64, rows sparse.Array) { + nullIndex[float64](bits, rows) +} + +func nullIndexString(bits []uint64, rows sparse.Array) { + nullIndex[string](bits, rows) +} + +func nullIndexSlice(bits []uint64, rows sparse.Array) { + for i := range rows.Len() { + p := *(**struct{})(rows.Index(i)) + b := uint64(0) + if p != nil { + b = 1 + } + bits[uint(i)/64] |= b << (uint(i) % 64) + } +} + +func nullIndexPointer(bits []uint64, rows sparse.Array) { + nullIndex[*struct{}](bits, rows) +} diff --git a/vendor/github.com/parquet-go/parquet-go/offset_index.go b/vendor/github.com/parquet-go/parquet-go/offset_index.go new file mode 
100644 index 00000000000..b499e427cd4 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/offset_index.go @@ -0,0 +1,128 @@ +package parquet + +import ( + "github.com/parquet-go/parquet-go/format" +) + +type OffsetIndex interface { + // NumPages returns the number of pages in the offset index. + NumPages() int + + // Offset returns the offset starting from the beginning of the file for the + // page at the given index. + Offset(int) int64 + + // CompressedPageSize returns the size of the page at the given index + // (in bytes). + CompressedPageSize(int) int64 + + // FirstRowIndex returns the the first row in the page at the given index. + // + // The returned row index is based on the row group that the page belongs + // to, the first row has index zero. + FirstRowIndex(int) int64 +} + +type FileOffsetIndex struct { + index *format.OffsetIndex +} + +func (i *FileOffsetIndex) NumPages() int { + return len(i.index.PageLocations) +} + +func (i *FileOffsetIndex) Offset(j int) int64 { + return i.index.PageLocations[j].Offset +} + +func (i *FileOffsetIndex) CompressedPageSize(j int) int64 { + return int64(i.index.PageLocations[j].CompressedPageSize) +} + +func (i *FileOffsetIndex) FirstRowIndex(j int) int64 { + return i.index.PageLocations[j].FirstRowIndex +} + +type emptyOffsetIndex struct{} + +func (emptyOffsetIndex) NumPages() int { return 0 } +func (emptyOffsetIndex) Offset(int) int64 { return 0 } +func (emptyOffsetIndex) CompressedPageSize(int) int64 { return 0 } +func (emptyOffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type booleanOffsetIndex struct{ page *booleanPage } + +func (i booleanOffsetIndex) NumPages() int { return 1 } +func (i booleanOffsetIndex) Offset(int) int64 { return 0 } +func (i booleanOffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i booleanOffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type int32OffsetIndex struct{ page *int32Page } + +func (i int32OffsetIndex) NumPages() int { return 1 } +func (i 
int32OffsetIndex) Offset(int) int64 { return 0 } +func (i int32OffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i int32OffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type int64OffsetIndex struct{ page *int64Page } + +func (i int64OffsetIndex) NumPages() int { return 1 } +func (i int64OffsetIndex) Offset(int) int64 { return 0 } +func (i int64OffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i int64OffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type int96OffsetIndex struct{ page *int96Page } + +func (i int96OffsetIndex) NumPages() int { return 1 } +func (i int96OffsetIndex) Offset(int) int64 { return 0 } +func (i int96OffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i int96OffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type floatOffsetIndex struct{ page *floatPage } + +func (i floatOffsetIndex) NumPages() int { return 1 } +func (i floatOffsetIndex) Offset(int) int64 { return 0 } +func (i floatOffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i floatOffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type doubleOffsetIndex struct{ page *doublePage } + +func (i doubleOffsetIndex) NumPages() int { return 1 } +func (i doubleOffsetIndex) Offset(int) int64 { return 0 } +func (i doubleOffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i doubleOffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type byteArrayOffsetIndex struct{ page *byteArrayPage } + +func (i byteArrayOffsetIndex) NumPages() int { return 1 } +func (i byteArrayOffsetIndex) Offset(int) int64 { return 0 } +func (i byteArrayOffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i byteArrayOffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type fixedLenByteArrayOffsetIndex struct{ page *fixedLenByteArrayPage } + +func (i fixedLenByteArrayOffsetIndex) NumPages() int { return 1 } +func (i fixedLenByteArrayOffsetIndex) Offset(int) int64 { 
return 0 } +func (i fixedLenByteArrayOffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i fixedLenByteArrayOffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type uint32OffsetIndex struct{ page *uint32Page } + +func (i uint32OffsetIndex) NumPages() int { return 1 } +func (i uint32OffsetIndex) Offset(int) int64 { return 0 } +func (i uint32OffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i uint32OffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type uint64OffsetIndex struct{ page *uint64Page } + +func (i uint64OffsetIndex) NumPages() int { return 1 } +func (i uint64OffsetIndex) Offset(int) int64 { return 0 } +func (i uint64OffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i uint64OffsetIndex) FirstRowIndex(int) int64 { return 0 } + +type be128OffsetIndex struct{ page *be128Page } + +func (i be128OffsetIndex) NumPages() int { return 1 } +func (i be128OffsetIndex) Offset(int) int64 { return 0 } +func (i be128OffsetIndex) CompressedPageSize(int) int64 { return i.page.Size() } +func (i be128OffsetIndex) FirstRowIndex(int) int64 { return 0 } diff --git a/vendor/github.com/parquet-go/parquet-go/order.go b/vendor/github.com/parquet-go/parquet-go/order.go new file mode 100644 index 00000000000..6fef7bba2fa --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/order.go @@ -0,0 +1,102 @@ +package parquet + +import ( + "bytes" + + "github.com/parquet-go/bitpack/unsafecast" +) + +func orderOfBool(data []bool) int { + switch len(data) { + case 0, 1: + return 0 + default: + k := 0 + i := 0 + + if data[0] { // true => false: descending + k = -1 + i = streakOfTrue(data) + if i == len(data) { + k = +1 + } else { + i += streakOfFalse(data[i:]) + } + } else { // false => true: ascending + k = +1 + i = streakOfFalse(data) + i += streakOfTrue(data[i:]) + } + + if i != len(data) { + k = 0 + } + return k + } +} + +func streakOfTrue(data []bool) int { + if i := 
bytes.IndexByte(unsafecast.Slice[byte](data), 0); i >= 0 { + return i + } + return len(data) +} + +func streakOfFalse(data []bool) int { + if i := bytes.IndexByte(unsafecast.Slice[byte](data), 1); i >= 0 { + return i + } + return len(data) +} + +func orderOfBytes(data [][]byte) int { + switch len(data) { + case 0, 1: + return 0 + } + data = skipBytesStreak(data) + if len(data) < 2 { + return 1 + } + ordering := bytes.Compare(data[0], data[1]) + switch { + case ordering < 0: + if bytesAreInAscendingOrder(data[1:]) { + return +1 + } + case ordering > 0: + if bytesAreInDescendingOrder(data[1:]) { + return -1 + } + } + return 0 +} + +func skipBytesStreak(data [][]byte) [][]byte { + for i := 1; i < len(data); i++ { + if !bytes.Equal(data[i], data[0]) { + return data[i-1:] + } + } + return data[len(data)-1:] +} + +func bytesAreInAscendingOrder(data [][]byte) bool { + for i := len(data) - 1; i > 0; i-- { + k := bytes.Compare(data[i-1], data[i]) + if k > 0 { + return false + } + } + return true +} + +func bytesAreInDescendingOrder(data [][]byte) bool { + for i := len(data) - 1; i > 0; i-- { + k := bytes.Compare(data[i-1], data[i]) + if k < 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/parquet-go/parquet-go/order_amd64.go b/vendor/github.com/parquet-go/parquet-go/order_amd64.go new file mode 100644 index 00000000000..8ac597d62a7 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/order_amd64.go @@ -0,0 +1,21 @@ +//go:build !purego + +package parquet + +//go:noescape +func orderOfInt32(data []int32) int + +//go:noescape +func orderOfInt64(data []int64) int + +//go:noescape +func orderOfUint32(data []uint32) int + +//go:noescape +func orderOfUint64(data []uint64) int + +//go:noescape +func orderOfFloat32(data []float32) int + +//go:noescape +func orderOfFloat64(data []float64) int diff --git a/vendor/github.com/parquet-go/parquet-go/order_amd64.s b/vendor/github.com/parquet-go/parquet-go/order_amd64.s new file mode 100644 index 
00000000000..651d2510bc6 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/order_amd64.s @@ -0,0 +1,547 @@ +//go:build !purego + +#include "textflag.h" + +#define UNDEFINED 0 +#define ASCENDING 1 +#define DESCENDING -1 + +DATA shift1x32<>+0(SB)/4, $1 +DATA shift1x32<>+4(SB)/4, $2 +DATA shift1x32<>+8(SB)/4, $3 +DATA shift1x32<>+12(SB)/4, $4 +DATA shift1x32<>+16(SB)/4, $5 +DATA shift1x32<>+20(SB)/4, $6 +DATA shift1x32<>+24(SB)/4, $7 +DATA shift1x32<>+28(SB)/4, $8 +DATA shift1x32<>+32(SB)/4, $9 +DATA shift1x32<>+36(SB)/4, $10 +DATA shift1x32<>+40(SB)/4, $11 +DATA shift1x32<>+44(SB)/4, $12 +DATA shift1x32<>+48(SB)/4, $13 +DATA shift1x32<>+52(SB)/4, $14 +DATA shift1x32<>+56(SB)/4, $15 +DATA shift1x32<>+60(SB)/4, $15 +GLOBL shift1x32<>(SB), RODATA|NOPTR, $64 + +DATA shift1x64<>+0(SB)/4, $1 +DATA shift1x64<>+8(SB)/4, $2 +DATA shift1x64<>+16(SB)/4, $3 +DATA shift1x64<>+24(SB)/4, $4 +DATA shift1x64<>+32(SB)/4, $5 +DATA shift1x64<>+40(SB)/4, $6 +DATA shift1x64<>+48(SB)/4, $7 +DATA shift1x64<>+56(SB)/4, $7 +GLOBL shift1x64<>(SB), RODATA|NOPTR, $64 + +// func orderOfInt32(data []int32) int +TEXT ·orderOfInt32(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), R8 + MOVQ data_len+8(FP), R9 + XORQ SI, SI + XORQ DI, DI + + CMPQ R9, $2 + JB undefined + + CMPB ·hasAVX512VL(SB), $0 + JE test + + CMPQ R9, $16 + JB test + + XORQ DX, DX + MOVQ R9, AX + SHRQ $4, AX + SHLQ $4, AX + MOVQ $15, CX + IDIVQ CX + IMULQ $15, AX + DECQ R9 + + VMOVDQU32 shift1x32<>(SB), Z2 + KXORW K2, K2, K2 +testAscending15: + VMOVDQU32 (R8)(SI*4), Z0 + VMOVDQU32 Z2, Z1 + VPERMI2D Z0, Z0, Z1 + VPCMPD $2, Z1, Z0, K1 + KORTESTW K2, K1 + JNC testDescending15 + ADDQ $15, SI + CMPQ SI, AX + JNE testAscending15 + VZEROUPPER + JMP testAscending +testDescending15: + VMOVDQU32 (R8)(DI*4), Z0 + VMOVDQU32 Z2, Z1 + VPERMI2D Z0, Z0, Z1 + VPCMPD $5, Z1, Z0, K1 + KORTESTW K2, K1 + JNC undefined15 + ADDQ $15, DI + CMPQ DI, AX + JNE testDescending15 + VZEROUPPER + JMP testDescending + +test: + DECQ R9 +testAscending: + CMPQ SI, 
R9 + JAE ascending + MOVL (R8)(SI*4), BX + MOVL 4(R8)(SI*4), DX + INCQ SI + CMPL BX, DX + JLE testAscending + JMP testDescending +ascending: + MOVQ $ASCENDING, ret+24(FP) + RET +testDescending: + CMPQ DI, R9 + JAE descending + MOVL (R8)(DI*4), BX + MOVL 4(R8)(DI*4), DX + INCQ DI + CMPL BX, DX + JGE testDescending + JMP undefined +descending: + MOVQ $DESCENDING, ret+24(FP) + RET +undefined15: + VZEROUPPER +undefined: + MOVQ $UNDEFINED, ret+24(FP) + RET + +// func orderOfInt64(data []int64) int +TEXT ·orderOfInt64(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), R8 + MOVQ data_len+8(FP), R9 + XORQ SI, SI + XORQ DI, DI + + CMPQ R9, $2 + JB undefined + + CMPB ·hasAVX512VL(SB), $0 + JE test + + CMPQ R9, $8 + JB test + + XORQ DX, DX + MOVQ R9, AX + SHRQ $3, AX + SHLQ $3, AX + MOVQ $7, CX + IDIVQ CX + IMULQ $7, AX + DECQ R9 + + VMOVDQU64 shift1x64<>(SB), Z2 + KXORB K2, K2, K2 +testAscending7: + VMOVDQU64 (R8)(SI*8), Z0 + VMOVDQU64 Z2, Z1 + VPERMI2Q Z0, Z0, Z1 + VPCMPQ $2, Z1, Z0, K1 + KORTESTB K2, K1 + JNC testDescending7 + ADDQ $7, SI + CMPQ SI, AX + JNE testAscending7 + VZEROUPPER + JMP testAscending +testDescending7: + VMOVDQU64 (R8)(DI*8), Z0 + VMOVDQU64 Z2, Z1 + VPERMI2Q Z0, Z0, Z1 + VPCMPQ $5, Z1, Z0, K1 + KORTESTB K2, K1 + JNC undefined7 + ADDQ $7, DI + CMPQ DI, AX + JNE testDescending7 + VZEROUPPER + JMP testDescending + +test: + DECQ R9 +testAscending: + CMPQ SI, R9 + JAE ascending + MOVQ (R8)(SI*8), BX + MOVQ 8(R8)(SI*8), DX + INCQ SI + CMPQ BX, DX + JLE testAscending + JMP testDescending +ascending: + MOVQ $ASCENDING, ret+24(FP) + RET +testDescending: + CMPQ DI, R9 + JAE descending + MOVQ (R8)(DI*8), BX + MOVQ 8(R8)(DI*8), DX + INCQ DI + CMPQ BX, DX + JGE testDescending + JMP undefined +descending: + MOVQ $DESCENDING, ret+24(FP) + RET +undefined7: + VZEROUPPER +undefined: + MOVQ $UNDEFINED, ret+24(FP) + RET + +// func orderOfUint32(data []uint32) int +TEXT ·orderOfUint32(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), R8 + MOVQ data_len+8(FP), R9 + XORQ SI, SI + XORQ DI, 
DI + + CMPQ R9, $2 + JB undefined + + CMPB ·hasAVX512VL(SB), $0 + JE test + + CMPQ R9, $16 + JB test + + XORQ DX, DX + MOVQ R9, AX + SHRQ $4, AX + SHLQ $4, AX + MOVQ $15, CX + IDIVQ CX + IMULQ $15, AX + DECQ R9 + + VMOVDQU32 shift1x32<>(SB), Z2 + KXORW K2, K2, K2 +testAscending15: + VMOVDQU32 (R8)(SI*4), Z0 + VMOVDQU32 Z2, Z1 + VPERMI2D Z0, Z0, Z1 + VPCMPUD $2, Z1, Z0, K1 + KORTESTW K2, K1 + JNC testDescending15 + ADDQ $15, SI + CMPQ SI, AX + JNE testAscending15 + VZEROUPPER + JMP testAscending +testDescending15: + VMOVDQU32 (R8)(DI*4), Z0 + VMOVDQU32 Z2, Z1 + VPERMI2D Z0, Z0, Z1 + VPCMPUD $5, Z1, Z0, K1 + KORTESTW K2, K1 + JNC undefined15 + ADDQ $15, DI + CMPQ DI, AX + JNE testDescending15 + VZEROUPPER + JMP testDescending + +test: + DECQ R9 +testAscending: + CMPQ SI, R9 + JAE ascending + MOVL (R8)(SI*4), BX + MOVL 4(R8)(SI*4), DX + INCQ SI + CMPL BX, DX + JBE testAscending + JMP testDescending +ascending: + MOVQ $ASCENDING, ret+24(FP) + RET +testDescending: + CMPQ DI, R9 + JAE descending + MOVL (R8)(DI*4), BX + MOVL 4(R8)(DI*4), DX + INCQ DI + CMPL BX, DX + JAE testDescending + JMP undefined +descending: + MOVQ $DESCENDING, ret+24(FP) + RET +undefined15: + VZEROUPPER +undefined: + MOVQ $UNDEFINED, ret+24(FP) + RET + +// func orderOfUint64(data []uint64) int +TEXT ·orderOfUint64(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), R8 + MOVQ data_len+8(FP), R9 + XORQ SI, SI + XORQ DI, DI + + CMPQ R9, $2 + JB undefined + + CMPB ·hasAVX512VL(SB), $0 + JE test + + CMPQ R9, $8 + JB test + + XORQ DX, DX + MOVQ R9, AX + SHRQ $3, AX + SHLQ $3, AX + MOVQ $7, CX + IDIVQ CX + IMULQ $7, AX + DECQ R9 + + VMOVDQU64 shift1x64<>(SB), Z2 + KXORB K2, K2, K2 +testAscending7: + VMOVDQU64 (R8)(SI*8), Z0 + VMOVDQU64 Z2, Z1 + VPERMI2Q Z0, Z0, Z1 + VPCMPUQ $2, Z1, Z0, K1 + KORTESTB K2, K1 + JNC testDescending7 + ADDQ $7, SI + CMPQ SI, AX + JNE testAscending7 + VZEROUPPER + JMP testAscending +testDescending7: + VMOVDQU64 (R8)(DI*8), Z0 + VMOVDQU64 Z2, Z1 + VPERMI2Q Z0, Z0, Z1 + VPCMPUQ $5, Z1, Z0, 
K1 + KORTESTB K2, K1 + JNC undefined7 + ADDQ $7, DI + CMPQ DI, AX + JNE testDescending7 + VZEROUPPER + JMP testDescending + +test: + DECQ R9 +testAscending: + CMPQ SI, R9 + JAE ascending + MOVQ (R8)(SI*8), BX + MOVQ 8(R8)(SI*8), DX + INCQ SI + CMPQ BX, DX + JBE testAscending + JMP testDescending +ascending: + MOVQ $ASCENDING, ret+24(FP) + RET +testDescending: + CMPQ DI, R9 + JAE descending + MOVQ (R8)(DI*8), BX + MOVQ 8(R8)(DI*8), DX + INCQ DI + CMPQ BX, DX + JAE testDescending + JMP undefined +descending: + MOVQ $DESCENDING, ret+24(FP) + RET +undefined7: + VZEROUPPER +undefined: + MOVQ $UNDEFINED, ret+24(FP) + RET + +// func orderOfFloat32(data []float32) int +TEXT ·orderOfFloat32(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), R8 + MOVQ data_len+8(FP), R9 + XORQ SI, SI + XORQ DI, DI + + CMPQ R9, $2 + JB undefined + + CMPB ·hasAVX512VL(SB), $0 + JE test + + CMPQ R9, $16 + JB test + + XORQ DX, DX + MOVQ R9, AX + SHRQ $4, AX + SHLQ $4, AX + MOVQ $15, CX + IDIVQ CX + IMULQ $15, AX + DECQ R9 + + VMOVDQU32 shift1x32<>(SB), Z2 + KXORW K2, K2, K2 +testAscending15: + VMOVDQU32 (R8)(SI*4), Z0 + VMOVDQU32 Z2, Z1 + VPERMI2D Z0, Z0, Z1 + VCMPPS $2, Z1, Z0, K1 + KORTESTW K2, K1 + JNC testDescending15 + ADDQ $15, SI + CMPQ SI, AX + JNE testAscending15 + VZEROUPPER + JMP testAscending +testDescending15: + VMOVDQU32 (R8)(DI*4), Z0 + VMOVDQU32 Z2, Z1 + VPERMI2D Z0, Z0, Z1 + VCMPPS $5, Z1, Z0, K1 + KORTESTW K2, K1 + JNC undefined15 + ADDQ $15, DI + CMPQ DI, AX + JNE testDescending15 + VZEROUPPER + JMP testDescending + +test: + DECQ R9 +testAscending: + CMPQ SI, R9 + JAE ascending + MOVLQZX (R8)(SI*4), BX + MOVLQZX 4(R8)(SI*4), DX + INCQ SI + MOVQ BX, X0 + MOVQ DX, X1 + UCOMISS X1, X0 + JBE testAscending + JMP testDescending +ascending: + MOVQ $ASCENDING, ret+24(FP) + RET +testDescending: + CMPQ DI, R9 + JAE descending + MOVLQZX (R8)(DI*4), BX + MOVLQZX 4(R8)(DI*4), DX + INCQ DI + MOVQ BX, X0 + MOVQ DX, X1 + UCOMISS X1, X0 + JAE testDescending + JMP undefined +descending: + MOVQ 
$DESCENDING, ret+24(FP) + RET +undefined15: + VZEROUPPER +undefined: + MOVQ $UNDEFINED, ret+24(FP) + RET + +// func orderOfFloat64(data []uint64) int +TEXT ·orderOfFloat64(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), R8 + MOVQ data_len+8(FP), R9 + XORQ SI, SI + XORQ DI, DI + + CMPQ R9, $2 + JB undefined + + CMPB ·hasAVX512VL(SB), $0 + JE test + + CMPQ R9, $8 + JB test + + XORQ DX, DX + MOVQ R9, AX + SHRQ $3, AX + SHLQ $3, AX + MOVQ $7, CX + IDIVQ CX + IMULQ $7, AX + DECQ R9 + + VMOVDQU64 shift1x64<>(SB), Z2 + KXORB K2, K2, K2 +testAscending7: + VMOVDQU64 (R8)(SI*8), Z0 + VMOVDQU64 Z2, Z1 + VPERMI2Q Z0, Z0, Z1 + VCMPPD $2, Z1, Z0, K1 + KORTESTB K2, K1 + JNC testDescending7 + ADDQ $7, SI + CMPQ SI, AX + JNE testAscending7 + VZEROUPPER + JMP testAscending +testDescending7: + VMOVDQU64 (R8)(DI*8), Z0 + VMOVDQU64 Z2, Z1 + VPERMI2Q Z0, Z0, Z1 + VCMPPD $5, Z1, Z0, K1 + KORTESTB K2, K1 + JNC undefined7 + ADDQ $7, DI + CMPQ DI, AX + JNE testDescending7 + VZEROUPPER + JMP testDescending + +test: + DECQ R9 +testAscending: + CMPQ SI, R9 + JAE ascending + MOVQ (R8)(SI*8), BX + MOVQ 8(R8)(SI*8), DX + INCQ SI + MOVQ BX, X0 + MOVQ DX, X1 + UCOMISD X1, X0 + JBE testAscending + JMP testDescending +ascending: + MOVQ $ASCENDING, ret+24(FP) + RET +testDescending: + CMPQ DI, R9 + JAE descending + MOVQ (R8)(DI*8), BX + MOVQ 8(R8)(DI*8), DX + INCQ DI + MOVQ BX, X0 + MOVQ DX, X1 + UCOMISD X1, X0 + JAE testDescending + JMP undefined +descending: + MOVQ $DESCENDING, ret+24(FP) + RET +undefined7: + VZEROUPPER +undefined: + MOVQ $UNDEFINED, ret+24(FP) + RET diff --git a/vendor/github.com/parquet-go/parquet-go/order_purego.go b/vendor/github.com/parquet-go/parquet-go/order_purego.go new file mode 100644 index 00000000000..44c4d7905e3 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/order_purego.go @@ -0,0 +1,42 @@ +//go:build purego || !amd64 + +package parquet + +import "cmp" + +func orderOfInt32(data []int32) int { return orderOf(data) } +func orderOfInt64(data []int64) int { return 
orderOf(data) } +func orderOfUint32(data []uint32) int { return orderOf(data) } +func orderOfUint64(data []uint64) int { return orderOf(data) } +func orderOfFloat32(data []float32) int { return orderOf(data) } +func orderOfFloat64(data []float64) int { return orderOf(data) } + +func orderOf[T cmp.Ordered](data []T) int { + if len(data) > 1 { + if orderIsAscending(data) { + return +1 + } + if orderIsDescending(data) { + return -1 + } + } + return 0 +} + +func orderIsAscending[T cmp.Ordered](data []T) bool { + for i := len(data) - 1; i > 0; i-- { + if data[i-1] > data[i] { + return false + } + } + return true +} + +func orderIsDescending[T cmp.Ordered](data []T) bool { + for i := len(data) - 1; i > 0; i-- { + if data[i-1] < data[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/parquet-go/parquet-go/page.go b/vendor/github.com/parquet-go/parquet-go/page.go new file mode 100644 index 00000000000..605fcfb33e1 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page.go @@ -0,0 +1,414 @@ +package parquet + +import ( + "errors" + "fmt" + "io" + + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/debug" +) + +// Page values represent sequences of parquet values. From the Parquet +// documentation: "Column chunks are a chunk of the data for a particular +// column. They live in a particular row group and are guaranteed to be +// contiguous in the file. Column chunks are divided up into pages. A page is +// conceptually an indivisible unit (in terms of compression and encoding). +// There can be multiple page types which are interleaved in a column chunk." +// +// https://github.com/apache/parquet-format#glossary +type Page interface { + // Returns the type of values read from this page. 
+ // + // The returned type can be used to encode the page data, in the case of + // an indexed page (which has a dictionary), the type is configured to + // encode the indexes stored in the page rather than the plain values. + Type() Type + + // Returns the column index that this page belongs to. + Column() int + + // If the page contains indexed values, calling this method returns the + // dictionary in which the values are looked up. Otherwise, the method + // returns nil. + Dictionary() Dictionary + + // Returns the number of rows, values, and nulls in the page. The number of + // rows may be less than the number of values in the page if the page is + // part of a repeated column. + NumRows() int64 + NumValues() int64 + NumNulls() int64 + + // Returns the page's min and max values. + // + // The third value is a boolean indicating whether the page bounds were + // available. Page bounds may not be known if the page contained no values + // or only nulls, or if they were read from a parquet file which had neither + // page statistics nor a page index. + Bounds() (min, max Value, ok bool) + + // Returns the size of the page in bytes (uncompressed). + Size() int64 + + // Returns a reader exposing the values contained in the page. + // + // Depending on the underlying implementation, the returned reader may + // support reading an array of typed Go values by implementing interfaces + // like parquet.Int32Reader. Applications should use type assertions on + // the returned reader to determine whether those optimizations are + // available. + // + // In the data page format version 1, it wasn't specified whether pages + // must start with a new row. Legacy writers have produced parquet files + // where row values were overlapping between two consecutive pages. 
+ // As a result, the values read must not be assumed to start at the + // beginning of a row, unless the program knows that it is only working + // with parquet files that used the data page format version 2 (which is + // the default behavior for parquet-go). + Values() ValueReader + + // Returns a new page which is as slice of the receiver between row indexes + // i and j. + Slice(i, j int64) Page + + // Expose the lists of repetition and definition levels of the page. + // + // The returned slices may be empty when the page has no repetition or + // definition levels. + RepetitionLevels() []byte + DefinitionLevels() []byte + + // Returns the in-memory buffer holding the page values. + // + // The intent is for the returned value to be used as input parameter when + // calling the Encode method of the associated Type. + // + // The slices referenced by the encoding.Values may be the same across + // multiple calls to this method, applications must treat the content as + // immutable. + Data() encoding.Values +} + +// PageReader is an interface implemented by types that support producing a +// sequence of pages. +type PageReader interface { + // Reads and returns the next page from the sequence. When all pages have + // been read, or if the sequence was closed, the method returns io.EOF. + ReadPage() (Page, error) +} + +// PageWriter is an interface implemented by types that support writing pages +// to an underlying storage medium. +type PageWriter interface { + WritePage(Page) (int64, error) +} + +// Pages is an interface implemented by page readers returned by calling the +// Pages method of ColumnChunk instances. +type Pages interface { + PageReader + RowSeeker + io.Closer +} + +// AsyncPages wraps the given Pages instance to perform page reads +// asynchronously in a separate goroutine. 
+// +// Performing page reads asynchronously is important when the application may +// be reading pages from a high latency backend, and the last +// page read may be processed while initiating reading of the next page. +func AsyncPages(pages Pages) Pages { + read := make(chan asyncPage) + seek := make(chan asyncSeek, 1) + init := make(chan struct{}) + done := make(chan struct{}) + + go readPages(pages, read, seek, init, done) + + p := &asyncPages{ + read: read, + seek: seek, + init: init, + done: done, + } + + // If the pages object gets garbage collected without Close being called, + // this finalizer would ensure that the goroutine is stopped and doesn't + // leak. + debug.SetFinalizer(p, func(p *asyncPages) { p.Close() }) + return p +} + +type asyncPages struct { + read chan asyncPage + seek chan asyncSeek + init chan struct{} + done chan struct{} + version int64 +} + +type asyncPage struct { + page Page + err error + version int64 +} + +type asyncSeek struct { + rowIndex int64 + version int64 +} + +func (pages *asyncPages) Close() (err error) { + if pages.init != nil { + close(pages.init) + pages.init = nil + } + if pages.done != nil { + close(pages.done) + pages.done = nil + } + for p := range pages.read { + Release(p.page) + + // Capture the last error, which is the value returned from closing the + // underlying Pages instance. + err = p.err + } + pages.seek = nil + return err +} + +func (pages *asyncPages) ReadPage() (Page, error) { + pages.start() + for { + p, ok := <-pages.read + if !ok { + return nil, io.EOF + } + // Because calls to SeekToRow might be made concurrently to reading + // pages, it is possible for ReadPage to see pages that were read before + // the last SeekToRow call. + // + // A version number is attached to each page read asynchronously to + // discard outdated pages and ensure that we maintain a consistent view + // of the sequence of pages read. 
+ if p.version == pages.version { + return p.page, p.err + } + + // the page is being dropped here b/c it was the wrong version + Release(p.page) + } +} + +func (pages *asyncPages) SeekToRow(rowIndex int64) error { + if pages.seek == nil { + return io.ErrClosedPipe + } + // First flush the channel in case SeekToRow is called twice or more in a + // row, otherwise we would block if readPages had already exited. + select { + case <-pages.seek: + default: + pages.version++ + } + // The seek channel has a capacity of 1 to allow the first SeekToRow call to + // be non-blocking. + // + // If SeekToRow calls are performed faster than they can be handled by the + // goroutine reading pages, this path might become a contention point. + pages.seek <- asyncSeek{rowIndex: rowIndex, version: pages.version} + pages.start() + return nil +} + +func (pages *asyncPages) start() { + if pages.init != nil { + close(pages.init) + pages.init = nil + } +} + +func readPages(pages Pages, read chan<- asyncPage, seek <-chan asyncSeek, init, done <-chan struct{}) { + defer func() { + read <- asyncPage{err: pages.Close(), version: -1} + close(read) + }() + + // To avoid reading pages before the first SeekToRow call, we wait for the + // reader to be initialized, which means it either received a call to + // ReadPage, SeekToRow, or Close. + select { + case <-init: + case <-done: + return + } + + // If SeekToRow was invoked before ReadPage, the seek channel contains the + // new position of the reader. + // + // Note that we have a default case in this select because we don't want to + // block if the first call was ReadPage and no values were ever produced to + // the seek channel. 
+ var seekTo asyncSeek + select { + case seekTo = <-seek: + default: + seekTo.rowIndex = -1 + } + + var err error + + for { + var page Page + + // if err is not fatal we consider the underlying pages object to be in an unknown state + // and we only repeatedly return that error + if !isFatalError(err) { + if seekTo.rowIndex >= 0 { + err = pages.SeekToRow(seekTo.rowIndex) + if err == nil { + seekTo.rowIndex = -1 + continue + } + } else { + page, err = pages.ReadPage() + } + } + + select { + case read <- asyncPage{ + page: page, + err: err, + version: seekTo.version, + }: + case seekTo = <-seek: + Release(page) + case <-done: + Release(page) + return + } + } +} + +func isFatalError(err error) bool { + return err != nil && err != io.EOF && !errors.Is(err, ErrSeekOutOfRange) // ErrSeekOutOfRange can be returned from FilePages but is recoverable +} + +type singlePage struct { + page Page + seek int64 + numRows int64 +} + +func (r *singlePage) ReadPage() (Page, error) { + if r.page != nil { + if r.seek < r.numRows { + seek := r.seek + r.seek = r.numRows + if seek > 0 { + return r.page.Slice(seek, r.numRows), nil + } + return r.page, nil + } + } + return nil, io.EOF +} + +func (r *singlePage) SeekToRow(rowIndex int64) error { + r.seek = rowIndex + return nil +} + +func (r *singlePage) Close() error { + r.page = nil + r.seek = 0 + return nil +} + +func onePage(page Page) Pages { + return &singlePage{page: page, numRows: page.NumRows()} +} + +// CopyPages copies pages from src to dst, returning the number of values that +// were copied. +// +// The function returns any error it encounters reading or writing pages, except +// for io.EOF from the reader which indicates that there were no more pages to +// read. 
+func CopyPages(dst PageWriter, src PageReader) (numValues int64, err error) { + for { + p, err := src.ReadPage() + if err != nil { + if err == io.EOF { + err = nil + } + return numValues, err + } + n, err := dst.WritePage(p) + numValues += n + Release(p) + if err != nil { + return numValues, err + } + } +} + +// errorPage is an implementation of the Page interface which always errors when +// values are read from it. +type errorPage struct { + typ Type + err error + columnIndex int +} + +func newErrorPage(typ Type, columnIndex int, msg string, args ...any) *errorPage { + return &errorPage{ + typ: typ, + err: fmt.Errorf(msg, args...), + columnIndex: columnIndex, + } +} + +func (page *errorPage) Type() Type { return page.typ } +func (page *errorPage) Column() int { return page.columnIndex } +func (page *errorPage) Dictionary() Dictionary { return nil } +func (page *errorPage) NumRows() int64 { return 1 } +func (page *errorPage) NumValues() int64 { return 1 } +func (page *errorPage) NumNulls() int64 { return 0 } +func (page *errorPage) Bounds() (min, max Value, ok bool) { return } +func (page *errorPage) Slice(i, j int64) Page { return page } +func (page *errorPage) Size() int64 { return 1 } +func (page *errorPage) RepetitionLevels() []byte { return nil } +func (page *errorPage) DefinitionLevels() []byte { return nil } +func (page *errorPage) Data() encoding.Values { return encoding.Values{} } +func (page *errorPage) Values() ValueReader { return errorPageValues{page: page} } + +type errorPageValues struct{ page *errorPage } + +func (r errorPageValues) ReadValues([]Value) (int, error) { return 0, r.page.err } +func (r errorPageValues) Close() error { return nil } + +func errPageBoundsOutOfRange(i, j, n int64) error { + return fmt.Errorf("page bounds out of range [%d:%d]: with length %d", i, j, n) +} + +var ( + _ Page = (*optionalPage)(nil) + _ Page = (*repeatedPage)(nil) + _ Page = (*booleanPage)(nil) + _ Page = (*int32Page)(nil) + _ Page = (*int64Page)(nil) + _ Page 
= (*int96Page)(nil) + _ Page = (*floatPage)(nil) + _ Page = (*doublePage)(nil) + _ Page = (*byteArrayPage)(nil) + _ Page = (*fixedLenByteArrayPage)(nil) + _ Page = (*uint32Page)(nil) + _ Page = (*uint64Page)(nil) + _ Page = (*be128Page)(nil) + _ Page = (*nullPage)(nil) + _ Pages = (*singlePage)(nil) + _ PageReader = (*singlePage)(nil) +) diff --git a/vendor/github.com/parquet-go/parquet-go/page_be128.go b/vendor/github.com/parquet-go/parquet-go/page_be128.go new file mode 100644 index 00000000000..17374aea237 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_be128.go @@ -0,0 +1,99 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/parquet-go/encoding" +) + +type be128Page struct { + typ Type + values [][16]byte + columnIndex int16 +} + +func newBE128Page(typ Type, columnIndex int16, numValues int32, values encoding.Values) *be128Page { + return &be128Page{ + typ: typ, + values: values.Uint128()[:numValues], + columnIndex: ^columnIndex, + } +} + +func (page *be128Page) Type() Type { return page.typ } + +func (page *be128Page) Column() int { return int(^page.columnIndex) } + +func (page *be128Page) Dictionary() Dictionary { return nil } + +func (page *be128Page) NumRows() int64 { return int64(len(page.values)) } + +func (page *be128Page) NumValues() int64 { return int64(len(page.values)) } + +func (page *be128Page) NumNulls() int64 { return 0 } + +func (page *be128Page) Size() int64 { return 16 * int64(len(page.values)) } + +func (page *be128Page) RepetitionLevels() []byte { return nil } + +func (page *be128Page) DefinitionLevels() []byte { return nil } + +func (page *be128Page) Data() encoding.Values { return encoding.Uint128Values(page.values) } + +func (page *be128Page) Values() ValueReader { return &be128PageValues{page: page} } + +func (page *be128Page) min() []byte { return minBE128(page.values) } + +func (page *be128Page) max() []byte { return maxBE128(page.values) } + +func (page *be128Page) bounds() (min, max []byte) { return 
boundsBE128(page.values) } + +func (page *be128Page) Bounds() (min, max Value, ok bool) { + if ok = len(page.values) > 0; ok { + minBytes, maxBytes := page.bounds() + min = page.makeValueBytes(minBytes) + max = page.makeValueBytes(maxBytes) + } + return min, max, ok +} + +func (page *be128Page) Slice(i, j int64) Page { + return &be128Page{ + typ: page.typ, + values: page.values[i:j], + columnIndex: page.columnIndex, + } +} + +func (page *be128Page) makeValue(v *[16]byte) Value { + return page.makeValueBytes(v[:]) +} + +func (page *be128Page) makeValueBytes(v []byte) Value { + value := makeValueBytes(FixedLenByteArray, v) + value.columnIndex = page.columnIndex + return value +} + +func (page *be128Page) makeValueString(v string) Value { + value := makeValueString(FixedLenByteArray, v) + value.columnIndex = page.columnIndex + return value +} + +type be128PageValues struct { + page *be128Page + offset int +} + +func (r *be128PageValues) ReadValues(values []Value) (n int, err error) { + for n < len(values) && r.offset < len(r.page.values) { + values[n] = r.page.makeValue(&r.page.values[r.offset]) + r.offset++ + n++ + } + if r.offset == len(r.page.values) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_boolean.go b/vendor/github.com/parquet-go/parquet-go/page_boolean.go new file mode 100644 index 00000000000..61234205a0b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_boolean.go @@ -0,0 +1,157 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type booleanPage struct { + typ Type + bits memory.SliceBuffer[byte] + offset int32 + numValues int32 + columnIndex int16 +} + +func newBooleanPage(typ Type, columnIndex int16, numValues int32, values encoding.Values) *booleanPage { + return &booleanPage{ + typ: typ, + bits: 
memory.SliceBufferFrom(values.Boolean()[:bitpack.ByteCount(uint(numValues))]), + numValues: numValues, + columnIndex: ^columnIndex, + } +} + +func (page *booleanPage) Type() Type { return page.typ } + +func (page *booleanPage) Column() int { return int(^page.columnIndex) } + +func (page *booleanPage) Dictionary() Dictionary { return nil } + +func (page *booleanPage) NumRows() int64 { return int64(page.numValues) } + +func (page *booleanPage) NumValues() int64 { return int64(page.numValues) } + +func (page *booleanPage) NumNulls() int64 { return 0 } + +func (page *booleanPage) Size() int64 { return int64(page.bits.Len()) } + +func (page *booleanPage) RepetitionLevels() []byte { return nil } + +func (page *booleanPage) DefinitionLevels() []byte { return nil } + +func (page *booleanPage) Data() encoding.Values { return encoding.BooleanValues(page.bits.Slice()) } + +func (page *booleanPage) Values() ValueReader { return &booleanPageValues{page: page} } + +func (page *booleanPage) valueAt(i int) bool { + bits := page.bits.Slice() + j := uint32(int(page.offset)+i) / 8 + k := uint32(int(page.offset)+i) % 8 + return ((bits[j] >> k) & 1) != 0 +} + +func (page *booleanPage) min() bool { + for i := range int(page.numValues) { + if !page.valueAt(i) { + return false + } + } + return page.numValues > 0 +} + +func (page *booleanPage) max() bool { + for i := range int(page.numValues) { + if page.valueAt(i) { + return true + } + } + return false +} + +func (page *booleanPage) bounds() (min, max bool) { + hasFalse, hasTrue := false, false + + for i := range int(page.numValues) { + v := page.valueAt(i) + if v { + hasTrue = true + } else { + hasFalse = true + } + if hasTrue && hasFalse { + break + } + } + + min = !hasFalse + max = hasTrue + return min, max +} + +func (page *booleanPage) Bounds() (min, max Value, ok bool) { + if ok = page.numValues > 0; ok { + minBool, maxBool := page.bounds() + min = page.makeValue(minBool) + max = page.makeValue(maxBool) + } + return min, max, ok +} 
+ +func (page *booleanPage) Slice(i, j int64) Page { + lowWithOffset := i + int64(page.offset) + highWithOffset := j + int64(page.offset) + + off := lowWithOffset / 8 + end := highWithOffset / 8 + + if (highWithOffset % 8) != 0 { + end++ + } + + return &booleanPage{ + typ: page.typ, + bits: memory.SliceBufferFrom(page.bits.Slice()[off:end]), + offset: int32(lowWithOffset % 8), + numValues: int32(j - i), + columnIndex: page.columnIndex, + } +} + +func (page *booleanPage) makeValue(v bool) Value { + value := makeValueBoolean(v) + value.columnIndex = page.columnIndex + return value +} + +type booleanPageValues struct { + page *booleanPage + offset int +} + +func (r *booleanPageValues) ReadBooleans(values []bool) (n int, err error) { + for n < len(values) && r.offset < int(r.page.numValues) { + values[n] = r.page.valueAt(r.offset) + r.offset++ + n++ + } + if r.offset == int(r.page.numValues) { + err = io.EOF + } + return n, err +} + +func (r *booleanPageValues) ReadValues(values []Value) (n int, err error) { + for n < len(values) && r.offset < int(r.page.numValues) { + values[n] = r.page.makeValue(r.page.valueAt(r.offset)) + r.offset++ + n++ + } + if r.offset == int(r.page.numValues) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_bounds.go b/vendor/github.com/parquet-go/parquet-go/page_bounds.go new file mode 100644 index 00000000000..8278e25ce00 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_bounds.go @@ -0,0 +1,25 @@ +package parquet + +import "bytes" + +func boundsFixedLenByteArray(data []byte, size int) (min, max []byte) { + if len(data) > 0 { + min = data[:size] + max = data[:size] + + for i, j := size, 2*size; j <= len(data); { + item := data[i:j] + + if bytes.Compare(item, min) < 0 { + min = item + } + if bytes.Compare(item, max) > 0 { + max = item + } + + i += size + j += size + } + } + return min, max +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_bounds_amd64.go 
b/vendor/github.com/parquet-go/parquet-go/page_bounds_amd64.go new file mode 100644 index 00000000000..9cb513b407c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_bounds_amd64.go @@ -0,0 +1,129 @@ +//go:build !purego + +package parquet + +// The min-max algorithms combine looking for the min and max values in a single +// pass over the data. While the behavior is the same as calling functions to +// look for the min and max values independently, doing both operations at the +// same time means that we only load the data from memory once. When working on +// large arrays the algorithms are limited by memory bandwidth, computing both +// the min and max together shrinks by half the amount of data read from memory. +// +// The following benchmarks results were highlighting the benefits of combining +// the min-max search, compared to calling the min and max functions separately: +// +// name old time/op new time/op delta +// BoundsInt64/10240KiB 590µs ±15% 330µs ±10% -44.01% (p=0.000 n=10+10) +// +// name old speed new speed delta +// BoundsInt64/10240KiB 17.9GB/s ±13% 31.8GB/s ±11% +78.13% (p=0.000 n=10+10) +// +// As expected, since the functions are memory-bound in those cases, and load +// half as much data, we see significant improvements. The gains are not 2x because +// running more AVX-512 instructions in the tight loops causes more contention +// on CPU ports. 
+// +// Optimizations being trade offs, using min/max functions independently appears +// to yield better throughput when the data resides in CPU caches: +// +// name old time/op new time/op delta +// BoundsInt64/4KiB 52.1ns ± 0% 46.2ns ± 1% -12.65% (p=0.000 n=10+10) +// +// name old speed new speed delta +// BoundsInt64/4KiB 78.6GB/s ± 0% 88.6GB/s ± 1% +11.23% (p=0.000 n=10+10) +// +// The probable explanation is that in those cases the algorithms are not +// memory-bound anymore, but limited by contention on CPU ports, and the +// individual min/max functions are able to better parallelize the work due +// to running less instructions per loop. The performance starts to equalize +// around 256KiB, and degrade beyond 1MiB, so we use this threshold to determine +// which approach to prefer. +const combinedBoundsThreshold = 1 * 1024 * 1024 + +//go:noescape +func combinedBoundsBool(data []bool) (min, max bool) + +//go:noescape +func combinedBoundsInt32(data []int32) (min, max int32) + +//go:noescape +func combinedBoundsInt64(data []int64) (min, max int64) + +//go:noescape +func combinedBoundsUint32(data []uint32) (min, max uint32) + +//go:noescape +func combinedBoundsUint64(data []uint64) (min, max uint64) + +//go:noescape +func combinedBoundsFloat32(data []float32) (min, max float32) + +//go:noescape +func combinedBoundsFloat64(data []float64) (min, max float64) + +//go:noescape +func combinedBoundsBE128(data [][16]byte) (min, max []byte) + +func boundsInt32(data []int32) (min, max int32) { + if 4*len(data) >= combinedBoundsThreshold { + return combinedBoundsInt32(data) + } + min = minInt32(data) + max = maxInt32(data) + return +} + +func boundsInt64(data []int64) (min, max int64) { + if 8*len(data) >= combinedBoundsThreshold { + return combinedBoundsInt64(data) + } + min = minInt64(data) + max = maxInt64(data) + return +} + +func boundsUint32(data []uint32) (min, max uint32) { + if 4*len(data) >= combinedBoundsThreshold { + return combinedBoundsUint32(data) + } + 
min = minUint32(data) + max = maxUint32(data) + return +} + +func boundsUint64(data []uint64) (min, max uint64) { + if 8*len(data) >= combinedBoundsThreshold { + return combinedBoundsUint64(data) + } + min = minUint64(data) + max = maxUint64(data) + return +} + +func boundsFloat32(data []float32) (min, max float32) { + if 4*len(data) >= combinedBoundsThreshold { + return combinedBoundsFloat32(data) + } + min = minFloat32(data) + max = maxFloat32(data) + return +} + +func boundsFloat64(data []float64) (min, max float64) { + if 8*len(data) >= combinedBoundsThreshold { + return combinedBoundsFloat64(data) + } + min = minFloat64(data) + max = maxFloat64(data) + return +} + +func boundsBE128(data [][16]byte) (min, max []byte) { + // TODO: min/max BE128 is really complex to vectorize, and the returns + // were barely better than doing the min and max independently, for all + // input sizes. We should revisit if we find ways to improve the min or + // max algorithms which can be transposed to the combined version. 
+ min = minBE128(data) + max = maxBE128(data) + return +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_bounds_amd64.s b/vendor/github.com/parquet-go/parquet-go/page_bounds_amd64.s new file mode 100644 index 00000000000..c6d172a5e1a --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_bounds_amd64.s @@ -0,0 +1,551 @@ +//go:build !purego + +#include "textflag.h" + +#define bswap128lo 0x08080A0B0C0D0E0F +#define bswap128hi 0x0001020304050607 + +DATA bswap128+0(SB)/8, $bswap128lo +DATA bswap128+8(SB)/8, $bswap128hi +DATA bswap128+16(SB)/8, $bswap128lo +DATA bswap128+24(SB)/8, $bswap128hi +DATA bswap128+32(SB)/8, $bswap128lo +DATA bswap128+40(SB)/8, $bswap128hi +DATA bswap128+48(SB)/8, $bswap128lo +DATA bswap128+56(SB)/8, $bswap128hi +GLOBL bswap128(SB), RODATA|NOPTR, $64 + +DATA indexes128+0(SB)/8, $0 +DATA indexes128+8(SB)/8, $0 +DATA indexes128+16(SB)/8, $1 +DATA indexes128+24(SB)/8, $1 +DATA indexes128+32(SB)/8, $2 +DATA indexes128+40(SB)/8, $2 +DATA indexes128+48(SB)/8, $3 +DATA indexes128+56(SB)/8, $3 +GLOBL indexes128(SB), RODATA|NOPTR, $64 + +DATA swap64+0(SB)/8, $4 +DATA swap64+8(SB)/8, $5 +DATA swap64+16(SB)/8, $6 +DATA swap64+24(SB)/8, $7 +DATA swap64+32(SB)/8, $2 +DATA swap64+40(SB)/8, $3 +DATA swap64+48(SB)/8, $0 +DATA swap64+56(SB)/8, $1 +GLOBL swap64(SB), RODATA|NOPTR, $64 + +DATA swap32+0(SB)/4, $8 +DATA swap32+4(SB)/4, $9 +DATA swap32+8(SB)/4, $10 +DATA swap32+12(SB)/4, $11 +DATA swap32+16(SB)/4, $12 +DATA swap32+20(SB)/4, $13 +DATA swap32+24(SB)/4, $14 +DATA swap32+28(SB)/4, $15 +DATA swap32+32(SB)/4, $4 +DATA swap32+36(SB)/4, $5 +DATA swap32+40(SB)/4, $6 +DATA swap32+44(SB)/4, $7 +DATA swap32+48(SB)/4, $2 +DATA swap32+52(SB)/4, $3 +DATA swap32+56(SB)/4, $0 +DATA swap32+60(SB)/4, $1 +GLOBL swap32(SB), RODATA|NOPTR, $64 + +// func combinedBoundsInt32(data []int32) (min, max int32) +TEXT ·combinedBoundsInt32(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ R8, R8 + XORQ R9, R9 + + CMPQ CX, $0 + JE 
done + XORQ SI, SI + MOVLQZX (AX), R8 // min + MOVLQZX (AX), R9 // max + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTD (AX), Z0 + VPBROADCASTD (AX), Z3 +loop32: + VMOVDQU32 (AX)(SI*4), Z1 + VMOVDQU32 64(AX)(SI*4), Z2 + VPMINSD Z1, Z0, Z0 + VPMINSD Z2, Z0, Z0 + VPMAXSD Z1, Z3, Z3 + VPMAXSD Z2, Z3, Z3 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VMOVDQU32 swap32+0(SB), Z2 + VPERMI2D Z0, Z0, Z1 + VPERMI2D Z3, Z3, Z2 + VPMINSD Y1, Y0, Y0 + VPMAXSD Y2, Y3, Y3 + + VMOVDQU32 swap32+32(SB), Y1 + VMOVDQU32 swap32+32(SB), Y2 + VPERMI2D Y0, Y0, Y1 + VPERMI2D Y3, Y3, Y2 + VPMINSD X1, X0, X0 + VPMAXSD X2, X3, X3 + + VMOVDQU32 swap32+48(SB), X1 + VMOVDQU32 swap32+48(SB), X2 + VPERMI2D X0, X0, X1 + VPERMI2D X3, X3, X2 + VPMINSD X1, X0, X0 + VPMAXSD X2, X3, X3 + VZEROUPPER + + MOVQ X0, BX + MOVQ X3, DX + MOVL BX, R8 + MOVL DX, R9 + SHRQ $32, BX + SHRQ $32, DX + CMPL BX, R8 + CMOVLLT BX, R8 + CMPL DX, R9 + CMOVLGT DX, R9 + + CMPQ SI, CX + JE done +loop: + MOVLQZX (AX)(SI*4), DX + CMPL DX, R8 + CMOVLLT DX, R8 + CMPL DX, R9 + CMOVLGT DX, R9 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVL R8, min+24(FP) + MOVL R9, max+28(FP) + RET + +// func combinedBoundsInt64(data []int64) (min, max int64) +TEXT ·combinedBoundsInt64(SB), NOSPLIT, $-40 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ R8, R8 + XORQ R9, R9 + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVQ (AX), R8 // min + MOVQ (AX), R9 // max + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $16 + JB loop + + MOVQ CX, DI + SHRQ $4, DI + SHLQ $4, DI + VPBROADCASTQ (AX), Z0 + VPBROADCASTQ (AX), Z3 +loop16: + VMOVDQU64 (AX)(SI*8), Z1 + VMOVDQU64 64(AX)(SI*8), Z2 + VPMINSQ Z1, Z0, Z0 + VPMINSQ Z2, Z0, Z0 + VPMAXSQ Z1, Z3, Z3 + VPMAXSQ Z2, Z3, Z3 + ADDQ $16, SI + CMPQ SI, DI + JNE loop16 + + VMOVDQU32 swap32+0(SB), Z1 + VMOVDQU32 swap32+0(SB), Z2 + VPERMI2D Z0, Z0, Z1 + VPERMI2D Z3, Z3, Z2 + VPMINSQ Y1, Y0, Y0 + VPMAXSQ 
Y2, Y3, Y3 + + VMOVDQU32 swap32+32(SB), Y1 + VMOVDQU32 swap32+32(SB), Y2 + VPERMI2D Y0, Y0, Y1 + VPERMI2D Y3, Y3, Y2 + VPMINSQ X1, X0, X0 + VPMAXSQ X2, X3, X3 + + VMOVDQU32 swap32+48(SB), X1 + VMOVDQU32 swap32+48(SB), X2 + VPERMI2D X0, X0, X1 + VPERMI2D X3, X3, X2 + VPMINSQ X1, X0, X0 + VPMAXSQ X2, X3, X3 + VZEROUPPER + + MOVQ X0, R8 + MOVQ X3, R9 + CMPQ SI, CX + JE done +loop: + MOVQ (AX)(SI*8), DX + CMPQ DX, R8 + CMOVQLT DX, R8 + CMPQ DX, R9 + CMOVQGT DX, R9 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVQ R8, min+24(FP) + MOVQ R9, max+32(FP) + RET + +// func combinedBoundsUint32(data []uint32) (min, max uint32) +TEXT ·combinedBoundsUint32(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ R8, R8 + XORQ R9, R9 + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVLQZX (AX), R8 // min + MOVLQZX (AX), R9 // max + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTD (AX), Z0 + VPBROADCASTD (AX), Z3 +loop32: + VMOVDQU32 (AX)(SI*4), Z1 + VMOVDQU32 64(AX)(SI*4), Z2 + VPMINUD Z1, Z0, Z0 + VPMINUD Z2, Z0, Z0 + VPMAXUD Z1, Z3, Z3 + VPMAXUD Z2, Z3, Z3 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VMOVDQU32 swap32+0(SB), Z2 + VPERMI2D Z0, Z0, Z1 + VPERMI2D Z3, Z3, Z2 + VPMINUD Y1, Y0, Y0 + VPMAXUD Y2, Y3, Y3 + + VMOVDQU32 swap32+32(SB), Y1 + VMOVDQU32 swap32+32(SB), Y2 + VPERMI2D Y0, Y0, Y1 + VPERMI2D Y3, Y3, Y2 + VPMINUD X1, X0, X0 + VPMAXUD X2, X3, X3 + + VMOVDQU32 swap32+48(SB), X1 + VMOVDQU32 swap32+48(SB), X2 + VPERMI2D X0, X0, X1 + VPERMI2D X3, X3, X2 + VPMINUD X1, X0, X0 + VPMAXUD X2, X3, X3 + VZEROUPPER + + MOVQ X0, BX + MOVQ X3, DX + MOVL BX, R8 + MOVL DX, R9 + SHRQ $32, BX + SHRQ $32, DX + CMPL BX, R8 + CMOVLCS BX, R8 + CMPL DX, R9 + CMOVLHI DX, R9 + + CMPQ SI, CX + JE done +loop: + MOVLQZX (AX)(SI*4), DX + CMPL DX, R8 + CMOVLCS DX, R8 + CMPL DX, R9 + CMOVLHI DX, R9 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVL R8, min+24(FP) + MOVL R9, 
max+28(FP) + RET + +// func combinedBoundsUint64(data []uint64) (min, max uint64) +TEXT ·combinedBoundsUint64(SB), NOSPLIT, $-40 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ R8, R8 + XORQ R9, R9 + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVQ (AX), R8 // min + MOVQ (AX), R9 // max + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $16 + JB loop + + MOVQ CX, DI + SHRQ $4, DI + SHLQ $4, DI + VPBROADCASTQ (AX), Z0 + VPBROADCASTQ (AX), Z3 +loop16: + VMOVDQU64 (AX)(SI*8), Z1 + VMOVDQU64 64(AX)(SI*8), Z2 + VPMINUQ Z1, Z0, Z0 + VPMINUQ Z2, Z0, Z0 + VPMAXUQ Z1, Z3, Z3 + VPMAXUQ Z2, Z3, Z3 + ADDQ $16, SI + CMPQ SI, DI + JNE loop16 + + VMOVDQU32 swap32+0(SB), Z1 + VMOVDQU32 swap32+0(SB), Z2 + VPERMI2D Z0, Z0, Z1 + VPERMI2D Z3, Z3, Z2 + VPMINUQ Y1, Y0, Y0 + VPMAXUQ Y2, Y3, Y3 + + VMOVDQU32 swap32+32(SB), Y1 + VMOVDQU32 swap32+32(SB), Y2 + VPERMI2D Y0, Y0, Y1 + VPERMI2D Y3, Y3, Y2 + VPMINUQ X1, X0, X0 + VPMAXUQ X2, X3, X3 + + VMOVDQU32 swap32+48(SB), X1 + VMOVDQU32 swap32+48(SB), X2 + VPERMI2D X0, X0, X1 + VPERMI2D X3, X3, X2 + VPMINUQ X1, X0, X0 + VPMAXUQ X2, X3, X3 + VZEROUPPER + + MOVQ X0, R8 + MOVQ X3, R9 + CMPQ SI, CX + JE done +loop: + MOVQ (AX)(SI*8), DX + CMPQ DX, R8 + CMOVQCS DX, R8 + CMPQ DX, R9 + CMOVQHI DX, R9 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVQ R8, min+24(FP) + MOVQ R9, max+32(FP) + RET + +// func combinedBoundsFloat32(data []float32) (min, max float32) +TEXT ·combinedBoundsFloat32(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ R8, R8 + XORQ R9, R9 + + CMPQ CX, $0 + JE done + XORPS X0, X0 + XORPS X1, X1 + XORQ SI, SI + MOVLQZX (AX), R8 // min + MOVLQZX (AX), R9 // max + MOVQ R8, X0 + MOVQ R9, X1 + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTD (AX), Z0 + VPBROADCASTD (AX), Z3 +loop32: + VMOVDQU32 (AX)(SI*4), Z1 + VMOVDQU32 64(AX)(SI*4), Z2 + VMINPS Z1, Z0, Z0 + VMINPS Z2, Z0, Z0 + VMAXPS Z1, Z3, Z3 + VMAXPS Z2, Z3, Z3 + ADDQ 
$32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VMOVDQU32 swap32+0(SB), Z2 + VPERMI2D Z0, Z0, Z1 + VPERMI2D Z3, Z3, Z2 + VMINPS Y1, Y0, Y0 + VMAXPS Y2, Y3, Y3 + + VMOVDQU32 swap32+32(SB), Y1 + VMOVDQU32 swap32+32(SB), Y2 + VPERMI2D Y0, Y0, Y1 + VPERMI2D Y3, Y3, Y2 + VMINPS X1, X0, X0 + VMAXPS X2, X3, X3 + + VMOVDQU32 swap32+48(SB), X1 + VMOVDQU32 swap32+48(SB), X2 + VPERMI2D X0, X0, X1 + VPERMI2D X3, X3, X2 + VMINPS X1, X0, X0 + VMAXPS X2, X3, X3 + VZEROUPPER + + MOVAPS X0, X1 + MOVAPS X3, X2 + + PSRLQ $32, X1 + MOVQ X0, R8 + MOVQ X1, R10 + UCOMISS X0, X1 + CMOVLCS R10, R8 + + PSRLQ $32, X2 + MOVQ X3, R9 + MOVQ X2, R11 + UCOMISS X3, X2 + CMOVLHI R11, R9 + + CMPQ SI, CX + JE done + MOVQ R8, X0 + MOVQ R9, X1 +loop: + MOVLQZX (AX)(SI*4), DX + MOVQ DX, X2 + UCOMISS X0, X2 + CMOVLCS DX, R8 + UCOMISS X1, X2 + CMOVLHI DX, R9 + MOVQ R8, X0 + MOVQ R9, X1 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVL R8, min+24(FP) + MOVL R9, max+28(FP) + RET + +// func combinedBoundsFloat64(data []float64) (min, max float64) +TEXT ·combinedBoundsFloat64(SB), NOSPLIT, $-40 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ R8, R8 + XORQ R9, R9 + + CMPQ CX, $0 + JE done + XORPD X0, X0 + XORPD X1, X1 + XORQ SI, SI + MOVQ (AX), R8 // min + MOVQ (AX), R9 // max + MOVQ R8, X0 + MOVQ R9, X1 + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $16 + JB loop + + MOVQ CX, DI + SHRQ $4, DI + SHLQ $4, DI + VPBROADCASTQ (AX), Z0 + VPBROADCASTQ (AX), Z3 +loop16: + VMOVDQU64 (AX)(SI*8), Z1 + VMOVDQU64 64(AX)(SI*8), Z2 + VMINPD Z1, Z0, Z0 + VMINPD Z2, Z0, Z0 + VMAXPD Z1, Z3, Z3 + VMAXPD Z2, Z3, Z3 + ADDQ $16, SI + CMPQ SI, DI + JNE loop16 + + VMOVDQU64 swap32+0(SB), Z1 + VMOVDQU64 swap32+0(SB), Z2 + VPERMI2D Z0, Z0, Z1 + VPERMI2D Z3, Z3, Z2 + VMINPD Y1, Y0, Y0 + VMAXPD Y2, Y3, Y3 + + VMOVDQU64 swap32+32(SB), Y1 + VMOVDQU64 swap32+32(SB), Y2 + VPERMI2D Y0, Y0, Y1 + VPERMI2D Y3, Y3, Y2 + VMINPD X1, X0, X0 + VMAXPD X2, X3, X3 + + VMOVDQU64 swap32+48(SB), X1 + VMOVDQU64 
swap32+48(SB), X2 + VPERMI2D X0, X0, X1 + VPERMI2D X3, X3, X2 + VMINPD X1, X0, X0 + VMAXPD X2, X3, X1 + VZEROUPPER + + MOVQ X0, R8 + MOVQ X1, R9 + CMPQ SI, CX + JE done +loop: + MOVQ (AX)(SI*8), DX + MOVQ DX, X2 + UCOMISD X0, X2 + CMOVQCS DX, R8 + UCOMISD X1, X2 + CMOVQHI DX, R9 + MOVQ R8, X0 + MOVQ R9, X1 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVQ R8, min+24(FP) + MOVQ R9, max+32(FP) + RET diff --git a/vendor/github.com/parquet-go/parquet-go/page_bounds_purego.go b/vendor/github.com/parquet-go/parquet-go/page_bounds_purego.go new file mode 100644 index 00000000000..0b5a2474f82 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_bounds_purego.go @@ -0,0 +1,143 @@ +//go:build purego || !amd64 + +package parquet + +import ( + "encoding/binary" +) + +func boundsInt32(data []int32) (min, max int32) { + if len(data) > 0 { + min = data[0] + max = data[0] + + for _, v := range data[1:] { + if v < min { + min = v + } + if v > max { + max = v + } + } + } + return min, max +} + +func boundsInt64(data []int64) (min, max int64) { + if len(data) > 0 { + min = data[0] + max = data[0] + + for _, v := range data[1:] { + if v < min { + min = v + } + if v > max { + max = v + } + } + } + return min, max +} + +func boundsUint32(data []uint32) (min, max uint32) { + if len(data) > 0 { + min = data[0] + max = data[0] + + for _, v := range data[1:] { + if v < min { + min = v + } + if v > max { + max = v + } + } + } + return min, max +} + +func boundsUint64(data []uint64) (min, max uint64) { + if len(data) > 0 { + min = data[0] + max = data[0] + + for _, v := range data[1:] { + if v < min { + min = v + } + if v > max { + max = v + } + } + } + return min, max +} + +func boundsFloat32(data []float32) (min, max float32) { + if len(data) > 0 { + min = data[0] + max = data[0] + + for _, v := range data[1:] { + if v < min { + min = v + } + if v > max { + max = v + } + } + } + return min, max +} + +func boundsFloat64(data []float64) (min, max float64) { + if len(data) > 0 { + 
min = data[0] + max = data[0] + + for _, v := range data[1:] { + if v < min { + min = v + } + if v > max { + max = v + } + } + } + return min, max +} + +func boundsBE128(data [][16]byte) (min, max []byte) { + if len(data) > 0 { + minHi := binary.BigEndian.Uint64(data[0][:8]) + maxHi := minHi + minIndex := 0 + maxIndex := 0 + for i := 1; i < len(data); i++ { + hi := binary.BigEndian.Uint64(data[i][:8]) + lo := binary.BigEndian.Uint64(data[i][8:]) + switch { + case hi < minHi: + minHi, minIndex = hi, i + case hi == minHi: + minLo := binary.BigEndian.Uint64(data[minIndex][8:]) + if lo < minLo { + minHi, minIndex = hi, i + } + } + switch { + case hi > maxHi: + maxHi, maxIndex = hi, i + case hi == maxHi: + maxLo := binary.BigEndian.Uint64(data[maxIndex][8:]) + if lo > maxLo { + maxHi, maxIndex = hi, i + } + } + } + min = data[minIndex][:] + max = data[maxIndex][:] + } + return min, max +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_byte_array.go b/vendor/github.com/parquet-go/parquet-go/page_byte_array.go new file mode 100644 index 00000000000..dbe54073269 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_byte_array.go @@ -0,0 +1,204 @@ +package parquet + +import ( + "bytes" + "io" + + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/encoding/plain" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type byteArrayPage struct { + typ Type + values memory.SliceBuffer[byte] + offsets memory.SliceBuffer[uint32] + columnIndex int16 +} + +func newByteArrayPage(typ Type, columnIndex int16, numValues int32, values encoding.Values) *byteArrayPage { + data, offsets := values.ByteArray() + return &byteArrayPage{ + typ: typ, + values: memory.SliceBufferFrom(data), + offsets: memory.SliceBufferFrom(offsets[:numValues+1]), + columnIndex: ^columnIndex, + } +} + +func (page *byteArrayPage) Type() Type { return page.typ } + +func (page *byteArrayPage) Column() int { return int(^page.columnIndex) } + +func (page 
*byteArrayPage) Dictionary() Dictionary { return nil } + +func (page *byteArrayPage) NumRows() int64 { return int64(page.len()) } + +func (page *byteArrayPage) NumValues() int64 { return int64(page.len()) } + +func (page *byteArrayPage) NumNulls() int64 { return 0 } + +func (page *byteArrayPage) Size() int64 { + return int64(page.values.Len()) + 4*int64(page.offsets.Len()) +} + +func (page *byteArrayPage) RepetitionLevels() []byte { return nil } + +func (page *byteArrayPage) DefinitionLevels() []byte { return nil } + +func (page *byteArrayPage) Data() encoding.Values { + return encoding.ByteArrayValues(page.values.Slice(), page.offsets.Slice()) +} + +func (page *byteArrayPage) Values() ValueReader { return &byteArrayPageValues{page: page} } + +func (page *byteArrayPage) len() int { return page.offsets.Len() - 1 } + +func (page *byteArrayPage) index(i int) []byte { + offsets := page.offsets.Slice() + values := page.values.Slice() + j := offsets[i+0] + k := offsets[i+1] + return values[j:k:k] +} + +func (page *byteArrayPage) min() (min []byte) { + if n := page.len(); n > 0 { + min = page.index(0) + + for i := 1; i < n; i++ { + v := page.index(i) + + if bytes.Compare(v, min) < 0 { + min = v + } + } + } + return min +} + +func (page *byteArrayPage) max() (max []byte) { + if n := page.len(); n > 0 { + max = page.index(0) + + for i := 1; i < n; i++ { + v := page.index(i) + + if bytes.Compare(v, max) > 0 { + max = v + } + } + } + return max +} + +func (page *byteArrayPage) bounds() (min, max []byte) { + if n := page.len(); n > 0 { + min = page.index(0) + max = min + + for i := 1; i < n; i++ { + v := page.index(i) + + switch { + case bytes.Compare(v, min) < 0: + min = v + case bytes.Compare(v, max) > 0: + max = v + } + } + } + return min, max +} + +func (page *byteArrayPage) Bounds() (min, max Value, ok bool) { + if ok = page.offsets.Len() > 1; ok { + minBytes, maxBytes := page.bounds() + min = page.makeValueBytes(minBytes) + max = page.makeValueBytes(maxBytes) + } + 
return min, max, ok +} + +func (page *byteArrayPage) cloneValues() memory.SliceBuffer[byte] { + return page.values.Clone() +} + +func (page *byteArrayPage) cloneOffsets() memory.SliceBuffer[uint32] { + return page.offsets.Clone() +} + +func (page *byteArrayPage) Slice(i, j int64) Page { + return &byteArrayPage{ + typ: page.typ, + values: page.values, + offsets: memory.SliceBufferFrom(page.offsets.Slice()[i : j+1]), + columnIndex: page.columnIndex, + } +} + +func (page *byteArrayPage) makeValueBytes(v []byte) Value { + value := makeValueBytes(ByteArray, v) + value.columnIndex = page.columnIndex + return value +} + +func (page *byteArrayPage) makeValueString(v string) Value { + value := makeValueString(ByteArray, v) + value.columnIndex = page.columnIndex + return value +} + +type byteArrayPageValues struct { + page *byteArrayPage + offset int +} + +func (r *byteArrayPageValues) Read(b []byte) (int, error) { + _, n, err := r.readByteArrays(b) + return n, err +} + +func (r *byteArrayPageValues) ReadRequired(values []byte) (int, error) { + return r.ReadByteArrays(values) +} + +func (r *byteArrayPageValues) ReadByteArrays(values []byte) (int, error) { + n, _, err := r.readByteArrays(values) + return n, err +} + +func (r *byteArrayPageValues) readByteArrays(values []byte) (c, n int, err error) { + numValues := r.page.len() + for r.offset < numValues { + b := r.page.index(r.offset) + k := plain.ByteArrayLengthSize + len(b) + if k > (len(values) - n) { + break + } + plain.PutByteArrayLength(values[n:], len(b)) + n += plain.ByteArrayLengthSize + n += copy(values[n:], b) + r.offset++ + c++ + } + if r.offset == numValues { + err = io.EOF + } else if n == 0 && len(values) > 0 { + err = io.ErrShortBuffer + } + return c, n, err +} + +func (r *byteArrayPageValues) ReadValues(values []Value) (n int, err error) { + numValues := r.page.len() + for n < len(values) && r.offset < numValues { + values[n] = r.page.makeValueBytes(r.page.index(r.offset)) + r.offset++ + n++ + } + if r.offset 
== numValues { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_double.go b/vendor/github.com/parquet-go/parquet-go/page_double.go new file mode 100644 index 00000000000..92f6343843f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_double.go @@ -0,0 +1,107 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type doublePage struct { + typ Type + values memory.SliceBuffer[float64] + columnIndex int16 +} + +func newDoublePage(typ Type, columnIndex int16, numValues int32, values encoding.Values) *doublePage { + return &doublePage{ + typ: typ, + values: memory.SliceBufferFrom(values.Double()[:numValues]), + columnIndex: ^columnIndex, + } +} + +func (page *doublePage) Type() Type { return page.typ } + +func (page *doublePage) Column() int { return int(^page.columnIndex) } + +func (page *doublePage) Dictionary() Dictionary { return nil } + +func (page *doublePage) NumRows() int64 { return int64(page.values.Len()) } + +func (page *doublePage) NumValues() int64 { return int64(page.values.Len()) } + +func (page *doublePage) NumNulls() int64 { return 0 } + +func (page *doublePage) Size() int64 { return 8 * int64(page.values.Len()) } + +func (page *doublePage) RepetitionLevels() []byte { return nil } + +func (page *doublePage) DefinitionLevels() []byte { return nil } + +func (page *doublePage) Data() encoding.Values { return encoding.DoubleValues(page.values.Slice()) } + +func (page *doublePage) Values() ValueReader { return &doublePageValues{page: page} } + +func (page *doublePage) min() float64 { return minFloat64(page.values.Slice()) } + +func (page *doublePage) max() float64 { return maxFloat64(page.values.Slice()) } + +func (page *doublePage) bounds() (min, max float64) { return boundsFloat64(page.values.Slice()) } + +func (page *doublePage) Bounds() (min, max Value, ok 
bool) { + if ok = page.values.Len() > 0; ok { + minFloat, maxFloat := page.bounds() + min = page.makeValue(minFloat) + max = page.makeValue(maxFloat) + } + return min, max, ok +} + +func (page *doublePage) Slice(i, j int64) Page { + return &doublePage{ + typ: page.typ, + values: memory.SliceBufferFrom(page.values.Slice()[i:j]), + columnIndex: page.columnIndex, + } +} + +func (page *doublePage) makeValue(v float64) Value { + value := makeValueDouble(v) + value.columnIndex = page.columnIndex + return value +} + +type doublePageValues struct { + page *doublePage + offset int +} + +func (r *doublePageValues) Read(b []byte) (n int, err error) { + n, err = r.ReadDoubles(unsafecast.Slice[float64](b)) + return 8 * n, err +} + +func (r *doublePageValues) ReadDoubles(values []float64) (n int, err error) { + pageValues := r.page.values.Slice() + n = copy(values, pageValues[r.offset:]) + r.offset += n + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} + +func (r *doublePageValues) ReadValues(values []Value) (n int, err error) { + pageValues := r.page.values.Slice() + for n < len(values) && r.offset < len(pageValues) { + values[n] = r.page.makeValue(pageValues[r.offset]) + r.offset++ + n++ + } + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_fixed_len_byte_array.go b/vendor/github.com/parquet-go/parquet-go/page_fixed_len_byte_array.go new file mode 100644 index 00000000000..3ad46a9d809 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_fixed_len_byte_array.go @@ -0,0 +1,133 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type fixedLenByteArrayPage struct { + typ Type + data memory.SliceBuffer[byte] + size int + columnIndex int16 +} + +func newFixedLenByteArrayPage(typ Type, columnIndex int16, numValues int32, values encoding.Values) *fixedLenByteArrayPage { + data, 
size := values.FixedLenByteArray() + return &fixedLenByteArrayPage{ + typ: typ, + data: memory.SliceBufferFrom(data[:int(numValues)*size]), + size: size, + columnIndex: ^columnIndex, + } +} + +func (page *fixedLenByteArrayPage) Type() Type { return page.typ } + +func (page *fixedLenByteArrayPage) Column() int { return int(^page.columnIndex) } + +func (page *fixedLenByteArrayPage) Dictionary() Dictionary { return nil } + +func (page *fixedLenByteArrayPage) NumRows() int64 { return int64(page.data.Len() / page.size) } + +func (page *fixedLenByteArrayPage) NumValues() int64 { return int64(page.data.Len() / page.size) } + +func (page *fixedLenByteArrayPage) NumNulls() int64 { return 0 } + +func (page *fixedLenByteArrayPage) Size() int64 { return int64(page.data.Len()) } + +func (page *fixedLenByteArrayPage) RepetitionLevels() []byte { return nil } + +func (page *fixedLenByteArrayPage) DefinitionLevels() []byte { return nil } + +func (page *fixedLenByteArrayPage) Data() encoding.Values { + return encoding.FixedLenByteArrayValues(page.data.Slice(), page.size) +} + +func (page *fixedLenByteArrayPage) Values() ValueReader { + return &fixedLenByteArrayPageValues{page: page} +} + +func (page *fixedLenByteArrayPage) min() []byte { + return minFixedLenByteArray(page.data.Slice(), page.size) +} + +func (page *fixedLenByteArrayPage) max() []byte { + return maxFixedLenByteArray(page.data.Slice(), page.size) +} + +func (page *fixedLenByteArrayPage) bounds() (min, max []byte) { + return boundsFixedLenByteArray(page.data.Slice(), page.size) +} + +func (page *fixedLenByteArrayPage) Bounds() (min, max Value, ok bool) { + if ok = page.data.Len() > 0; ok { + minBytes, maxBytes := page.bounds() + min = page.makeValueBytes(minBytes) + max = page.makeValueBytes(maxBytes) + } + return min, max, ok +} + +func (page *fixedLenByteArrayPage) Slice(i, j int64) Page { + data := page.data.Slice() + return &fixedLenByteArrayPage{ + typ: page.typ, + data: 
memory.SliceBufferFrom(data[i*int64(page.size) : j*int64(page.size)]), + size: page.size, + columnIndex: page.columnIndex, + } +} + +func (page *fixedLenByteArrayPage) makeValueBytes(v []byte) Value { + value := makeValueBytes(FixedLenByteArray, v) + value.columnIndex = page.columnIndex + return value +} + +func (page *fixedLenByteArrayPage) makeValueString(v string) Value { + value := makeValueString(FixedLenByteArray, v) + value.columnIndex = page.columnIndex + return value +} + +type fixedLenByteArrayPageValues struct { + page *fixedLenByteArrayPage + offset int +} + +func (r *fixedLenByteArrayPageValues) Read(b []byte) (n int, err error) { + n, err = r.ReadFixedLenByteArrays(b) + return n * r.page.size, err +} + +func (r *fixedLenByteArrayPageValues) ReadRequired(values []byte) (int, error) { + return r.ReadFixedLenByteArrays(values) +} + +func (r *fixedLenByteArrayPageValues) ReadFixedLenByteArrays(values []byte) (n int, err error) { + data := r.page.data.Slice() + n = copy(values, data[r.offset:]) / r.page.size + r.offset += n * r.page.size + if r.offset == len(data) { + err = io.EOF + } else if n == 0 && len(values) > 0 { + err = io.ErrShortBuffer + } + return n, err +} + +func (r *fixedLenByteArrayPageValues) ReadValues(values []Value) (n int, err error) { + data := r.page.data.Slice() + for n < len(values) && r.offset < len(data) { + values[n] = r.page.makeValueBytes(data[r.offset : r.offset+r.page.size]) + r.offset += r.page.size + n++ + } + if r.offset == len(data) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_float.go b/vendor/github.com/parquet-go/parquet-go/page_float.go new file mode 100644 index 00000000000..3c6c092dd8e --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_float.go @@ -0,0 +1,107 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" +) + 
+type floatPage struct { + typ Type + values memory.SliceBuffer[float32] + columnIndex int16 +} + +func newFloatPage(typ Type, columnIndex int16, numValues int32, values encoding.Values) *floatPage { + return &floatPage{ + typ: typ, + values: memory.SliceBufferFrom(values.Float()[:numValues]), + columnIndex: ^columnIndex, + } +} + +func (page *floatPage) Type() Type { return page.typ } + +func (page *floatPage) Column() int { return int(^page.columnIndex) } + +func (page *floatPage) Dictionary() Dictionary { return nil } + +func (page *floatPage) NumRows() int64 { return int64(page.values.Len()) } + +func (page *floatPage) NumValues() int64 { return int64(page.values.Len()) } + +func (page *floatPage) NumNulls() int64 { return 0 } + +func (page *floatPage) Size() int64 { return 4 * int64(page.values.Len()) } + +func (page *floatPage) RepetitionLevels() []byte { return nil } + +func (page *floatPage) DefinitionLevels() []byte { return nil } + +func (page *floatPage) Data() encoding.Values { return encoding.FloatValues(page.values.Slice()) } + +func (page *floatPage) Values() ValueReader { return &floatPageValues{page: page} } + +func (page *floatPage) min() float32 { return minFloat32(page.values.Slice()) } + +func (page *floatPage) max() float32 { return maxFloat32(page.values.Slice()) } + +func (page *floatPage) bounds() (min, max float32) { return boundsFloat32(page.values.Slice()) } + +func (page *floatPage) Bounds() (min, max Value, ok bool) { + if ok = page.values.Len() > 0; ok { + minFloat32, maxFloat32 := page.bounds() + min = page.makeValue(minFloat32) + max = page.makeValue(maxFloat32) + } + return min, max, ok +} + +func (page *floatPage) Slice(i, j int64) Page { + return &floatPage{ + typ: page.typ, + values: memory.SliceBufferFrom(page.values.Slice()[i:j]), + columnIndex: page.columnIndex, + } +} + +func (page *floatPage) makeValue(v float32) Value { + value := makeValueFloat(v) + value.columnIndex = page.columnIndex + return value +} + +type 
floatPageValues struct { + page *floatPage + offset int +} + +func (r *floatPageValues) Read(b []byte) (n int, err error) { + n, err = r.ReadFloats(unsafecast.Slice[float32](b)) + return 4 * n, err +} + +func (r *floatPageValues) ReadFloats(values []float32) (n int, err error) { + pageValues := r.page.values.Slice() + n = copy(values, pageValues[r.offset:]) + r.offset += n + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} + +func (r *floatPageValues) ReadValues(values []Value) (n int, err error) { + pageValues := r.page.values.Slice() + for n < len(values) && r.offset < len(pageValues) { + values[n] = r.page.makeValue(pageValues[r.offset]) + r.offset++ + n++ + } + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_header.go b/vendor/github.com/parquet-go/parquet-go/page_header.go new file mode 100644 index 00000000000..14f912b11ba --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_header.go @@ -0,0 +1,221 @@ +package parquet + +import ( + "fmt" + + "github.com/parquet-go/parquet-go/format" +) + +// PageHeader is an interface implemented by parquet page headers. +type PageHeader interface { + // Returns the number of values in the page (including nulls). + NumValues() int64 + + // Returns the page encoding. + Encoding() format.Encoding + + // Returns the parquet format page type. + PageType() format.PageType +} + +// DataPageHeader is a specialization of the PageHeader interface implemented by +// data pages. +type DataPageHeader interface { + PageHeader + + // Returns the encoding of the repetition level section. + RepetitionLevelEncoding() format.Encoding + + // Returns the encoding of the definition level section. + DefinitionLevelEncoding() format.Encoding + + // Returns the number of null values in the page. + NullCount() int64 + + // Returns the minimum value in the page based on the ordering rules of the + // column's logical type. 
+ // + // As an optimization, the method may return the same slice across multiple + // calls. Programs must treat the returned value as immutable to prevent + // unpredictable behaviors. + // + // If the page only contains only null values, an empty slice is returned. + MinValue() []byte + + // Returns the maximum value in the page based on the ordering rules of the + // column's logical type. + // + // As an optimization, the method may return the same slice across multiple + // calls. Programs must treat the returned value as immutable to prevent + // unpredictable behaviors. + // + // If the page only contains only null values, an empty slice is returned. + MaxValue() []byte +} + +// DictionaryPageHeader is an implementation of the PageHeader interface +// representing dictionary pages. +type DictionaryPageHeader struct { + header *format.DictionaryPageHeader +} + +func (dict DictionaryPageHeader) NumValues() int64 { + return int64(dict.header.NumValues) +} + +func (dict DictionaryPageHeader) Encoding() format.Encoding { + return dict.header.Encoding +} + +func (dict DictionaryPageHeader) PageType() format.PageType { + return format.DictionaryPage +} + +func (dict DictionaryPageHeader) IsSorted() bool { + return dict.header.IsSorted +} + +func (dict DictionaryPageHeader) String() string { + return fmt.Sprintf("DICTIONARY_PAGE_HEADER{NumValues=%d,Encoding=%s,IsSorted=%t}", + dict.header.NumValues, + dict.header.Encoding, + dict.header.IsSorted) +} + +// DataPageHeaderV1 is an implementation of the DataPageHeader interface +// representing data pages version 1. 
+type DataPageHeaderV1 struct { + header *format.DataPageHeader +} + +func (v1 DataPageHeaderV1) NumValues() int64 { + return int64(v1.header.NumValues) +} + +func (v1 DataPageHeaderV1) RepetitionLevelEncoding() format.Encoding { + return v1.header.RepetitionLevelEncoding +} + +func (v1 DataPageHeaderV1) DefinitionLevelEncoding() format.Encoding { + return v1.header.DefinitionLevelEncoding +} + +func (v1 DataPageHeaderV1) Encoding() format.Encoding { + return v1.header.Encoding +} + +func (v1 DataPageHeaderV1) PageType() format.PageType { + return format.DataPage +} + +func (v1 DataPageHeaderV1) NullCount() int64 { + return v1.header.Statistics.NullCount +} + +func (v1 DataPageHeaderV1) MinValue() []byte { + return v1.header.Statistics.MinValue +} + +func (v1 DataPageHeaderV1) MaxValue() []byte { + return v1.header.Statistics.MaxValue +} + +func (v1 DataPageHeaderV1) String() string { + return fmt.Sprintf("DATA_PAGE_HEADER{NumValues=%d,Encoding=%s}", + v1.header.NumValues, + v1.header.Encoding) +} + +// DataPageHeaderV2 is an implementation of the DataPageHeader interface +// representing data pages version 2. 
+type DataPageHeaderV2 struct { + header *format.DataPageHeaderV2 +} + +func (v2 DataPageHeaderV2) NumValues() int64 { + return int64(v2.header.NumValues) +} + +func (v2 DataPageHeaderV2) NumNulls() int64 { + return int64(v2.header.NumNulls) +} + +func (v2 DataPageHeaderV2) NumRows() int64 { + return int64(v2.header.NumRows) +} + +func (v2 DataPageHeaderV2) RepetitionLevelsByteLength() int64 { + return int64(v2.header.RepetitionLevelsByteLength) +} + +func (v2 DataPageHeaderV2) DefinitionLevelsByteLength() int64 { + return int64(v2.header.DefinitionLevelsByteLength) +} + +func (v2 DataPageHeaderV2) RepetitionLevelEncoding() format.Encoding { + return format.RLE +} + +func (v2 DataPageHeaderV2) DefinitionLevelEncoding() format.Encoding { + return format.RLE +} + +func (v2 DataPageHeaderV2) Encoding() format.Encoding { + return v2.header.Encoding +} + +func (v2 DataPageHeaderV2) PageType() format.PageType { + return format.DataPageV2 +} + +func (v2 DataPageHeaderV2) NullCount() int64 { + return v2.header.Statistics.NullCount +} + +func (v2 DataPageHeaderV2) MinValue() []byte { + return v2.header.Statistics.MinValue +} + +func (v2 DataPageHeaderV2) MaxValue() []byte { + return v2.header.Statistics.MaxValue +} + +func (v2 DataPageHeaderV2) IsCompressed() bool { + return v2.header.IsCompressed == nil || *v2.header.IsCompressed +} + +func (v2 DataPageHeaderV2) String() string { + return fmt.Sprintf("DATA_PAGE_HEADER_V2{NumValues=%d,NumNulls=%d,NumRows=%d,Encoding=%s,IsCompressed=%t}", + v2.header.NumValues, + v2.header.NumNulls, + v2.header.NumRows, + v2.header.Encoding, + v2.IsCompressed()) +} + +type unknownPageHeader struct { + header *format.PageHeader +} + +func (u unknownPageHeader) NumValues() int64 { + return 0 +} + +func (u unknownPageHeader) Encoding() format.Encoding { + return -1 +} + +func (u unknownPageHeader) PageType() format.PageType { + return u.header.Type +} + +func (u unknownPageHeader) String() string { + return 
fmt.Sprintf("UNKNOWN_PAGE_HEADER{Type=%d}", u.header.Type) +} + +var ( + _ PageHeader = DictionaryPageHeader{} + _ DataPageHeader = DataPageHeaderV1{} + _ DataPageHeader = DataPageHeaderV2{} + _ PageHeader = unknownPageHeader{} +) diff --git a/vendor/github.com/parquet-go/parquet-go/page_int32.go b/vendor/github.com/parquet-go/parquet-go/page_int32.go new file mode 100644 index 00000000000..ff826989323 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_int32.go @@ -0,0 +1,108 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type int32Page struct { + typ Type + values memory.SliceBuffer[int32] + columnIndex int16 +} + +func newInt32Page(typ Type, columnIndex int16, numValues int32, values encoding.Values) *int32Page { + return &int32Page{ + typ: typ, + values: memory.SliceBufferFrom(values.Int32()[:numValues]), + columnIndex: ^columnIndex, + } +} + +func (page *int32Page) Type() Type { return page.typ } + +func (page *int32Page) Column() int { return int(^page.columnIndex) } + +func (page *int32Page) Dictionary() Dictionary { return nil } + +func (page *int32Page) NumRows() int64 { return int64(page.values.Len()) } + +func (page *int32Page) NumValues() int64 { return int64(page.values.Len()) } + +func (page *int32Page) NumNulls() int64 { return 0 } + +func (page *int32Page) Size() int64 { return 4 * int64(page.values.Len()) } + +func (page *int32Page) RepetitionLevels() []byte { return nil } + +func (page *int32Page) DefinitionLevels() []byte { return nil } + +func (page *int32Page) Data() encoding.Values { return encoding.Int32Values(page.values.Slice()) } + +func (page *int32Page) Values() ValueReader { return &int32PageValues{page: page} } + +func (page *int32Page) min() int32 { return minInt32(page.values.Slice()) } + +func (page *int32Page) max() int32 { return maxInt32(page.values.Slice()) } + +func (page 
*int32Page) bounds() (min, max int32) { return boundsInt32(page.values.Slice()) } + +func (page *int32Page) Bounds() (min, max Value, ok bool) { + if ok = page.values.Len() > 0; ok { + minInt32, maxInt32 := page.bounds() + min = page.makeValue(minInt32) + max = page.makeValue(maxInt32) + } + return min, max, ok +} + +func (page *int32Page) Slice(i, j int64) Page { + sliced := &int32Page{ + typ: page.typ, + columnIndex: page.columnIndex, + } + sliced.values.Append(page.values.Slice()[i:j]...) + return sliced +} + +func (page *int32Page) makeValue(v int32) Value { + value := makeValueInt32(v) + value.columnIndex = page.columnIndex + return value +} + +type int32PageValues struct { + page *int32Page + offset int +} + +func (r *int32PageValues) Read(b []byte) (n int, err error) { + n, err = r.ReadInt32s(unsafecast.Slice[int32](b)) + return 4 * n, err +} + +func (r *int32PageValues) ReadInt32s(values []int32) (n int, err error) { + pageValues := r.page.values.Slice() + n = copy(values, pageValues[r.offset:]) + r.offset += n + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} + +func (r *int32PageValues) ReadValues(values []Value) (n int, err error) { + pageValues := r.page.values.Slice() + for n < len(values) && r.offset < len(pageValues) { + values[n] = r.page.makeValue(pageValues[r.offset]) + r.offset++ + n++ + } + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_int64.go b/vendor/github.com/parquet-go/parquet-go/page_int64.go new file mode 100644 index 00000000000..418dbc25083 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_int64.go @@ -0,0 +1,108 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type int64Page struct { + typ Type + values memory.SliceBuffer[int64] + columnIndex int16 +} + +func newInt64Page(typ 
Type, columnIndex int16, numValues int32, values encoding.Values) *int64Page { + return &int64Page{ + typ: typ, + values: memory.SliceBufferFrom(values.Int64()[:numValues]), + columnIndex: ^columnIndex, + } +} + +func (page *int64Page) Type() Type { return page.typ } + +func (page *int64Page) Column() int { return int(^page.columnIndex) } + +func (page *int64Page) Dictionary() Dictionary { return nil } + +func (page *int64Page) NumRows() int64 { return int64(page.values.Len()) } + +func (page *int64Page) NumValues() int64 { return int64(page.values.Len()) } + +func (page *int64Page) NumNulls() int64 { return 0 } + +func (page *int64Page) Size() int64 { return 8 * int64(page.values.Len()) } + +func (page *int64Page) RepetitionLevels() []byte { return nil } + +func (page *int64Page) DefinitionLevels() []byte { return nil } + +func (page *int64Page) Data() encoding.Values { return encoding.Int64Values(page.values.Slice()) } + +func (page *int64Page) Values() ValueReader { return &int64PageValues{page: page} } + +func (page *int64Page) min() int64 { return minInt64(page.values.Slice()) } + +func (page *int64Page) max() int64 { return maxInt64(page.values.Slice()) } + +func (page *int64Page) bounds() (min, max int64) { return boundsInt64(page.values.Slice()) } + +func (page *int64Page) Bounds() (min, max Value, ok bool) { + if ok = page.values.Len() > 0; ok { + minInt64, maxInt64 := page.bounds() + min = page.makeValue(minInt64) + max = page.makeValue(maxInt64) + } + return min, max, ok +} + +func (page *int64Page) Slice(i, j int64) Page { + sliced := &int64Page{ + typ: page.typ, + columnIndex: page.columnIndex, + } + sliced.values.Append(page.values.Slice()[i:j]...) 
+ return sliced +} + +func (page *int64Page) makeValue(v int64) Value { + value := makeValueInt64(v) + value.columnIndex = page.columnIndex + return value +} + +type int64PageValues struct { + page *int64Page + offset int +} + +func (r *int64PageValues) Read(b []byte) (n int, err error) { + n, err = r.ReadInt64s(unsafecast.Slice[int64](b)) + return 8 * n, err +} + +func (r *int64PageValues) ReadInt64s(values []int64) (n int, err error) { + pageValues := r.page.values.Slice() + n = copy(values, pageValues[r.offset:]) + r.offset += n + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} + +func (r *int64PageValues) ReadValues(values []Value) (n int, err error) { + pageValues := r.page.values.Slice() + for n < len(values) && r.offset < len(pageValues) { + values[n] = r.page.makeValue(pageValues[r.offset]) + r.offset++ + n++ + } + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_int96.go b/vendor/github.com/parquet-go/parquet-go/page_int96.go new file mode 100644 index 00000000000..7cc1d6eb199 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_int96.go @@ -0,0 +1,107 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" +) + +type int96Page struct { + typ Type + values []deprecated.Int96 + columnIndex int16 +} + +func newInt96Page(typ Type, columnIndex int16, numValues int32, values encoding.Values) *int96Page { + return &int96Page{ + typ: typ, + values: values.Int96()[:numValues], + columnIndex: ^columnIndex, + } +} + +func (page *int96Page) Type() Type { return page.typ } + +func (page *int96Page) Column() int { return int(^page.columnIndex) } + +func (page *int96Page) Dictionary() Dictionary { return nil } + +func (page *int96Page) NumRows() int64 { return int64(len(page.values)) } + +func (page *int96Page) NumValues() int64 { 
return int64(len(page.values)) } + +func (page *int96Page) NumNulls() int64 { return 0 } + +func (page *int96Page) Size() int64 { return 12 * int64(len(page.values)) } + +func (page *int96Page) RepetitionLevels() []byte { return nil } + +func (page *int96Page) DefinitionLevels() []byte { return nil } + +func (page *int96Page) Data() encoding.Values { return encoding.Int96Values(page.values) } + +func (page *int96Page) Values() ValueReader { return &int96PageValues{page: page} } + +func (page *int96Page) min() deprecated.Int96 { return deprecated.MinInt96(page.values) } + +func (page *int96Page) max() deprecated.Int96 { return deprecated.MaxInt96(page.values) } + +func (page *int96Page) bounds() (min, max deprecated.Int96) { + return deprecated.MinMaxInt96(page.values) +} + +func (page *int96Page) Bounds() (min, max Value, ok bool) { + if ok = len(page.values) > 0; ok { + minInt96, maxInt96 := page.bounds() + min = page.makeValue(minInt96) + max = page.makeValue(maxInt96) + } + return min, max, ok +} + +func (page *int96Page) Slice(i, j int64) Page { + return &int96Page{ + typ: page.typ, + values: page.values[i:j], + columnIndex: page.columnIndex, + } +} + +func (page *int96Page) makeValue(v deprecated.Int96) Value { + value := makeValueInt96(v) + value.columnIndex = page.columnIndex + return value +} + +type int96PageValues struct { + page *int96Page + offset int +} + +func (r *int96PageValues) Read(b []byte) (n int, err error) { + n, err = r.ReadInt96s(unsafecast.Slice[deprecated.Int96](b)) + return 12 * n, err +} + +func (r *int96PageValues) ReadInt96s(values []deprecated.Int96) (n int, err error) { + n = copy(values, r.page.values[r.offset:]) + r.offset += n + if r.offset == len(r.page.values) { + err = io.EOF + } + return n, err +} + +func (r *int96PageValues) ReadValues(values []Value) (n int, err error) { + for n < len(values) && r.offset < len(r.page.values) { + values[n] = r.page.makeValue(r.page.values[r.offset]) + r.offset++ + n++ + } + if r.offset == 
len(r.page.values) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_max.go b/vendor/github.com/parquet-go/parquet-go/page_max.go new file mode 100644 index 00000000000..f6afecf6a68 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_max.go @@ -0,0 +1,23 @@ +package parquet + +import ( + "bytes" +) + +func maxFixedLenByteArray(data []byte, size int) (max []byte) { + if len(data) > 0 { + max = data[:size] + + for i, j := size, 2*size; j <= len(data); { + item := data[i:j] + + if bytes.Compare(item, max) > 0 { + max = item + } + + i += size + j += size + } + } + return max +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_max_amd64.go b/vendor/github.com/parquet-go/parquet-go/page_max_amd64.go new file mode 100644 index 00000000000..2ac1de2f066 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_max_amd64.go @@ -0,0 +1,24 @@ +//go:build !purego + +package parquet + +//go:noescape +func maxInt32(data []int32) int32 + +//go:noescape +func maxInt64(data []int64) int64 + +//go:noescape +func maxUint32(data []uint32) uint32 + +//go:noescape +func maxUint64(data []uint64) uint64 + +//go:noescape +func maxFloat32(data []float32) float32 + +//go:noescape +func maxFloat64(data []float64) float64 + +//go:noescape +func maxBE128(data [][16]byte) []byte diff --git a/vendor/github.com/parquet-go/parquet-go/page_max_amd64.s b/vendor/github.com/parquet-go/parquet-go/page_max_amd64.s new file mode 100644 index 00000000000..8159583d1fd --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_max_amd64.s @@ -0,0 +1,598 @@ +//go:build !purego + +#include "textflag.h" + +// func maxInt32(data []int32) int32 +TEXT ·maxInt32(SB), NOSPLIT, $-28 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVLQZX (AX), BX + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTD (AX), 
Z0 +loop32: + VMOVDQU32 (AX)(SI*4), Z1 + VMOVDQU32 64(AX)(SI*4), Z2 + VPMAXSD Z1, Z0, Z0 + VPMAXSD Z2, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VPMAXSD Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VPMAXSD X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VPMAXSD X1, X0, X0 + VZEROUPPER + + MOVQ X0, DX + MOVL DX, BX + SHRQ $32, DX + CMPL DX, BX + CMOVLGT DX, BX + + CMPQ SI, CX + JE done +loop: + MOVLQZX (AX)(SI*4), DX + CMPL DX, BX + CMOVLGT DX, BX + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVL BX, ret+24(FP) + RET + +// func maxInt64(data []int64) int64 +TEXT ·maxInt64(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVQ (AX), BX + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTQ (AX), Z0 +loop32: + VMOVDQU64 (AX)(SI*8), Z1 + VMOVDQU64 64(AX)(SI*8), Z2 + VMOVDQU64 128(AX)(SI*8), Z3 + VMOVDQU64 192(AX)(SI*8), Z4 + VPMAXSQ Z1, Z2, Z5 + VPMAXSQ Z3, Z4, Z6 + VPMAXSQ Z5, Z6, Z1 + VPMAXSQ Z1, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VPMAXSQ Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VPMAXSQ X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VPMAXSQ X1, X0, X0 + VZEROUPPER + + MOVQ X0, BX + CMPQ SI, CX + JE done +loop: + MOVQ (AX)(SI*8), DX + CMPQ DX, BX + CMOVQGT DX, BX + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVQ BX, ret+24(FP) + RET + +// func maxUint32(data []int32) int32 +TEXT ·maxUint32(SB), NOSPLIT, $-28 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVLQZX (AX), BX + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTD (AX), Z0 +loop32: + VMOVDQU32 (AX)(SI*4), Z1 + 
VMOVDQU32 64(AX)(SI*4), Z2 + VPMAXUD Z1, Z0, Z0 + VPMAXUD Z2, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VPMAXUD Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VPMAXUD X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VPMAXUD X1, X0, X0 + VZEROUPPER + + MOVQ X0, DX + MOVL DX, BX + SHRQ $32, DX + CMPL DX, BX + CMOVLHI DX, BX + + CMPQ SI, CX + JE done +loop: + MOVLQZX (AX)(SI*4), DX + CMPL DX, BX + CMOVLHI DX, BX + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVL BX, ret+24(FP) + RET + +// func maxUint64(data []uint64) uint64 +TEXT ·maxUint64(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVQ (AX), BX + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTQ (AX), Z0 +loop32: + VMOVDQU64 (AX)(SI*8), Z1 + VMOVDQU64 64(AX)(SI*8), Z2 + VMOVDQU64 128(AX)(SI*8), Z3 + VMOVDQU64 192(AX)(SI*8), Z4 + VPMAXUQ Z1, Z2, Z5 + VPMAXUQ Z3, Z4, Z6 + VPMAXUQ Z5, Z6, Z1 + VPMAXUQ Z1, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VPMAXUQ Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VPMAXUQ X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VPMAXUQ X1, X0, X0 + VZEROUPPER + + MOVQ X0, BX + CMPQ SI, CX + JE done +loop: + MOVQ (AX)(SI*8), DX + CMPQ DX, BX + CMOVQHI DX, BX + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVQ BX, ret+24(FP) + RET + +// func maxFloat32(data []float32) float32 +TEXT ·maxFloat32(SB), NOSPLIT, $-28 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORPS X0, X0 + XORPS X1, X1 + XORQ SI, SI + MOVLQZX (AX), BX + MOVQ BX, X0 + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $64 + JB loop + + MOVQ CX, DI + SHRQ $6, DI + SHLQ $6, DI + VPBROADCASTD (AX), Z0 +loop64: + VMOVDQU32 (AX)(SI*4), 
Z1 + VMOVDQU32 64(AX)(SI*4), Z2 + VMOVDQU32 128(AX)(SI*4), Z3 + VMOVDQU32 192(AX)(SI*4), Z4 + VMAXPS Z1, Z2, Z5 + VMAXPS Z3, Z4, Z6 + VMAXPS Z5, Z6, Z1 + VMAXPS Z1, Z0, Z0 + ADDQ $64, SI + CMPQ SI, DI + JNE loop64 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VMAXPS Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VMAXPS X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VMAXPS X1, X0, X0 + VZEROUPPER + + MOVAPS X0, X1 + PSRLQ $32, X1 + MOVQ X0, BX + MOVQ X1, DX + UCOMISS X0, X1 + CMOVLHI DX, BX + + CMPQ SI, CX + JE done + MOVQ BX, X0 +loop: + MOVLQZX (AX)(SI*4), DX + MOVQ DX, X1 + UCOMISS X0, X1 + CMOVLHI DX, BX + MOVQ BX, X0 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVL BX, ret+24(FP) + RET + +// func maxFloat64(data []float64) float64 +TEXT ·maxFloat64(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORPD X0, X0 + XORPD X1, X1 + XORQ SI, SI + MOVQ (AX), BX + MOVQ BX, X0 + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTQ (AX), Z0 +loop32: + VMOVDQU64 (AX)(SI*8), Z1 + VMOVDQU64 64(AX)(SI*8), Z2 + VMOVDQU64 128(AX)(SI*8), Z3 + VMOVDQU64 192(AX)(SI*8), Z4 + VMAXPD Z1, Z2, Z5 + VMAXPD Z3, Z4, Z6 + VMAXPD Z5, Z6, Z1 + VMAXPD Z1, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU64 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VMAXPD Y1, Y0, Y0 + + VMOVDQU64 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VMAXPD X1, X0, X0 + + VMOVDQU64 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VMAXPD X1, X0, X0 + VZEROUPPER + + MOVQ X0, BX + CMPQ SI, CX + JE done +loop: + MOVQ (AX)(SI*8), DX + MOVQ DX, X1 + UCOMISD X0, X1 + CMOVQHI DX, BX + MOVQ BX, X0 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVQ BX, ret+24(FP) + RET + +// vpmaxu128 is a macro comparing unsigned 128 bits values held in the +// `srcValues` and `maxValues` vectors. 
The `srcIndexes` and `maxIndexes` +// vectors contain the indexes of elements in the value vectors. Remaining +// K and R arguments are mask and general purpose registers needed to hold +// temporary values during the computation. The last M argument is a mask +// generated by vpmaxu128mask. +// +// The routine uses AVX-512 instructions (VPCMPUQ, VPBLENDMQ) to implement +// the comparison of 128 bits values. The values are expected to be stored +// in the vectors as a little-endian pair of two consecutive quad words. +// +// The results are written to the `maxValues` and `maxIndexes` vectors, +// overwriting the inputs. `srcValues` and `srcIndexes` are read-only +// parameters. +// +// At a high level, for two pairs of quad words formaxg two 128 bits values +// A and B, the test implemented by this macro is: +// +// A[1] > B[1] || (A[1] == B[1] && A[0] > B[0]) +// +// Values in the source vector that evaluate to true on this expression are +// written to the vector of maximum values, and their indexes are written to +// the vector of indexes. +#define vpmaxu128(srcValues, srcIndexes, maxValues, maxIndexes, K1, K2, R1, R2, R3, M) \ + VPCMPUQ $0, maxValues, srcValues, K1 \ + VPCMPUQ $6, maxValues, srcValues, K2 \ + KMOVB K1, R1 \ + KMOVB K2, R2 \ + MOVB R2, R3 \ + SHLB $1, R3 \ + ANDB R3, R1 \ + ORB R2, R1 \ + ANDB M, R1 \ + MOVB R1, R2 \ + SHRB $1, R2 \ + ORB R2, R1 \ + KMOVB R1, K1 \ + VPBLENDMQ srcValues, maxValues, K1, maxValues \ + VPBLENDMQ srcIndexes, maxIndexes, K1, maxIndexes + +// vpmaxu128mask is a macro used to initialize the mask passed as last argument +// to vpmaxu128. The argument M is intended to be a general purpose register. +// +// The bit mask is used to merge the results of the "greater than" and "equal" +// comparison that are performed on each lane of maximum vectors. The upper bits +// are used to compute results of the operation to determine which of the pairs +// of quad words representing the 128 bits elements are the maximums. 
+#define vpmaxu128mask(M) MOVB $0b10101010, M + +// func maxBE128(data [][16]byte) []byte +TEXT ·maxBE128(SB), NOSPLIT, $-48 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + CMPQ CX, $0 + JE null + + SHLQ $4, CX + MOVQ CX, DX // len + MOVQ AX, BX // max + ADDQ AX, CX // end + + CMPQ DX, $256 + JB loop + + CMPB ·hasAVX512MinMaxBE128(SB), $0 + JE loop + + // Z19 holds a vector of the count by which we increment the vectors of + // swap at each loop iteration. + MOVQ $16, DI + VPBROADCASTQ DI, Z19 + + // Z31 holds the shuffle mask used to convert 128 bits elements from big to + // little endian so we can apply vectorized comparison instructions. + VMOVDQU64 bswap128(SB), Z31 + + // These vectors hold four lanes of maximum values found in the input. + VBROADCASTI64X2 (AX), Z0 + VPSHUFB Z31, Z0, Z0 + VMOVDQU64 Z0, Z5 + VMOVDQU64 Z0, Z10 + VMOVDQU64 Z0, Z15 + + // These vectors hold four lanes of swap of maximum values. + // + // We initialize them at zero because we broadcast the first value of the + // input in the vectors that track the maximums of each lane; in other + // words, we assume the maximum value is at the first offset and work our + // way up from there. + VPXORQ Z2, Z2, Z2 + VPXORQ Z7, Z7, Z7 + VPXORQ Z12, Z12, Z12 + VPXORQ Z17, Z17, Z17 + + // These vectors are used to compute the swap of maximum values held + // in [Z1, Z5, Z10, Z15]. Each vector holds a contiguous sequence of + // swap; for example, Z3 is initialized with [0, 1, 2, 3]. At each + // loop iteration, the swap are incremented by the number of elements + // consumed from the input (4x4=16). + VMOVDQU64 indexes128(SB), Z3 + VPXORQ Z8, Z8, Z8 + VPXORQ Z13, Z13, Z13 + VPXORQ Z18, Z18, Z18 + MOVQ $4, DI + VPBROADCASTQ DI, Z1 + VPADDQ Z1, Z3, Z8 + VPADDQ Z1, Z8, Z13 + VPADDQ Z1, Z13, Z18 + + // This bit mask is used to merge the results of the "less than" and "equal" + // comparison that we perform on each lane of maximum vectors. 
We use the + // upper bits to compute four results of the operation which determines + // which of the pair of quad words representing the 128 bits elements is the + // maximum. + vpmaxu128mask(DI) + SHRQ $8, DX + SHLQ $8, DX + ADDQ AX, DX +loop16: + // Compute 4x4 maximum values in vector registers, along with their swap + // in the input array. + VMOVDQU64 (AX), Z1 + VMOVDQU64 64(AX), Z6 + VMOVDQU64 128(AX), Z11 + VMOVDQU64 192(AX), Z16 + VPSHUFB Z31, Z1, Z1 + VPSHUFB Z31, Z6, Z6 + VPSHUFB Z31, Z11, Z11 + VPSHUFB Z31, Z16, Z16 + vpmaxu128(Z1, Z3, Z0, Z2, K1, K2, R8, R9, R10, DI) + vpmaxu128(Z6, Z8, Z5, Z7, K3, K4, R11, R12, R13, DI) + vpmaxu128(Z11, Z13, Z10, Z12, K1, K2, R8, R9, R10, DI) + vpmaxu128(Z16, Z18, Z15, Z17, K3, K4, R11, R12, R13, DI) + VPADDQ Z19, Z3, Z3 + VPADDQ Z19, Z8, Z8 + VPADDQ Z19, Z13, Z13 + VPADDQ Z19, Z18, Z18 + ADDQ $256, AX + CMPQ AX, DX + JB loop16 + + // After the loop completed, we need to merge the lanes that each contain + // 4 maximum values (so 16 total candidate at this stage). The results are + // reduced into 4 candidates in Z0, with their swap in Z2. + vpmaxu128(Z10, Z12, Z0, Z2, K1, K2, R8, R9, R10, DI) + vpmaxu128(Z15, Z17, Z5, Z7, K3, K4, R11, R12, R13, DI) + vpmaxu128(Z5, Z7, Z0, Z2, K1, K2, R8, R9, R10, DI) + + // Further reduce the results by swapping the upper and lower parts of the + // vector registers, and comparing them to determaxe which values are the + // smallest. We compare 2x2 values at this step, then 2x1 values at the next + // to find the index of the maximum. 
+ VMOVDQU64 swap64+0(SB), Z1 + VMOVDQU64 swap64+0(SB), Z3 + VPERMI2Q Z0, Z0, Z1 + VPERMI2Q Z2, Z2, Z3 + vpmaxu128(Y1, Y3, Y0, Y2, K1, K2, R8, R9, R10, DI) + + VMOVDQU64 swap64+32(SB), Y1 + VMOVDQU64 swap64+32(SB), Y3 + VPERMI2Q Y0, Y0, Y1 + VPERMI2Q Y2, Y2, Y3 + vpmaxu128(X1, X3, X0, X2, K1, K2, R8, R9, R10, DI) + VZEROUPPER + + // Extract the index of the maximum value computed in the lower 64 bits of + // X2 and position the BX pointer at the index of the maximum value. + MOVQ X2, DX + SHLQ $4, DX + ADDQ DX, BX + CMPQ AX, CX + JE done + + // Unless the input was aligned on 256 bytes, we need to perform a few more + // iterations on the remaining elements. + // + // This loop is also taken if the CPU has no support for AVX-512. +loop: + MOVQ (AX), R8 + MOVQ (BX), R9 + BSWAPQ R8 + BSWAPQ R9 + CMPQ R8, R9 + JA more + JB next + MOVQ 8(AX), R8 + MOVQ 8(BX), R9 + BSWAPQ R8 + BSWAPQ R9 + CMPQ R8, R9 + JBE next +more: + MOVQ AX, BX +next: + ADDQ $16, AX + CMPQ AX, CX + JB loop +done: + MOVQ BX, ret_base+24(FP) + MOVQ $16, ret_len+32(FP) + MOVQ $16, ret_cap+40(FP) + RET +null: + XORQ BX, BX + MOVQ BX, ret_base+24(FP) + MOVQ BX, ret_len+32(FP) + MOVQ BX, ret_cap+40(FP) + RET + diff --git a/vendor/github.com/parquet-go/parquet-go/page_max_purego.go b/vendor/github.com/parquet-go/parquet-go/page_max_purego.go new file mode 100644 index 00000000000..0d23d12f683 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_max_purego.go @@ -0,0 +1,72 @@ +//go:build purego || !amd64 + +package parquet + +import ( + "encoding/binary" + "slices" +) + +func maxInt32(data []int32) int32 { + if len(data) == 0 { + return 0 + } + return slices.Max(data) +} + +func maxInt64(data []int64) int64 { + if len(data) == 0 { + return 0 + } + return slices.Max(data) +} + +func maxUint32(data []uint32) uint32 { + if len(data) == 0 { + return 0 + } + return slices.Max(data) +} + +func maxUint64(data []uint64) uint64 { + if len(data) == 0 { + return 0 + } + return slices.Max(data) +} + +func 
maxFloat32(data []float32) float32 { + if len(data) == 0 { + return 0 + } + return slices.Max(data) +} + +func maxFloat64(data []float64) float64 { + if len(data) == 0 { + return 0 + } + return slices.Max(data) +} + +func maxBE128(data [][16]byte) (min []byte) { + if len(data) > 0 { + m := binary.BigEndian.Uint64(data[0][:8]) + j := 0 + for i := 1; i < len(data); i++ { + x := binary.BigEndian.Uint64(data[i][:8]) + switch { + case x > m: + m, j = x, i + case x == m: + y := binary.BigEndian.Uint64(data[i][8:]) + n := binary.BigEndian.Uint64(data[j][8:]) + if y > n { + m, j = x, i + } + } + } + min = data[j][:] + } + return min +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_min.go b/vendor/github.com/parquet-go/parquet-go/page_min.go new file mode 100644 index 00000000000..2bc185ac285 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_min.go @@ -0,0 +1,23 @@ +package parquet + +import ( + "bytes" +) + +func minFixedLenByteArray(data []byte, size int) (min []byte) { + if len(data) > 0 { + min = data[:size] + + for i, j := size, 2*size; j <= len(data); { + item := data[i:j] + + if bytes.Compare(item, min) < 0 { + min = item + } + + i += size + j += size + } + } + return min +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_min_amd64.go b/vendor/github.com/parquet-go/parquet-go/page_min_amd64.go new file mode 100644 index 00000000000..c9fd654753c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_min_amd64.go @@ -0,0 +1,24 @@ +//go:build !purego + +package parquet + +//go:noescape +func minInt32(data []int32) int32 + +//go:noescape +func minInt64(data []int64) int64 + +//go:noescape +func minUint32(data []uint32) uint32 + +//go:noescape +func minUint64(data []uint64) uint64 + +//go:noescape +func minFloat32(data []float32) float32 + +//go:noescape +func minFloat64(data []float64) float64 + +//go:noescape +func minBE128(data [][16]byte) []byte diff --git a/vendor/github.com/parquet-go/parquet-go/page_min_amd64.s 
b/vendor/github.com/parquet-go/parquet-go/page_min_amd64.s new file mode 100644 index 00000000000..7bd2e3a8727 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_min_amd64.s @@ -0,0 +1,592 @@ +//go:build !purego + +#include "textflag.h" + +// func minInt32(data []int32) int32 +TEXT ·minInt32(SB), NOSPLIT, $-28 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVLQZX (AX), BX + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTD (AX), Z0 +loop32: + VMOVDQU32 (AX)(SI*4), Z1 + VMOVDQU32 64(AX)(SI*4), Z2 + VPMINSD Z1, Z0, Z0 + VPMINSD Z2, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VPMINSD Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VPMINSD X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VPMINSD X1, X0, X0 + VZEROUPPER + + MOVQ X0, DX + MOVL DX, BX + SHRQ $32, DX + CMPL DX, BX + CMOVLLT DX, BX + + CMPQ SI, CX + JE done +loop: + MOVLQZX (AX)(SI*4), DX + CMPL DX, BX + CMOVLLT DX, BX + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVL BX, ret+24(FP) + RET + +// func minInt64(data []int64) int64 +TEXT ·minInt64(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVQ (AX), BX + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTQ (AX), Z0 +loop32: + VMOVDQU64 (AX)(SI*8), Z1 + VMOVDQU64 64(AX)(SI*8), Z2 + VMOVDQU64 128(AX)(SI*8), Z3 + VMOVDQU64 192(AX)(SI*8), Z4 + VPMINSQ Z1, Z2, Z5 + VPMINSQ Z3, Z4, Z6 + VPMINSQ Z5, Z6, Z1 + VPMINSQ Z1, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VPMINSQ Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VPMINSQ X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + 
VPMINSQ X1, X0, X0 + VZEROUPPER + + MOVQ X0, BX + CMPQ SI, CX + JE done +loop: + MOVQ (AX)(SI*8), DX + CMPQ DX, BX + CMOVQLT DX, BX + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVQ BX, ret+24(FP) + RET + +// func minUint32(data []int32) int32 +TEXT ·minUint32(SB), NOSPLIT, $-28 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVLQZX (AX), BX + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTD (AX), Z0 +loop32: + VMOVDQU32 (AX)(SI*4), Z1 + VMOVDQU32 64(AX)(SI*4), Z2 + VPMINUD Z1, Z0, Z0 + VPMINUD Z2, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VPMINUD Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VPMINUD X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VPMINUD X1, X0, X0 + VZEROUPPER + + MOVQ X0, DX + MOVL DX, BX + SHRQ $32, DX + CMPL DX, BX + CMOVLCS DX, BX + + CMPQ SI, CX + JE done +loop: + MOVLQZX (AX)(SI*4), DX + CMPL DX, BX + CMOVLCS DX, BX + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVL BX, ret+24(FP) + RET + +// func minUint64(data []uint64) uint64 +TEXT ·minUint64(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORQ SI, SI + MOVQ (AX), BX + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTQ (AX), Z0 +loop32: + VMOVDQU64 (AX)(SI*8), Z1 + VMOVDQU64 64(AX)(SI*8), Z2 + VMOVDQU64 128(AX)(SI*8), Z3 + VMOVDQU64 192(AX)(SI*8), Z4 + VPMINUQ Z1, Z2, Z5 + VPMINUQ Z3, Z4, Z6 + VPMINUQ Z5, Z6, Z1 + VPMINUQ Z1, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VPMINUQ Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VPMINUQ X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VPMINUQ X1, X0, X0 + VZEROUPPER + + MOVQ X0, 
BX + CMPQ SI, CX + JE done +loop: + MOVQ (AX)(SI*8), DX + CMPQ DX, BX + CMOVQCS DX, BX + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVQ BX, ret+24(FP) + RET + +// func minFloat32(data []float32) float32 +TEXT ·minFloat32(SB), NOSPLIT, $-28 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORPS X0, X0 + XORPS X1, X1 + XORQ SI, SI + MOVLQZX (AX), BX + MOVQ BX, X0 + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $64 + JB loop + + MOVQ CX, DI + SHRQ $6, DI + SHLQ $6, DI + VPBROADCASTD (AX), Z0 +loop64: + VMOVDQU32 (AX)(SI*4), Z1 + VMOVDQU32 64(AX)(SI*4), Z2 + VMOVDQU32 128(AX)(SI*4), Z3 + VMOVDQU32 192(AX)(SI*4), Z4 + VMINPS Z1, Z2, Z5 + VMINPS Z3, Z4, Z6 + VMINPS Z5, Z6, Z1 + VMINPS Z1, Z0, Z0 + ADDQ $64, SI + CMPQ SI, DI + JNE loop64 + + VMOVDQU32 swap32+0(SB), Z1 + VPERMI2D Z0, Z0, Z1 + VMINPS Y1, Y0, Y0 + + VMOVDQU32 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VMINPS X1, X0, X0 + + VMOVDQU32 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VMINPS X1, X0, X0 + VZEROUPPER + + MOVAPS X0, X1 + PSRLQ $32, X1 + MOVQ X0, BX + MOVQ X1, DX + UCOMISS X0, X1 + CMOVLCS DX, BX + + CMPQ SI, CX + JE done + MOVQ BX, X0 +loop: + MOVLQZX (AX)(SI*4), DX + MOVQ DX, X1 + UCOMISS X0, X1 + CMOVLCS DX, BX + MOVQ BX, X0 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVL BX, ret+24(FP) + RET + +// func minFloat64(data []float64) float64 +TEXT ·minFloat64(SB), NOSPLIT, $-32 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + XORQ BX, BX + + CMPQ CX, $0 + JE done + XORPD X0, X0 + XORPD X1, X1 + XORQ SI, SI + MOVQ (AX), BX + MOVQ BX, X0 + + CMPB ·hasAVX512VL(SB), $0 + JE loop + + CMPQ CX, $32 + JB loop + + MOVQ CX, DI + SHRQ $5, DI + SHLQ $5, DI + VPBROADCASTQ (AX), Z0 +loop32: + VMOVDQU64 (AX)(SI*8), Z1 + VMOVDQU64 64(AX)(SI*8), Z2 + VMOVDQU64 128(AX)(SI*8), Z3 + VMOVDQU64 192(AX)(SI*8), Z4 + VMINPD Z1, Z2, Z5 + VMINPD Z3, Z4, Z6 + VMINPD Z5, Z6, Z1 + VMINPD Z1, Z0, Z0 + ADDQ $32, SI + CMPQ SI, DI + JNE loop32 + + VMOVDQU64 swap32+0(SB), Z1 + VPERMI2D 
Z0, Z0, Z1 + VMINPD Y1, Y0, Y0 + + VMOVDQU64 swap32+32(SB), Y1 + VPERMI2D Y0, Y0, Y1 + VMINPD X1, X0, X0 + + VMOVDQU64 swap32+48(SB), X1 + VPERMI2D X0, X0, X1 + VMINPD X1, X0, X0 + VZEROUPPER + + MOVQ X0, BX + CMPQ SI, CX + JE done +loop: + MOVQ (AX)(SI*8), DX + MOVQ DX, X1 + UCOMISD X0, X1 + CMOVQCS DX, BX + MOVQ BX, X0 + INCQ SI + CMPQ SI, CX + JNE loop +done: + MOVQ BX, ret+24(FP) + RET + +// vpminu128 is a macro comparing unsigned 128 bits values held in the +// `srcValues` and `minValues` vectors. The `srcIndexes` and `minIndexes` +// vectors contain the indexes of elements in the value vectors. Remaining +// K and R arguments are mask and general purpose registers needed to hold +// temporary values during the computation. The last M argument is a mask +// generated by vpminu128mask. +// +// The routine uses AVX-512 instructions (VPCMPUQ, VPBLENDMQ) to implement +// the comparison of 128 bits values. The values are expected to be stored +// in the vectors as a little-endian pair of two consecutive quad words. +// +// The results are written to the `minValues` and `minIndexes` vectors, +// overwriting the inputs. `srcValues` and `srcIndexes` are read-only +// parameters. +// +// At a high level, for two pairs of quad words forming two 128 bits values +// A and B, the test implemented by this macro is: +// +// A[1] < B[1] || (A[1] == B[1] && A[0] < B[0]) +// +// Values in the source vector that evalute to true on this expression are +// written to the vector of minimum values, and their indexes are written to +// the vector of indexes. 
+#define vpminu128(srcValues, srcIndexes, minValues, minIndexes, K1, K2, R1, R2, R3, M) \ + VPCMPUQ $0, minValues, srcValues, K1 \ + VPCMPUQ $1, minValues, srcValues, K2 \ + KMOVB K1, R1 \ + KMOVB K2, R2 \ + MOVB R2, R3 \ + SHLB $1, R3 \ + ANDB R3, R1 \ + ORB R2, R1 \ + ANDB M, R1 \ + MOVB R1, R2 \ + SHRB $1, R2 \ + ORB R2, R1 \ + KMOVB R1, K1 \ + VPBLENDMQ srcValues, minValues, K1, minValues \ + VPBLENDMQ srcIndexes, minIndexes, K1, minIndexes + +// vpminu128mask is a macro used to initialize the mask passed as last argument +// to vpminu128. The argument M is intended to be a general purpose register. +// +// The bit mask is used to merge the results of the "less than" and "equal" +// comparison that are performed on each lane of minimum vectors. The upper bits +// are used to compute results of the operation to determines which of the pairs +// of quad words representing the 128 bits elements are the minimums. +#define vpminu128mask(M) MOVB $0b10101010, M + +// func minBE128(data [][16]byte) []byte +TEXT ·minBE128(SB), NOSPLIT, $-48 + MOVQ data_base+0(FP), AX + MOVQ data_len+8(FP), CX + CMPQ CX, $0 + JE null + + SHLQ $4, CX + MOVQ CX, DX // len + MOVQ AX, BX // min + ADDQ AX, CX // end + + CMPQ DX, $256 + JB loop + + CMPB ·hasAVX512MinMaxBE128(SB), $0 + JE loop + + // Z19 holds a vector of the count by which we increment the vectors of + // swap at each loop iteration. + MOVQ $16, DI + VPBROADCASTQ DI, Z19 + + // Z31 holds the shuffle mask used to convert 128 bits elements from big to + // little endian so we can apply vectorized comparison instructions. + VMOVDQU64 bswap128(SB), Z31 + + // These vectors hold four lanes of minimum values found in the input. + VBROADCASTI64X2 (AX), Z0 + VPSHUFB Z31, Z0, Z0 + VMOVDQU64 Z0, Z5 + VMOVDQU64 Z0, Z10 + VMOVDQU64 Z0, Z15 + + // These vectors hold four lanes of swap of minimum values. 
+ // + // We initialize them at zero because we broadcast the first value of the + // input in the vectors that track the minimums of each lane; in other + // words, we assume the minimum value is at the first offset and work our + // way up from there. + VPXORQ Z2, Z2, Z2 + VPXORQ Z7, Z7, Z7 + VPXORQ Z12, Z12, Z12 + VPXORQ Z17, Z17, Z17 + + // These vectors are used to compute the swap of minimum values held + // in [Z1, Z5, Z10, Z15]. Each vector holds a contiguous sequence of + // swap; for example, Z3 is initialized with [0, 1, 2, 3]. At each + // loop iteration, the swap are incremented by the number of elements + // consumed from the input (4x4=16). + VMOVDQU64 indexes128(SB), Z3 + VPXORQ Z8, Z8, Z8 + VPXORQ Z13, Z13, Z13 + VPXORQ Z18, Z18, Z18 + MOVQ $4, DI + VPBROADCASTQ DI, Z1 + VPADDQ Z1, Z3, Z8 + VPADDQ Z1, Z8, Z13 + VPADDQ Z1, Z13, Z18 + + vpminu128mask(DI) + SHRQ $8, DX + SHLQ $8, DX + ADDQ AX, DX +loop16: + // Compute 4x4 minimum values in vector registers, along with their swap + // in the input array. + VMOVDQU64 (AX), Z1 + VMOVDQU64 64(AX), Z6 + VMOVDQU64 128(AX), Z11 + VMOVDQU64 192(AX), Z16 + VPSHUFB Z31, Z1, Z1 + VPSHUFB Z31, Z6, Z6 + VPSHUFB Z31, Z11, Z11 + VPSHUFB Z31, Z16, Z16 + vpminu128(Z1, Z3, Z0, Z2, K1, K2, R8, R9, R10, DI) + vpminu128(Z6, Z8, Z5, Z7, K3, K4, R11, R12, R13, DI) + vpminu128(Z11, Z13, Z10, Z12, K1, K2, R8, R9, R10, DI) + vpminu128(Z16, Z18, Z15, Z17, K3, K4, R11, R12, R13, DI) + VPADDQ Z19, Z3, Z3 + VPADDQ Z19, Z8, Z8 + VPADDQ Z19, Z13, Z13 + VPADDQ Z19, Z18, Z18 + ADDQ $256, AX + CMPQ AX, DX + JB loop16 + + // After the loop completed, we need to merge the lanes that each contain + // 4 minimum values (so 16 total candidate at this stage). The results are + // reduced into 4 candidates in Z0, with their swap in Z2. 
+ vpminu128(Z10, Z12, Z0, Z2, K1, K2, R8, R9, R10, DI) + vpminu128(Z15, Z17, Z5, Z7, K3, K4, R11, R12, R13, DI) + vpminu128(Z5, Z7, Z0, Z2, K1, K2, R8, R9, R10, DI) + + // Further reduce the results by swapping the upper and lower parts of the + // vector registers, and comparing them to determine which values are the + // smallest. We compare 2x2 values at this step, then 2x1 values at the next + // to find the index of the minimum. + VMOVDQU64 swap64+0(SB), Z1 + VMOVDQU64 swap64+0(SB), Z3 + VPERMI2Q Z0, Z0, Z1 + VPERMI2Q Z2, Z2, Z3 + vpminu128(Y1, Y3, Y0, Y2, K1, K2, R8, R9, R10, DI) + + VMOVDQU64 swap64+32(SB), Y1 + VMOVDQU64 swap64+32(SB), Y3 + VPERMI2Q Y0, Y0, Y1 + VPERMI2Q Y2, Y2, Y3 + vpminu128(X1, X3, X0, X2, K1, K2, R8, R9, R10, DI) + VZEROUPPER + + // Extract the index of the minimum value computed in the lower 64 bits of + // X2 and position the BX pointer at the index of the minimum value. + MOVQ X2, DX + SHLQ $4, DX + ADDQ DX, BX + CMPQ AX, CX + JE done + + // Unless the input was aligned on 256 bytes, we need to perform a few more + // iterations on the remaining elements. + // + // This loop is also taken if the CPU has no support for AVX-512. 
+loop: + MOVQ (AX), R8 + MOVQ (BX), R9 + BSWAPQ R8 + BSWAPQ R9 + CMPQ R8, R9 + JB less + JA next + MOVQ 8(AX), R8 + MOVQ 8(BX), R9 + BSWAPQ R8 + BSWAPQ R9 + CMPQ R8, R9 + JAE next +less: + MOVQ AX, BX +next: + ADDQ $16, AX + CMPQ AX, CX + JB loop +done: + MOVQ BX, ret_base+24(FP) + MOVQ $16, ret_len+32(FP) + MOVQ $16, ret_cap+40(FP) + RET +null: + XORQ BX, BX + MOVQ BX, ret_base+24(FP) + MOVQ BX, ret_len+32(FP) + MOVQ BX, ret_cap+40(FP) + RET diff --git a/vendor/github.com/parquet-go/parquet-go/page_min_purego.go b/vendor/github.com/parquet-go/parquet-go/page_min_purego.go new file mode 100644 index 00000000000..e4fad096ff9 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_min_purego.go @@ -0,0 +1,72 @@ +//go:build purego || !amd64 + +package parquet + +import ( + "encoding/binary" + "slices" +) + +func minInt32(data []int32) int32 { + if len(data) == 0 { + return 0 + } + return slices.Min(data) +} + +func minInt64(data []int64) int64 { + if len(data) == 0 { + return 0 + } + return slices.Min(data) +} + +func minUint32(data []uint32) uint32 { + if len(data) == 0 { + return 0 + } + return slices.Min(data) +} + +func minUint64(data []uint64) uint64 { + if len(data) == 0 { + return 0 + } + return slices.Min(data) +} + +func minFloat32(data []float32) float32 { + if len(data) == 0 { + return 0 + } + return slices.Min(data) +} + +func minFloat64(data []float64) float64 { + if len(data) == 0 { + return 0 + } + return slices.Min(data) +} + +func minBE128(data [][16]byte) (min []byte) { + if len(data) > 0 { + m := binary.BigEndian.Uint64(data[0][:8]) + j := 0 + for i := 1; i < len(data); i++ { + x := binary.BigEndian.Uint64(data[i][:8]) + switch { + case x < m: + m, j = x, i + case x == m: + y := binary.BigEndian.Uint64(data[i][8:]) + n := binary.BigEndian.Uint64(data[j][8:]) + if y < n { + m, j = x, i + } + } + } + min = data[j][:] + } + return min +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_null.go 
b/vendor/github.com/parquet-go/parquet-go/page_null.go new file mode 100644 index 00000000000..ff554b945f0 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_null.go @@ -0,0 +1,57 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/parquet-go/encoding" +) + +type nullPage struct { + typ Type + column int + count int +} + +func newNullPage(typ Type, columnIndex int16, numValues int32) *nullPage { + return &nullPage{ + typ: typ, + column: int(columnIndex), + count: int(numValues), + } +} + +func (page *nullPage) Type() Type { return page.typ } +func (page *nullPage) Column() int { return page.column } +func (page *nullPage) Dictionary() Dictionary { return nil } +func (page *nullPage) NumRows() int64 { return int64(page.count) } +func (page *nullPage) NumValues() int64 { return int64(page.count) } +func (page *nullPage) NumNulls() int64 { return int64(page.count) } +func (page *nullPage) Bounds() (min, max Value, ok bool) { return } +func (page *nullPage) Size() int64 { return 1 } +func (page *nullPage) Values() ValueReader { + return &nullPageValues{column: page.column, remain: page.count} +} +func (page *nullPage) Slice(i, j int64) Page { + return &nullPage{column: page.column, count: page.count - int(j-i)} +} +func (page *nullPage) RepetitionLevels() []byte { return nil } +func (page *nullPage) DefinitionLevels() []byte { return nil } +func (page *nullPage) Data() encoding.Values { return encoding.Values{} } + +type nullPageValues struct { + column int + remain int +} + +func (r *nullPageValues) ReadValues(values []Value) (n int, err error) { + columnIndex := ^int16(r.column) + values = values[:min(r.remain, len(values))] + for i := range values { + values[i] = Value{columnIndex: columnIndex} + } + r.remain -= len(values) + if r.remain == 0 { + err = io.EOF + } + return len(values), err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_optional.go b/vendor/github.com/parquet-go/parquet-go/page_optional.go new file mode 100644 
index 00000000000..9c51d1978ca --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_optional.go @@ -0,0 +1,112 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/parquet-go/encoding" +) + +type optionalPage struct { + base Page + maxDefinitionLevel byte + definitionLevels []byte +} + +func newOptionalPage(base Page, maxDefinitionLevel byte, definitionLevels []byte) *optionalPage { + return &optionalPage{ + base: base, + maxDefinitionLevel: maxDefinitionLevel, + definitionLevels: definitionLevels, + } +} + +func (page *optionalPage) Type() Type { return page.base.Type() } + +func (page *optionalPage) Column() int { return page.base.Column() } + +func (page *optionalPage) Dictionary() Dictionary { return page.base.Dictionary() } + +func (page *optionalPage) NumRows() int64 { return int64(len(page.definitionLevels)) } + +func (page *optionalPage) NumValues() int64 { return int64(len(page.definitionLevels)) } + +func (page *optionalPage) NumNulls() int64 { + return int64(countLevelsNotEqual(page.definitionLevels, page.maxDefinitionLevel)) +} + +func (page *optionalPage) Bounds() (min, max Value, ok bool) { return page.base.Bounds() } + +func (page *optionalPage) Size() int64 { return int64(len(page.definitionLevels)) + page.base.Size() } + +func (page *optionalPage) RepetitionLevels() []byte { return nil } + +func (page *optionalPage) DefinitionLevels() []byte { return page.definitionLevels } + +func (page *optionalPage) Data() encoding.Values { return page.base.Data() } + +func (page *optionalPage) Values() ValueReader { + return &optionalPageValues{ + page: page, + values: page.base.Values(), + } +} + +func (page *optionalPage) Slice(i, j int64) Page { + maxDefinitionLevel := page.maxDefinitionLevel + definitionLevels := page.definitionLevels + numNulls1 := int64(countLevelsNotEqual(definitionLevels[:i], maxDefinitionLevel)) + numNulls2 := int64(countLevelsNotEqual(definitionLevels[i:j], maxDefinitionLevel)) + return newOptionalPage( + 
page.base.Slice(i-numNulls1, j-(numNulls1+numNulls2)), + maxDefinitionLevel, + definitionLevels[i:j:j], + ) +} + +type optionalPageValues struct { + page *optionalPage + values ValueReader + offset int +} + +func (r *optionalPageValues) ReadValues(values []Value) (n int, err error) { + maxDefinitionLevel := r.page.maxDefinitionLevel + definitionLevels := r.page.definitionLevels + columnIndex := ^int16(r.page.Column()) + + for n < len(values) && r.offset < len(definitionLevels) { + for n < len(values) && r.offset < len(definitionLevels) && definitionLevels[r.offset] != maxDefinitionLevel { + values[n] = Value{ + definitionLevel: definitionLevels[r.offset], + columnIndex: columnIndex, + } + r.offset++ + n++ + } + + i := n + j := r.offset + for i < len(values) && j < len(definitionLevels) && definitionLevels[j] == maxDefinitionLevel { + i++ + j++ + } + + if n < i { + for j, err = r.values.ReadValues(values[n:i]); j > 0; j-- { + values[n].definitionLevel = maxDefinitionLevel + r.offset++ + n++ + } + // Do not return on an io.EOF here as we may still have null values to read. 
+ if err != nil && err != io.EOF { + return n, err + } + err = nil + } + } + + if r.offset == len(definitionLevels) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_repeated.go b/vendor/github.com/parquet-go/parquet-go/page_repeated.go new file mode 100644 index 00000000000..2d850a66e2a --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_repeated.go @@ -0,0 +1,172 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/parquet-go/encoding" +) + +type repeatedPage struct { + base Page + maxRepetitionLevel byte + maxDefinitionLevel byte + definitionLevels []byte + repetitionLevels []byte +} + +func newRepeatedPage(base Page, maxRepetitionLevel, maxDefinitionLevel byte, repetitionLevels, definitionLevels []byte) *repeatedPage { + return &repeatedPage{ + base: base, + maxRepetitionLevel: maxRepetitionLevel, + maxDefinitionLevel: maxDefinitionLevel, + definitionLevels: definitionLevels, + repetitionLevels: repetitionLevels, + } +} + +func (page *repeatedPage) Type() Type { return page.base.Type() } + +func (page *repeatedPage) Column() int { return page.base.Column() } + +func (page *repeatedPage) Dictionary() Dictionary { return page.base.Dictionary() } + +func (page *repeatedPage) NumRows() int64 { return int64(countLevelsEqual(page.repetitionLevels, 0)) } + +func (page *repeatedPage) NumValues() int64 { return int64(len(page.definitionLevels)) } + +func (page *repeatedPage) NumNulls() int64 { + return int64(countLevelsNotEqual(page.definitionLevels, page.maxDefinitionLevel)) +} + +func (page *repeatedPage) Bounds() (min, max Value, ok bool) { return page.base.Bounds() } + +func (page *repeatedPage) Size() int64 { + return int64(len(page.repetitionLevels)) + int64(len(page.definitionLevels)) + page.base.Size() +} + +func (page *repeatedPage) RepetitionLevels() []byte { return page.repetitionLevels } + +func (page *repeatedPage) DefinitionLevels() []byte { return page.definitionLevels } + +func 
(page *repeatedPage) Data() encoding.Values { return page.base.Data() } + +func (page *repeatedPage) Values() ValueReader { + return &repeatedPageValues{ + page: page, + values: page.base.Values(), + } +} + +func (page *repeatedPage) Slice(i, j int64) Page { + numRows := page.NumRows() + if i < 0 || i > numRows { + panic(errPageBoundsOutOfRange(i, j, numRows)) + } + if j < 0 || j > numRows { + panic(errPageBoundsOutOfRange(i, j, numRows)) + } + if i > j { + panic(errPageBoundsOutOfRange(i, j, numRows)) + } + + maxRepetitionLevel := page.maxRepetitionLevel + maxDefinitionLevel := page.maxDefinitionLevel + repetitionLevels := page.repetitionLevels + definitionLevels := page.definitionLevels + + rowIndex0 := 0 + rowIndex1 := len(repetitionLevels) + rowIndex2 := len(repetitionLevels) + + for k, def := range repetitionLevels { + if def == 0 { + if rowIndex0 == int(i) { + rowIndex1 = k + break + } + rowIndex0++ + } + } + + for k, def := range repetitionLevels[rowIndex1:] { + if def == 0 { + if rowIndex0 == int(j) { + rowIndex2 = rowIndex1 + k + break + } + rowIndex0++ + } + } + + numNulls1 := countLevelsNotEqual(definitionLevels[:rowIndex1], maxDefinitionLevel) + numNulls2 := countLevelsNotEqual(definitionLevels[rowIndex1:rowIndex2], maxDefinitionLevel) + + i = int64(rowIndex1 - numNulls1) + j = int64(rowIndex2 - (numNulls1 + numNulls2)) + + return newRepeatedPage( + page.base.Slice(i, j), + maxRepetitionLevel, + maxDefinitionLevel, + repetitionLevels[rowIndex1:rowIndex2:rowIndex2], + definitionLevels[rowIndex1:rowIndex2:rowIndex2], + ) +} + +type repeatedPageValues struct { + page *repeatedPage + values ValueReader + offset int +} + +func (r *repeatedPageValues) ReadValues(values []Value) (n int, err error) { + maxDefinitionLevel := r.page.maxDefinitionLevel + definitionLevels := r.page.definitionLevels + repetitionLevels := r.page.repetitionLevels + columnIndex := ^int16(r.page.Column()) + + // While we haven't exceeded the output buffer and we haven't exceeded the 
page size. + for n < len(values) && r.offset < len(definitionLevels) { + + // While we haven't exceeded the output buffer and we haven't exceeded the + // page size AND the current element's definitionLevel is not the + // maxDefinitionLevel (this is a null value), Create the zero values to be + // returned in this run. + for n < len(values) && r.offset < len(definitionLevels) && definitionLevels[r.offset] != maxDefinitionLevel { + values[n] = Value{ + repetitionLevel: repetitionLevels[r.offset], + definitionLevel: definitionLevels[r.offset], + columnIndex: columnIndex, + } + r.offset++ + n++ + } + + i := n + j := r.offset + // Get the length of the run of non-zero values to be copied. + for i < len(values) && j < len(definitionLevels) && definitionLevels[j] == maxDefinitionLevel { + i++ + j++ + } + + // Copy all the non-zero values in this run. + if n < i { + for j, err = r.values.ReadValues(values[n:i]); j > 0; j-- { + values[n].repetitionLevel = repetitionLevels[r.offset] + values[n].definitionLevel = maxDefinitionLevel + r.offset++ + n++ + } + if err != nil && err != io.EOF { + return n, err + } + err = nil + } + } + + if r.offset == len(definitionLevels) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_uint32.go b/vendor/github.com/parquet-go/parquet-go/page_uint32.go new file mode 100644 index 00000000000..fda24f1c154 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_uint32.go @@ -0,0 +1,108 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type uint32Page struct { + typ Type + values memory.SliceBuffer[uint32] + columnIndex int16 +} + +func newUint32Page(typ Type, columnIndex int16, numValues int32, values encoding.Values) *uint32Page { + return &uint32Page{ + typ: typ, + values: memory.SliceBufferFrom(values.Uint32()[:numValues]), + columnIndex: 
^columnIndex, + } +} + +func (page *uint32Page) Type() Type { return page.typ } + +func (page *uint32Page) Column() int { return int(^page.columnIndex) } + +func (page *uint32Page) Dictionary() Dictionary { return nil } + +func (page *uint32Page) NumRows() int64 { return int64(page.values.Len()) } + +func (page *uint32Page) NumValues() int64 { return int64(page.values.Len()) } + +func (page *uint32Page) NumNulls() int64 { return 0 } + +func (page *uint32Page) Size() int64 { return 4 * int64(page.values.Len()) } + +func (page *uint32Page) RepetitionLevels() []byte { return nil } + +func (page *uint32Page) DefinitionLevels() []byte { return nil } + +func (page *uint32Page) Data() encoding.Values { return encoding.Uint32Values(page.values.Slice()) } + +func (page *uint32Page) Values() ValueReader { return &uint32PageValues{page: page} } + +func (page *uint32Page) min() uint32 { return minUint32(page.values.Slice()) } + +func (page *uint32Page) max() uint32 { return maxUint32(page.values.Slice()) } + +func (page *uint32Page) bounds() (min, max uint32) { return boundsUint32(page.values.Slice()) } + +func (page *uint32Page) Bounds() (min, max Value, ok bool) { + if ok = page.values.Len() > 0; ok { + minUint32, maxUint32 := page.bounds() + min = page.makeValue(minUint32) + max = page.makeValue(maxUint32) + } + return min, max, ok +} + +func (page *uint32Page) Slice(i, j int64) Page { + sliced := &uint32Page{ + typ: page.typ, + columnIndex: page.columnIndex, + } + sliced.values.Append(page.values.Slice()[i:j]...) 
+ return sliced +} + +func (page *uint32Page) makeValue(v uint32) Value { + value := makeValueUint32(v) + value.columnIndex = page.columnIndex + return value +} + +type uint32PageValues struct { + page *uint32Page + offset int +} + +func (r *uint32PageValues) Read(b []byte) (n int, err error) { + n, err = r.ReadUint32s(unsafecast.Slice[uint32](b)) + return 4 * n, err +} + +func (r *uint32PageValues) ReadUint32s(values []uint32) (n int, err error) { + pageValues := r.page.values.Slice() + n = copy(values, pageValues[r.offset:]) + r.offset += n + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} + +func (r *uint32PageValues) ReadValues(values []Value) (n int, err error) { + pageValues := r.page.values.Slice() + for n < len(values) && r.offset < len(pageValues) { + values[n] = r.page.makeValue(pageValues[r.offset]) + r.offset++ + n++ + } + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/page_uint64.go b/vendor/github.com/parquet-go/parquet-go/page_uint64.go new file mode 100644 index 00000000000..645dc58d606 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/page_uint64.go @@ -0,0 +1,108 @@ +package parquet + +import ( + "io" + + "github.com/parquet-go/bitpack/unsafecast" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" +) + +type uint64Page struct { + typ Type + values memory.SliceBuffer[uint64] + columnIndex int16 +} + +func newUint64Page(typ Type, columnIndex int16, numValues int32, values encoding.Values) *uint64Page { + return &uint64Page{ + typ: typ, + values: memory.SliceBufferFrom(values.Uint64()[:numValues]), + columnIndex: ^columnIndex, + } +} + +func (page *uint64Page) Type() Type { return page.typ } + +func (page *uint64Page) Column() int { return int(^page.columnIndex) } + +func (page *uint64Page) Dictionary() Dictionary { return nil } + +func (page *uint64Page) NumRows() int64 { return 
int64(page.values.Len()) } + +func (page *uint64Page) NumValues() int64 { return int64(page.values.Len()) } + +func (page *uint64Page) NumNulls() int64 { return 0 } + +func (page *uint64Page) Size() int64 { return 8 * int64(page.values.Len()) } + +func (page *uint64Page) RepetitionLevels() []byte { return nil } + +func (page *uint64Page) DefinitionLevels() []byte { return nil } + +func (page *uint64Page) Data() encoding.Values { return encoding.Uint64Values(page.values.Slice()) } + +func (page *uint64Page) Values() ValueReader { return &uint64PageValues{page: page} } + +func (page *uint64Page) min() uint64 { return minUint64(page.values.Slice()) } + +func (page *uint64Page) max() uint64 { return maxUint64(page.values.Slice()) } + +func (page *uint64Page) bounds() (min, max uint64) { return boundsUint64(page.values.Slice()) } + +func (page *uint64Page) Bounds() (min, max Value, ok bool) { + if ok = page.values.Len() > 0; ok { + minUint64, maxUint64 := page.bounds() + min = page.makeValue(minUint64) + max = page.makeValue(maxUint64) + } + return min, max, ok +} + +func (page *uint64Page) Slice(i, j int64) Page { + sliced := &uint64Page{ + typ: page.typ, + columnIndex: page.columnIndex, + } + sliced.values.Append(page.values.Slice()[i:j]...) 
+ return sliced +} + +func (page *uint64Page) makeValue(v uint64) Value { + value := makeValueUint64(v) + value.columnIndex = page.columnIndex + return value +} + +type uint64PageValues struct { + page *uint64Page + offset int +} + +func (r *uint64PageValues) Read(b []byte) (n int, err error) { + n, err = r.ReadUint64s(unsafecast.Slice[uint64](b)) + return 8 * n, err +} + +func (r *uint64PageValues) ReadUint64s(values []uint64) (n int, err error) { + pageValues := r.page.values.Slice() + n = copy(values, pageValues[r.offset:]) + r.offset += n + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} + +func (r *uint64PageValues) ReadValues(values []Value) (n int, err error) { + pageValues := r.page.values.Slice() + for n < len(values) && r.offset < len(pageValues) { + values[n] = r.page.makeValue(pageValues[r.offset]) + r.offset++ + n++ + } + if r.offset == len(pageValues) { + err = io.EOF + } + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/parquet.go b/vendor/github.com/parquet-go/parquet-go/parquet.go new file mode 100644 index 00000000000..666b16eb18c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/parquet.go @@ -0,0 +1,122 @@ +// Copyright 2022 Twilio Inc. + +// Package parquet is a library for working with parquet files. For an overview +// of Parquet's qualities as a storage format, see this blog post: +// https://blog.twitter.com/engineering/en_us/a/2013/dremel-made-simple-with-parquet +// +// Or see the Parquet documentation: https://parquet.apache.org/docs/ +package parquet + +import ( + "io" + "os" + "reflect" +) + +// Read reads and returns rows from the parquet file in the given reader. +// +// The type T defines the type of rows read from r. T must be compatible with +// the file's schema or an error will be returned. The row type might represent +// a subset of the full schema, in which case only a subset of the columns will +// be loaded from r. 
+// +// This function is provided for convenience to facilitate reading of parquet +// files from arbitrary locations in cases where the data set fit in memory. +func Read[T any](r io.ReaderAt, size int64, options ...ReaderOption) (rows []T, err error) { + config, err := NewReaderConfig(options...) + if err != nil { + return nil, err + } + file, err := OpenFile(r, size) + if err != nil { + return nil, err + } + rows = make([]T, file.NumRows()) + reader := NewGenericReader[T](file, config) + n, err := reader.Read(rows) + if err == io.EOF { + err = nil + } + reader.Close() + return rows[:n], err +} + +// ReadFile reads rows of the parquet file at the given path. +// +// The type T defines the type of rows read from r. T must be compatible with +// the file's schema or an error will be returned. The row type might represent +// a subset of the full schema, in which case only a subset of the columns will +// be loaded from the file. +// +// This function is provided for convenience to facilitate reading of parquet +// files from the file system in cases where the data set fit in memory. +func ReadFile[T any](path string, options ...ReaderOption) (rows []T, err error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + s, err := f.Stat() + if err != nil { + return nil, err + } + return Read[T](f, s.Size()) +} + +// Write writes the given list of rows to a parquet file written to w. +// +// This function is provided for convenience to facilitate the creation of +// parquet files. +func Write[T any](w io.Writer, rows []T, options ...WriterOption) error { + config, err := NewWriterConfig(options...) + if err != nil { + return err + } + writer := NewGenericWriter[T](w, config) + if _, err := writer.Write(rows); err != nil { + return err + } + return writer.Close() +} + +// Write writes the given list of rows to a parquet file written to w. 
+// +// This function is provided for convenience to facilitate writing parquet +// files to the file system. +func WriteFile[T any](path string, rows []T, options ...WriterOption) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + return Write(f, rows, options...) +} + +func atLeastOne(size int) int { + return atLeast(size, 1) +} + +func atLeast(size, least int) int { + if size < least { + return least + } + return size +} + +func typeNameOf(t reflect.Type) string { + s1 := t.String() + s2 := t.Kind().String() + if s1 == s2 { + return s1 + } + return s1 + " (" + s2 + ")" +} + +func isZero(b []byte) bool { + for _, c := range b { + if c != 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/parquet-go/parquet-go/parquet_amd64.go b/vendor/github.com/parquet-go/parquet-go/parquet_amd64.go new file mode 100644 index 00000000000..64165571d3f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/parquet_amd64.go @@ -0,0 +1,18 @@ +//go:build !purego + +package parquet + +import "golang.org/x/sys/cpu" + +var ( + // This variable is used in x86 assembly source files to gate the use of + // AVX2 instructions depending on whether the CPU supports it. 
+ hasAVX2 = cpu.X86.HasAVX2 + hasAVX512F = cpu.X86.HasAVX512F + hasAVX512VL = cpu.X86.HasAVX512F && cpu.X86.HasAVX512VL + // For min/max functions over big-endian 128 bits values, we need the + // follwing instructions from the DQ set: + // * VPBROADCASTQ (with 64 bits source register) + // * VBROADCASTI64X2 + hasAVX512MinMaxBE128 = cpu.X86.HasAVX512F && cpu.X86.HasAVX512DQ +) diff --git a/vendor/github.com/parquet-go/parquet-go/print.go b/vendor/github.com/parquet-go/parquet-go/print.go new file mode 100644 index 00000000000..0de9eeb83be --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/print.go @@ -0,0 +1,363 @@ +package parquet + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" + "text/tabwriter" +) + +func PrintSchema(w io.Writer, name string, node Node) error { + return PrintSchemaIndent(w, name, node, "\t", "\n") +} + +func PrintSchemaIndent(w io.Writer, name string, node Node, pattern, newline string) error { + pw := &printWriter{writer: w} + pi := &printIndent{} + + if node.Leaf() { + printSchemaWithIndent(pw, "", node, pi) + } else { + pw.WriteString("message ") + + if name == "" { + pw.WriteString("{") + } else { + pw.WriteString(name) + pw.WriteString(" {") + } + + pi.pattern = pattern + pi.newline = newline + pi.repeat = 1 + pi.writeNewLine(pw) + + for _, field := range node.Fields() { + printSchemaWithIndent(pw, field.Name(), field, pi) + pi.writeNewLine(pw) + } + + pw.WriteString("}") + } + + return pw.err +} + +func printSchemaWithIndent(w io.StringWriter, name string, node Node, indent *printIndent) { + indent.writeTo(w) + + switch { + case node.Optional(): + w.WriteString("optional ") + case node.Repeated(): + w.WriteString("repeated ") + default: + w.WriteString("required ") + } + + if node.Leaf() { + t := node.Type() + switch t.Kind() { + case Boolean: + w.WriteString("boolean") + case Int32: + w.WriteString("int32") + case Int64: + w.WriteString("int64") + case Int96: + w.WriteString("int96") + case Float: + 
w.WriteString("float") + case Double: + w.WriteString("double") + case ByteArray: + w.WriteString("binary") + case FixedLenByteArray: + w.WriteString("fixed_len_byte_array(") + w.WriteString(strconv.Itoa(t.Length())) + w.WriteString(")") + default: + w.WriteString("") + } + + if name != "" { + w.WriteString(" ") + w.WriteString(name) + } + + if annotation := annotationOf(node); annotation != "" { + w.WriteString(" (") + w.WriteString(annotation) + w.WriteString(")") + } + + if id := node.ID(); id != 0 { + w.WriteString(" = ") + w.WriteString(strconv.Itoa(id)) + } + + w.WriteString(";") + } else { + w.WriteString("group") + + if name != "" { + w.WriteString(" ") + w.WriteString(name) + } + + if annotation := annotationOf(node); annotation != "" { + w.WriteString(" (") + w.WriteString(annotation) + w.WriteString(")") + } + + if id := node.ID(); id != 0 { + w.WriteString(" = ") + w.WriteString(strconv.Itoa(id)) + } + + w.WriteString(" {") + indent.writeNewLine(w) + indent.push() + + for _, field := range node.Fields() { + printSchemaWithIndent(w, field.Name(), field, indent) + indent.writeNewLine(w) + } + + indent.pop() + indent.writeTo(w) + w.WriteString("}") + } +} + +func annotationOf(node Node) string { + if logicalType := node.Type().LogicalType(); logicalType != nil { + return logicalType.String() + } + return "" +} + +type printIndent struct { + pattern string + newline string + repeat int +} + +func (i *printIndent) push() { + i.repeat++ +} + +func (i *printIndent) pop() { + i.repeat-- +} + +func (i *printIndent) writeTo(w io.StringWriter) { + if i.pattern != "" { + for n := i.repeat; n > 0; n-- { + w.WriteString(i.pattern) + } + } +} + +func (i *printIndent) writeNewLine(w io.StringWriter) { + if i.newline != "" { + w.WriteString(i.newline) + } +} + +type printWriter struct { + writer io.Writer + err error +} + +func (w *printWriter) Write(b []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + n, err := w.writer.Write(b) + if err != nil { + 
w.err = err + } + return n, err +} + +func (w *printWriter) WriteString(s string) (int, error) { + if w.err != nil { + return 0, w.err + } + n, err := io.WriteString(w.writer, s) + if err != nil { + w.err = err + } + return n, err +} + +var ( + _ io.StringWriter = (*printWriter)(nil) +) + +func sprint(name string, node Node) string { + s := new(strings.Builder) + PrintSchema(s, name, node) + return s.String() +} + +func PrintRowGroup(w io.Writer, rowGroup RowGroup) error { + schema := rowGroup.Schema() + pw := &printWriter{writer: w} + tw := tabwriter.NewWriter(pw, 0, 0, 2, ' ', 0) + + columns := schema.Columns() + header := make([]string, len(columns)) + footer := make([]string, len(columns)) + + for i, column := range columns { + leaf, _ := schema.Lookup(column...) + columnType := leaf.Node.Type() + + header[i] = strings.Join(column, ".") + footer[i] = columnType.String() + } + + // Print header + for i, h := range header { + if i > 0 { + pw.WriteString("\t") + } + pw.WriteString(h) + } + pw.WriteString("\n") + + // Print separator line + for i := range header { + if i > 0 { + pw.WriteString("\t") + } + pw.WriteString(strings.Repeat("-", len(header[i]))) + } + pw.WriteString("\n") + + rowbuf := make([]Row, defaultRowBufferSize) + cells := make([]string, 0, len(columns)) + rows := rowGroup.Rows() + defer rows.Close() + + for { + n, err := rows.ReadRows(rowbuf) + + for _, row := range rowbuf[:n] { + cells = cells[:0] + + for _, value := range row { + columnIndex := value.Column() + + for len(cells) <= columnIndex { + cells = append(cells, "") + } + + if cells[columnIndex] == "" { + cells[columnIndex] = value.String() + } else { + cells[columnIndex] += "," + value.String() + } + } + + // Print row + for i, cell := range cells { + if i > 0 { + tw.Write([]byte("\t")) + } + tw.Write([]byte(cell)) + } + tw.Write([]byte("\n")) + } + + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + } + + // Print footer + for i := range header { + if i > 0 { 
+ pw.WriteString("\t") + } + pw.WriteString(strings.Repeat("-", len(header[i]))) + } + pw.WriteString("\n") + for i, f := range footer { + if i > 0 { + pw.WriteString("\t") + } + pw.WriteString(f) + } + pw.WriteString("\n") + + tw.Flush() + fmt.Fprintf(pw, "%d rows\n\n", rowGroup.NumRows()) + return pw.err +} + +func PrintColumnChunk(w io.Writer, columnChunk ColumnChunk) error { + pw := &printWriter{writer: w} + pw.WriteString(columnChunk.Type().String()) + pw.WriteString("\n--------------------------------------------------------------------------------\n") + + values := [42]Value{} + pages := columnChunk.Pages() + numPages, numValues := int64(0), int64(0) + + defer pages.Close() + for { + p, err := pages.ReadPage() + if err != nil { + if !errors.Is(err, io.EOF) { + return err + } + break + } + + numPages++ + n := p.NumValues() + if n == 0 { + fmt.Fprintf(pw, "*** page %d, no values ***\n", numPages) + } else { + fmt.Fprintf(pw, "*** page %d, values %d to %d ***\n", numPages, numValues+1, numValues+n) + printPage(w, p, values[:], numValues+1) + numValues += n + } + + pw.WriteString("\n") + } + + return pw.err +} + +func PrintPage(w io.Writer, page Page) error { + return printPage(w, page, make([]Value, 42), 0) +} + +func printPage(w io.Writer, page Page, values []Value, numValues int64) error { + r := page.Values() + for { + n, err := r.ReadValues(values[:]) + for i, v := range values[:n] { + _, err := fmt.Fprintf(w, "value %d: %+v\n", numValues+int64(i), v) + if err != nil { + return err + } + } + if err != nil { + if errors.Is(err, io.EOF) { + err = nil + } + return err + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/reader.go b/vendor/github.com/parquet-go/parquet-go/reader.go new file mode 100644 index 00000000000..fd4dc31b40f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/reader.go @@ -0,0 +1,671 @@ +package parquet + +import ( + "errors" + "fmt" + "io" + "reflect" + + "github.com/parquet-go/parquet-go/format" +) + +// 
GenericReader is similar to a Reader but uses a type parameter to define the +// Go type representing the schema of rows being read. +// +// See GenericWriter for details about the benefits over the classic Reader API. +type GenericReader[T any] struct { + base Reader + read readFunc[T] +} + +// NewGenericReader is like NewReader but returns GenericReader[T] suited to write +// rows of Go type T. +// +// The type parameter T should be a map, struct, or any. Any other types will +// cause a panic at runtime. Type checking is a lot more effective when the +// generic parameter is a struct type, using map and interface types is somewhat +// similar to using a Writer. +// +// If the option list may explicitly declare a schema, it must be compatible +// with the schema generated from T. +func NewGenericReader[T any](input io.ReaderAt, options ...ReaderOption) *GenericReader[T] { + c, err := NewReaderConfig(options...) + if err != nil { + panic(err) + } + + f, err := openFile(input) + if err != nil { + panic(err) + } + + rowGroup := fileRowGroupOf(f) + + t := typeOf[T]() + if c.Schema == nil { + if t == nil { + c.Schema = rowGroup.Schema() + } else { + c.Schema = schemaOf(dereference(t), c.SchemaConfig.StructTags...) + } + } + + r := &GenericReader[T]{ + base: Reader{ + file: reader{ + file: f, + schema: c.Schema, + rowGroup: rowGroup, + }, + }, + } + + if !EqualNodes(c.Schema, f.schema) { + r.base.file.rowGroup = convertRowGroupTo(r.base.file.rowGroup, c.Schema) + } + + r.base.read.init(r.base.file.schema, r.base.file.rowGroup) + r.read = readFuncOf[T](t, r.base.file.schema) + return r +} + +func NewGenericRowGroupReader[T any](rowGroup RowGroup, options ...ReaderOption) *GenericReader[T] { + c, err := NewReaderConfig(options...) + if err != nil { + panic(err) + } + + t := typeOf[T]() + if c.Schema == nil { + if t == nil { + c.Schema = rowGroup.Schema() + } else { + c.Schema = schemaOf(dereference(t), c.SchemaConfig.StructTags...) 
+ } + } + + r := &GenericReader[T]{ + base: Reader{ + file: reader{ + schema: c.Schema, + rowGroup: rowGroup, + }, + }, + } + + if !EqualNodes(c.Schema, rowGroup.Schema()) { + r.base.file.rowGroup = convertRowGroupTo(r.base.file.rowGroup, c.Schema) + } + + r.base.read.init(r.base.file.schema, r.base.file.rowGroup) + r.read = readFuncOf[T](t, r.base.file.schema) + return r +} + +func (r *GenericReader[T]) Reset() { + r.base.Reset() +} + +// Read reads the next rows from the reader into the given rows slice up to len(rows). +// +// The returned values are safe to reuse across Read calls and do not share +// memory with the reader's underlying page buffers. +// +// The method returns the number of rows read and io.EOF when no more rows +// can be read from the reader. +func (r *GenericReader[T]) Read(rows []T) (int, error) { + return r.read(r, rows) +} + +func (r *GenericReader[T]) ReadRows(rows []Row) (int, error) { + return r.base.ReadRows(rows) +} + +func (r *GenericReader[T]) Schema() *Schema { + return r.base.Schema() +} + +func (r *GenericReader[T]) NumRows() int64 { + return r.base.NumRows() +} + +func (r *GenericReader[T]) SeekToRow(rowIndex int64) error { + return r.base.SeekToRow(rowIndex) +} + +func (r *GenericReader[T]) Close() error { + return r.base.Close() +} + +// File returns a FileView of the underlying parquet file. +func (r *GenericReader[T]) File() FileView { + return r.base.File() +} + +// readRows reads the next rows from the reader into the given rows slice up to len(rows). +// +// The returned values are safe to reuse across readRows calls and do not share +// memory with the reader's underlying page buffers. +// +// The method returns the number of rows read and io.EOF when no more rows +// can be read from the reader. 
+func (r *GenericReader[T]) readRows(rows []T) (int, error) { + nRequest := len(rows) + if cap(r.base.rowbuf) < nRequest { + r.base.rowbuf = make([]Row, nRequest) + } else { + r.base.rowbuf = r.base.rowbuf[:nRequest] + } + + var n, nTotal int + var err error + for { + // ReadRows reads the minimum remaining rows in a column page across all columns + // of the underlying reader, unless the length of the slice passed to it is smaller. + // In that case, ReadRows will read the number of rows equal to the length of the + // given slice argument. We limit that length to never be more than requested + // because sequential reads can cross page boundaries. + n, err = r.base.ReadRows(r.base.rowbuf[:nRequest-nTotal]) + if n > 0 { + schema := r.base.Schema() + + for i, row := range r.base.rowbuf[:n] { + if err2 := schema.Reconstruct(&rows[nTotal+i], row); err2 != nil { + return nTotal + i, err2 + } + } + } + nTotal += n + if n == 0 || nTotal == nRequest || err != nil { + break + } + } + + return nTotal, err +} + +var ( + _ Rows = (*GenericReader[any])(nil) + _ RowReaderWithSchema = (*Reader)(nil) + + _ Rows = (*GenericReader[struct{}])(nil) + _ RowReaderWithSchema = (*GenericReader[struct{}])(nil) + + _ Rows = (*GenericReader[map[struct{}]struct{}])(nil) + _ RowReaderWithSchema = (*GenericReader[map[struct{}]struct{}])(nil) +) + +type readFunc[T any] func(*GenericReader[T], []T) (int, error) + +func readFuncOf[T any](t reflect.Type, schema *Schema) readFunc[T] { + if t == nil { + return (*GenericReader[T]).readRows + } + switch t.Kind() { + case reflect.Interface, reflect.Map: + return (*GenericReader[T]).readRows + + case reflect.Struct: + return (*GenericReader[T]).readRows + + case reflect.Pointer: + if e := t.Elem(); e.Kind() == reflect.Struct { + return (*GenericReader[T]).readRows + } + } + panic("cannot create reader for values of type " + t.String()) +} + +// Deprecated: A Reader reads Go values from parquet files. 
+// +// This example showcases a typical use of parquet readers: +// +// reader := parquet.NewReader(file) +// rows := []RowType{} +// for { +// row := RowType{} +// err := reader.Read(&row) +// if err != nil { +// if err == io.EOF { +// break +// } +// ... +// } +// rows = append(rows, row) +// } +// if err := reader.Close(); err != nil { +// ... +// } +// +// For programs building with Go 1.18 or later, the GenericReader[T] type +// supersedes this one. +type Reader struct { + seen reflect.Type + file reader + read reader + rowIndex int64 + rowbuf []Row +} + +// NewReader constructs a parquet reader reading rows from the given +// io.ReaderAt. +// +// In order to read parquet rows, the io.ReaderAt must be converted to a +// parquet.File. If r is already a parquet.File it is used directly; otherwise, +// the io.ReaderAt value is expected to either have a `Size() int64` method or +// implement io.Seeker in order to determine its size. +// +// The function panics if the reader configuration is invalid. Programs that +// cannot guarantee the validity of the options passed to NewReader should +// construct the reader configuration independently prior to calling this +// function: +// +// config, err := parquet.NewReaderConfig(options...) +// if err != nil { +// // handle the configuration error +// ... +// } else { +// // this call to create a reader is guaranteed not to panic +// reader := parquet.NewReader(input, config) +// ... +// } +func NewReader(input io.ReaderAt, options ...ReaderOption) *Reader { + c, err := NewReaderConfig(options...) 
+ if err != nil { + panic(err) + } + + f, err := openFile(input) + if err != nil { + panic(err) + } + + r := &Reader{ + file: reader{ + file: f, + schema: f.schema, + rowGroup: fileRowGroupOf(f), + }, + } + + if c.Schema != nil { + r.file.schema = c.Schema + r.file.rowGroup = convertRowGroupTo(r.file.rowGroup, c.Schema) + } + + r.read.init(r.file.schema, r.file.rowGroup) + return r +} + +func openFile(input io.ReaderAt) (*File, error) { + f, _ := input.(*File) + if f != nil { + return f, nil + } + n, err := sizeOf(input) + if err != nil { + return nil, err + } + return OpenFile(input, n) +} + +func fileRowGroupOf(f *File) RowGroup { + switch rowGroups := f.RowGroups(); len(rowGroups) { + case 0: + return newEmptyRowGroup(f.Schema()) + case 1: + return rowGroups[0] + default: + // TODO: should we attempt to merge the row groups via MergeRowGroups + // to preserve the global order of sorting columns within the file? + return MultiRowGroup(rowGroups...) + } +} + +// NewRowGroupReader constructs a new Reader which reads rows from the RowGroup +// passed as argument. +func NewRowGroupReader(rowGroup RowGroup, options ...ReaderOption) *Reader { + c, err := NewReaderConfig(options...) + if err != nil { + panic(err) + } + + if c.Schema != nil { + rowGroup = convertRowGroupTo(rowGroup, c.Schema) + } + + r := &Reader{ + file: reader{ + schema: rowGroup.Schema(), + rowGroup: rowGroup, + }, + } + + r.read.init(r.file.schema, r.file.rowGroup) + return r +} + +func convertRowGroupTo(rowGroup RowGroup, schema *Schema) RowGroup { + if rowGroupSchema := rowGroup.Schema(); !EqualNodes(schema, rowGroupSchema) { + conv, err := Convert(schema, rowGroupSchema) + if err != nil { + // TODO: this looks like something we should not be panicking on, + // but the current NewReader API does not offer a mechanism to + // report errors. 
+ panic(err) + } + rowGroup = ConvertRowGroup(rowGroup, conv) + } + return rowGroup +} + +func sizeOf(r io.ReaderAt) (int64, error) { + switch f := r.(type) { + case interface{ Size() int64 }: + return f.Size(), nil + case io.Seeker: + off, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + end, err := f.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + _, err = f.Seek(off, io.SeekStart) + return end, err + default: + return 0, fmt.Errorf("cannot determine length of %T", r) + } +} + +// Reset repositions the reader at the beginning of the underlying parquet file. +func (r *Reader) Reset() { + r.file.Reset() + r.read.Reset() + r.rowIndex = 0 + clearRows(r.rowbuf) +} + +// Read reads the next row from r. The type of the row must match the schema +// of the underlying parquet file or an error will be returned. +// +// The method returns io.EOF when no more rows can be read from r. +func (r *Reader) Read(row any) error { + if rowType := dereference(reflect.TypeOf(row)); rowType.Kind() == reflect.Struct { + if r.seen != rowType { + if err := r.updateReadSchema(rowType); err != nil { + return fmt.Errorf("cannot read parquet row into go value of type %T: %w", row, err) + } + } + } + + if err := r.read.SeekToRow(r.rowIndex); err != nil { + if errors.Is(err, io.ErrClosedPipe) { + return io.EOF + } + return fmt.Errorf("seeking reader to row %d: %w", r.rowIndex, err) + } + + if cap(r.rowbuf) == 0 { + r.rowbuf = make([]Row, 1) + } else { + r.rowbuf = r.rowbuf[:1] + } + + n, err := r.read.ReadRows(r.rowbuf[:]) + if n == 0 { + return err + } + + r.rowIndex++ + return r.read.schema.Reconstruct(row, r.rowbuf[0]) +} + +func (r *Reader) updateReadSchema(rowType reflect.Type) error { + schema := schemaOf(rowType) + + if EqualNodes(schema, r.file.schema) { + r.read.init(schema, r.file.rowGroup) + } else { + conv, err := Convert(schema, r.file.schema) + if err != nil { + return err + } + r.read.init(schema, ConvertRowGroup(r.file.rowGroup, conv)) + } + + 
r.seen = rowType + return nil +} + +// ReadRows reads the next rows from r into the given Row buffer. +// +// The returned values are laid out in the order expected by the +// parquet.(*Schema).Reconstruct method. +// +// The method returns io.EOF when no more rows can be read from r. +func (r *Reader) ReadRows(rows []Row) (int, error) { + if err := r.file.SeekToRow(r.rowIndex); err != nil { + return 0, err + } + n, err := r.file.ReadRows(rows) + r.rowIndex += int64(n) + return n, err +} + +// Schema returns the schema of rows read by r. +func (r *Reader) Schema() *Schema { return r.file.schema } + +// NumRows returns the number of rows that can be read from r. +func (r *Reader) NumRows() int64 { return r.file.rowGroup.NumRows() } + +// SeekToRow positions r at the given row index. +func (r *Reader) SeekToRow(rowIndex int64) error { + if err := r.file.SeekToRow(rowIndex); err != nil { + return err + } + r.rowIndex = rowIndex + return nil +} + +// Close closes the reader, preventing more rows from being read. +func (r *Reader) Close() error { + if err := r.read.Close(); err != nil { + return err + } + if err := r.file.Close(); err != nil { + return err + } + return nil +} + +// reader is a subtype used in the implementation of Reader to support the two +// use cases of either reading rows calling the ReadRow method (where full rows +// are read from the underlying parquet file), or calling the Read method to +// read rows into Go values, potentially doing partial reads on a subset of the +// columns due to using a converted row group view. 
+type reader struct { + file *File + schema *Schema + rowGroup RowGroup + rows Rows + rowIndex int64 +} + +func (r *reader) init(schema *Schema, rowGroup RowGroup) { + r.schema = schema + r.rowGroup = rowGroup + r.Reset() +} + +func (r *reader) Reset() { + r.rowIndex = 0 + + if rows, ok := r.rows.(interface{ Reset() }); ok { + // This optimization works for the common case where the underlying type + // of the Rows instance is rowGroupRows, which should be true in most + // cases since even external implementations of the RowGroup interface + // can construct values of this type via the NewRowGroupRowReader + // function. + // + // Foreign implementations of the Rows interface may also define a Reset + // method in order to participate in this optimization. + rows.Reset() + return + } + + if r.rows != nil { + r.rows.Close() + r.rows = nil + } +} + +func (r *reader) ReadRows(rows []Row) (int, error) { + if r.rowGroup == nil { + return 0, io.EOF + } + if r.rows == nil { + r.rows = r.rowGroup.Rows() + if r.rowIndex > 0 { + if err := r.rows.SeekToRow(r.rowIndex); err != nil { + return 0, err + } + } + } + n, err := r.rows.ReadRows(rows) + r.rowIndex += int64(n) + return n, err +} + +func (r *reader) SeekToRow(rowIndex int64) error { + if r.rowGroup == nil { + return io.ErrClosedPipe + } + if rowIndex != r.rowIndex { + if r.rows != nil { + if err := r.rows.SeekToRow(rowIndex); err != nil { + return err + } + } + r.rowIndex = rowIndex + } + return nil +} + +func (r *reader) Close() (err error) { + r.rowGroup = nil + if r.rows != nil { + err = r.rows.Close() + } + return err +} + +var ( + _ Rows = (*Reader)(nil) + _ RowReaderWithSchema = (*Reader)(nil) + + _ RowReader = (*reader)(nil) + _ RowSeeker = (*reader)(nil) +) + +type readerFileView struct { + reader *reader + schema *Schema +} + +// File returns a FileView of the parquet file being read. +// Only available if Reader was created with a File. 
+func (r *Reader) File() FileView { + if r.file.schema == nil || r.file.file == nil { + return nil + } + return &readerFileView{ + &r.file, + r.file.schema, + } +} + +func (r *readerFileView) Metadata() *format.FileMetaData { + if r.reader.file != nil { + return r.reader.file.Metadata() + } + return nil +} + +func (r *readerFileView) Schema() *Schema { + return r.schema +} + +func (r *readerFileView) NumRows() int64 { + return r.reader.rowGroup.NumRows() +} + +func (r *readerFileView) Lookup(key string) (string, bool) { + if meta := r.Metadata(); meta != nil { + return lookupKeyValueMetadata(meta.KeyValueMetadata, key) + } + return "", false +} + +func (r *readerFileView) Size() int64 { + if r.reader.file != nil { + return r.reader.file.Size() + } + return 0 +} + +func (r *readerFileView) ColumnIndexes() []format.ColumnIndex { + if r.reader.file != nil { + return r.reader.file.ColumnIndexes() + } + return nil +} + +func (r *readerFileView) OffsetIndexes() []format.OffsetIndex { + if r.reader.file != nil { + return r.reader.file.OffsetIndexes() + } + return nil +} + +func (r *readerFileView) Root() *Column { + if meta := r.Metadata(); meta != nil { + root, _ := openColumns(nil, meta, r.ColumnIndexes(), r.OffsetIndexes()) + return root + } + return nil +} + +func (r *readerFileView) RowGroups() []RowGroup { + file := r.reader.file + if file == nil { + return nil + } + columns := makeLeafColumns(r.Root()) + fileRowGroups := makeFileRowGroups(file, columns) + return makeRowGroups(fileRowGroups) +} + +func makeLeafColumns(root *Column) []*Column { + columns := make([]*Column, 0, numLeafColumnsOf(root)) + root.forEachLeaf(func(c *Column) { columns = append(columns, c) }) + return columns +} + +func makeFileRowGroups(file *File, columns []*Column) []FileRowGroup { + rowGroups := file.metadata.RowGroups + fileRowGroups := make([]FileRowGroup, len(rowGroups)) + for i := range fileRowGroups { + fileRowGroups[i].init(file, columns, &rowGroups[i]) + } + return fileRowGroups +} 
+ +func makeRowGroups(fileRowGroups []FileRowGroup) []RowGroup { + rowGroups := make([]RowGroup, len(fileRowGroups)) + for i := range fileRowGroups { + rowGroups[i] = &fileRowGroups[i] + } + return rowGroups +} diff --git a/vendor/github.com/parquet-go/parquet-go/row.go b/vendor/github.com/parquet-go/parquet-go/row.go new file mode 100644 index 00000000000..a662d1fb12d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/row.go @@ -0,0 +1,903 @@ +package parquet + +import ( + "errors" + "fmt" + "io" + "reflect" +) + +const ( + defaultRowBufferSize = 42 +) + +// Row represents a parquet row as a slice of values. +// +// Each value should embed a column index, repetition level, and definition +// level allowing the program to determine how to reconstruct the original +// object from the row. +type Row []Value + +// MakeRow constructs a Row from a list of column values. +// +// The function panics if the column indexes of values in each column do not +// match their position in the argument list. +func MakeRow(columns ...[]Value) Row { return AppendRow(nil, columns...) } + +// AppendRow appends to row the given list of column values. +// +// AppendRow can be used to construct a Row value from columns, while retaining +// the underlying memory buffer to avoid reallocation; for example: +// +// The function panics if the column indexes of values in each column do not +// match their position in the argument list. +func AppendRow(row Row, columns ...[]Value) Row { + numValues := 0 + + for expectedColumnIndex, column := range columns { + numValues += len(column) + + for _, value := range column { + if value.columnIndex != ^int16(expectedColumnIndex) { + panic(fmt.Sprintf("value of column %d has column index %d", expectedColumnIndex, value.Column())) + } + } + } + + if capacity := cap(row) - len(row); capacity < numValues { + row = append(make(Row, 0, len(row)+numValues), row...) 
+ } + + return appendRow(row, columns) +} + +func appendRow(row Row, columns [][]Value) Row { + for _, column := range columns { + row = append(row, column...) + } + return row +} + +// Clone creates a copy of the row which shares no pointers. +// +// This method is useful to capture rows after a call to RowReader.ReadRows when +// values need to be retained before the next call to ReadRows or after the lifespan +// of the reader. +func (row Row) Clone() Row { + clone := make(Row, len(row)) + for i := range row { + clone[i] = row[i].Clone() + } + return clone +} + +// Equal returns true if row and other contain the same sequence of values. +func (row Row) Equal(other Row) bool { + if len(row) != len(other) { + return false + } + for i := range row { + if !Equal(row[i], other[i]) { + return false + } + if row[i].repetitionLevel != other[i].repetitionLevel { + return false + } + if row[i].definitionLevel != other[i].definitionLevel { + return false + } + if row[i].columnIndex != other[i].columnIndex { + return false + } + } + return true +} + +// Range calls f for each column of row. +func (row Row) Range(f func(columnIndex int, columnValues []Value) bool) { + columnIndex := 0 + + for i := 0; i < len(row); { + j := i + 1 + + for j < len(row) && row[j].columnIndex == ^int16(columnIndex) { + j++ + } + + if !f(columnIndex, row[i:j:j]) { + break + } + + columnIndex++ + i = j + } +} + +// RowSeeker is an interface implemented by readers of parquet rows which can be +// positioned at a specific row index. +type RowSeeker interface { + // Positions the stream on the given row index. + // + // Some implementations of the interface may only allow seeking forward. + // + // The method returns io.ErrClosedPipe if the stream had already been closed. + SeekToRow(int64) error +} + +// RowReader reads a sequence of parquet rows. +type RowReader interface { + // ReadRows reads rows from the reader, returning the number of rows read + // into the buffer, and any error that occurred. 
+ // + // When all rows have been read, the reader returns io.EOF to indicate the + // end of the sequence. It is valid for the reader to return both a non-zero + // number of rows and a non-nil error (including io.EOF). + // + // The buffer of rows passed as argument will be used to store values of + // each row read from the reader. If the rows are not nil, the backing array + // of the slices will be used as an optimization to avoid re-allocating new + // arrays. + // + // The application is expected to handle the case where ReadRows returns + // less rows than requested and no error, by looking at the first returned + // value from ReadRows, which is the number of rows that were read. + ReadRows([]Row) (int, error) +} + +// RowReaderFrom reads parquet rows from reader. +type RowReaderFrom interface { + ReadRowsFrom(RowReader) (int64, error) +} + +// RowReaderWithSchema is an extension of the RowReader interface which +// advertises the schema of rows returned by ReadRow calls. +type RowReaderWithSchema interface { + RowReader + Schema() *Schema +} + +// RowReadSeeker is an interface implemented by row readers which support +// seeking to arbitrary row positions. +type RowReadSeeker interface { + RowReader + RowSeeker +} + +// RowReadCloser is an interface implemented by row readers which require +// closing when done. +type RowReadCloser interface { + RowReader + io.Closer +} + +// RowReadSeekCloser is an interface implemented by row readers which support +// seeking to arbitrary row positions and required closing the reader when done. +type RowReadSeekCloser interface { + RowReader + RowSeeker + io.Closer +} + +// RowWriter writes parquet rows to an underlying medium. +type RowWriter interface { + // Writes rows to the writer, returning the number of rows written and any + // error that occurred. 
+ // + // Because columnar operations operate on independent columns of values, + // writes of rows may not be atomic operations, and could result in some + // rows being partially written. The method returns the number of rows that + // were successfully written, but if an error occurs, values of the row(s) + // that failed to be written may have been partially committed to their + // columns. For that reason, applications should consider a write error as + // fatal and assume that they need to discard the state, they cannot retry + // the write nor recover the underlying file. + WriteRows([]Row) (int, error) +} + +// RowWriterTo writes parquet rows to a writer. +type RowWriterTo interface { + WriteRowsTo(RowWriter) (int64, error) +} + +// RowWriterWithSchema is an extension of the RowWriter interface which +// advertises the schema of rows expected to be passed to WriteRow calls. +type RowWriterWithSchema interface { + RowWriter + Schema() *Schema +} + +// RowReaderFunc is a function type implementing the RowReader interface. +type RowReaderFunc func([]Row) (int, error) + +func (f RowReaderFunc) ReadRows(rows []Row) (int, error) { return f(rows) } + +// RowWriterFunc is a function type implementing the RowWriter interface. +type RowWriterFunc func([]Row) (int, error) + +func (f RowWriterFunc) WriteRows(rows []Row) (int, error) { return f(rows) } + +// MultiRowWriter constructs a RowWriter which dispatches writes to all the +// writers passed as arguments. +// +// When writing rows, if any of the writers returns an error, the operation is +// aborted and the error returned. If one of the writers did not error, but did +// not write all the rows, the operation is aborted and io.ErrShortWrite is +// returned. +// +// Rows are written sequentially to each writer in the order they are given to +// this function. 
+func MultiRowWriter(writers ...RowWriter) RowWriter { + m := &multiRowWriter{writers: make([]RowWriter, len(writers))} + copy(m.writers, writers) + return m +} + +type multiRowWriter struct{ writers []RowWriter } + +func (m *multiRowWriter) WriteRows(rows []Row) (int, error) { + for _, w := range m.writers { + n, err := w.WriteRows(rows) + if err != nil { + return n, err + } + if n != len(rows) { + return n, io.ErrShortWrite + } + } + return len(rows), nil +} + +type forwardRowSeeker struct { + rows RowReader + seek int64 + index int64 +} + +func (r *forwardRowSeeker) ReadRows(rows []Row) (int, error) { + for { + n, err := r.rows.ReadRows(rows) + + if n > 0 && r.index < r.seek { + skip := r.seek - r.index + r.index += int64(n) + if skip >= int64(n) { + continue + } + + for i, j := 0, int(skip); j < n; i++ { + rows[i] = append(rows[i][:0], rows[j]...) + } + + n -= int(skip) + } + + return n, err + } +} + +func (r *forwardRowSeeker) SeekToRow(rowIndex int64) error { + if rowIndex >= r.index { + r.seek = rowIndex + return nil + } + return fmt.Errorf( + "SeekToRow: %T does not implement parquet.RowSeeker: cannot seek backward from row %d to %d", + r.rows, + r.index, + rowIndex, + ) +} + +// CopyRows copies rows from src to dst. +// +// The underlying types of src and dst are tested to determine if they expose +// information about the schema of rows that are read and expected to be +// written. If the schema information are available but do not match, the +// function will attempt to automatically convert the rows from the source +// schema to the destination. +// +// As an optimization, the src argument may implement RowWriterTo to bypass +// the default row copy logic and provide its own. The dst argument may also +// implement RowReaderFrom for the same purpose. +// +// The function returns the number of rows written, or any error encountered +// other than io.EOF. 
+func CopyRows(dst RowWriter, src RowReader) (int64, error) { + return copyRows(dst, src, nil) +} + +func copyRows(dst RowWriter, src RowReader, buf []Row) (written int64, err error) { + targetSchema := targetSchemaOf(dst) + sourceSchema := sourceSchemaOf(src) + + if targetSchema != nil && sourceSchema != nil { + if !EqualNodes(targetSchema, sourceSchema) { + conv, err := Convert(targetSchema, sourceSchema) + if err != nil { + return 0, err + } + // The conversion effectively disables a potential optimization + // if the source reader implemented RowWriterTo. It is a trade off + // we are making to optimize for safety rather than performance. + // + // Entering this code path should not be the common case tho, it is + // most often used when parquet schemas are evolving, but we expect + // that the majority of files of an application to be sharing a + // common schema. + src = ConvertRowReader(src, conv) + } + } + + if wt, ok := src.(RowWriterTo); ok { + return wt.WriteRowsTo(dst) + } + + if rf, ok := dst.(RowReaderFrom); ok { + return rf.ReadRowsFrom(src) + } + + if len(buf) == 0 { + buf = make([]Row, defaultRowBufferSize) + } + + defer clearRows(buf) + + for { + rn, err := src.ReadRows(buf) + + if rn > 0 { + wn, err := dst.WriteRows(buf[:rn]) + if err != nil { + return written, err + } + + written += int64(wn) + } + + if err != nil { + if errors.Is(err, io.EOF) { + err = nil + } + return written, err + } + + if rn == 0 { + return written, io.ErrNoProgress + } + } +} + +func makeRows(n int) []Row { + buf := make([]Value, n) + row := make([]Row, n) + for i := range row { + row[i] = buf[i : i : i+1] + } + return row +} + +func clearRows(rows []Row) { + for i, values := range rows { + clearValues(values) + rows[i] = values[:0] + } +} + +func sourceSchemaOf(r RowReader) *Schema { + if rrs, ok := r.(RowReaderWithSchema); ok { + return rrs.Schema() + } + return nil +} + +func targetSchemaOf(w RowWriter) *Schema { + if rws, ok := w.(RowWriterWithSchema); ok { + return 
rws.Schema() + } + return nil +} + +// ============================================================================= +// Functions returning closures are marked with "go:noinline" below to prevent +// losing naming information of the closure in stack traces. +// +// Because some of the functions are very short (simply return a closure), the +// compiler inlines when at their call site, which results in the closure being +// named something like parquet.deconstructFuncOf.func2 instead of the original +// parquet.deconstructFuncOfLeaf.func1; the latter being much more meaningful +// when reading CPU or memory profiles. +// ============================================================================= + +// deconstructFunc accepts a row, the current levels, the value to deserialize +// the current column onto, and returns the row minus the deserialized value(s). +// It recurses until it hits a leaf node, then deserializes that value +// individually as the base case. +type deconstructFunc func([][]Value, columnLevels, reflect.Value) + +func deconstructFuncOf(columnIndex int16, node Node) (int16, deconstructFunc) { + switch { + case node.Optional(): + return deconstructFuncOfOptional(columnIndex, node) + case node.Repeated(): + return deconstructFuncOfRepeated(columnIndex, node) + case isList(node): + return deconstructFuncOfList(columnIndex, node) + case isMap(node): + return deconstructFuncOfMap(columnIndex, node) + default: + return deconstructFuncOfRequired(columnIndex, node) + } +} + +//go:noinline +func deconstructFuncOfOptional(columnIndex int16, node Node) (int16, deconstructFunc) { + columnIndex, deconstruct := deconstructFuncOf(columnIndex, Required(node)) + return columnIndex, func(columns [][]Value, levels columnLevels, value reflect.Value) { + if value.IsValid() { + if value.IsZero() { + value = reflect.Value{} + } else { + if value.Kind() == reflect.Ptr { + value = value.Elem() + } + levels.definitionLevel++ + } + } + deconstruct(columns, levels, value) + }
+} + +//go:noinline +func deconstructFuncOfRepeated(columnIndex int16, node Node) (int16, deconstructFunc) { + columnIndex, deconstruct := deconstructFuncOf(columnIndex, Required(node)) + return columnIndex, func(columns [][]Value, levels columnLevels, value reflect.Value) { + if value.Kind() == reflect.Interface { + value = value.Elem() + } + + if !value.IsValid() || value.Len() == 0 { + deconstruct(columns, levels, reflect.Value{}) + return + } + + levels.repetitionDepth++ + levels.definitionLevel++ + + for i, n := 0, value.Len(); i < n; i++ { + deconstruct(columns, levels, value.Index(i)) + levels.repetitionLevel = levels.repetitionDepth + } + } +} + +func deconstructFuncOfRequired(columnIndex int16, node Node) (int16, deconstructFunc) { + switch { + case node.Leaf(): + return deconstructFuncOfLeaf(columnIndex, node) + default: + return deconstructFuncOfGroup(columnIndex, node) + } +} + +func deconstructFuncOfList(columnIndex int16, node Node) (int16, deconstructFunc) { + return deconstructFuncOf(columnIndex, Repeated(listElementOf(node))) +} + +//go:noinline +func deconstructFuncOfMap(columnIndex int16, node Node) (int16, deconstructFunc) { + keyValue := mapKeyValueOf(node) + keyValueType := keyValue.GoType() + keyValueElem := keyValueType.Elem() + keyType := keyValueElem.Field(0).Type + valueType := keyValueElem.Field(1).Type + nextColumnIndex, deconstruct := deconstructFuncOf(columnIndex, schemaOf(keyValueElem)) + return nextColumnIndex, func(columns [][]Value, levels columnLevels, mapValue reflect.Value) { + if !mapValue.IsValid() || mapValue.Len() == 0 { + deconstruct(columns, levels, reflect.Value{}) + return + } + + levels.repetitionDepth++ + levels.definitionLevel++ + + elem := reflect.New(keyValueElem).Elem() + k := elem.Field(0) + v := elem.Field(1) + + for _, key := range mapValue.MapKeys() { + k.Set(key.Convert(keyType)) + v.Set(mapValue.MapIndex(key).Convert(valueType)) + deconstruct(columns, levels, elem) + levels.repetitionLevel = 
levels.repetitionDepth + } + } +} + +//go:noinline +func deconstructFuncOfGroup(columnIndex int16, node Node) (int16, deconstructFunc) { + fields := node.Fields() + funcs := make([]deconstructFunc, len(fields)) + for i, field := range fields { + columnIndex, funcs[i] = deconstructFuncOf(columnIndex, field) + } + return columnIndex, func(columns [][]Value, levels columnLevels, value reflect.Value) { + if value.IsValid() { + for i, f := range funcs { + f(columns, levels, fields[i].Value(value)) + } + } else { + for _, f := range funcs { + f(columns, levels, value) + } + } + } +} + +//go:noinline +func deconstructFuncOfLeaf(columnIndex int16, node Node) (int16, deconstructFunc) { + if columnIndex > MaxColumnIndex { + panic("row cannot be deconstructed because it has more than 127 columns") + } + typ := node.Type() + kind := typ.Kind() + lt := typ.LogicalType() + valueColumnIndex := ^columnIndex + return columnIndex + 1, func(columns [][]Value, levels columnLevels, value reflect.Value) { + v := Value{} + + if value.IsValid() { + v = makeValue(kind, lt, value) + } + + v.repetitionLevel = levels.repetitionLevel + v.definitionLevel = levels.definitionLevel + v.columnIndex = valueColumnIndex + + columns[columnIndex] = append(columns[columnIndex], v) + } +} + +// "reconstructX" turns a Go value into a Go representation of a Parquet series +// of values + +type reconstructFunc func(reflect.Value, columnLevels, [][]Value) error + +func reconstructFuncOf(columnIndex int16, node Node) (int16, reconstructFunc) { + switch { + case node.Optional(): + return reconstructFuncOfOptional(columnIndex, node) + case node.Repeated(): + return reconstructFuncOfRepeated(columnIndex, node) + case isList(node): + return reconstructFuncOfList(columnIndex, node) + case isMap(node): + return reconstructFuncOfMap(columnIndex, node) + default: + return reconstructFuncOfRequired(columnIndex, node) + } +} + +//go:noinline +func reconstructFuncOfOptional(columnIndex int16, node Node) (int16, 
reconstructFunc) { + // We convert the optional func to required so that we eventually reach the + // leaf base-case. We're still using the heuristics of optional in the + // returned closure (see levels.definitionLevel++), but we don't actually do + // deserialization here, that happens in the leaf function, hence this line. + nextColumnIndex, reconstruct := reconstructFuncOf(columnIndex, Required(node)) + + return nextColumnIndex, func(value reflect.Value, levels columnLevels, columns [][]Value) error { + levels.definitionLevel++ + + // For empty groups (no columns), we can't check definition levels. + // Treat them as always present (non-null). + if len(columns) > 0 && len(columns[0]) > 0 { + if columns[0][0].definitionLevel < levels.definitionLevel { + value.SetZero() + return nil + } + } + + if value.Kind() == reflect.Ptr { + if value.IsNil() { + value.Set(reflect.New(value.Type().Elem())) + } + value = value.Elem() + } + + return reconstruct(value, levels, columns) + } +} + +func setMakeSlice(v reflect.Value, n int) reflect.Value { + t := v.Type() + if t.Kind() == reflect.Interface { + t = reflect.TypeOf(([]any)(nil)) + } + s := reflect.MakeSlice(t, n, n) + v.Set(s) + return s +} + +func setNullSlice(v reflect.Value) reflect.Value { + t := v.Type() + if t.Kind() == reflect.Interface { + t = reflect.TypeOf(([]any)(nil)) + } + s := reflect.Zero(t) + v.Set(s) + return s +} + +//go:noinline +func reconstructFuncOfRepeated(columnIndex int16, node Node) (int16, reconstructFunc) { + nextColumnIndex, reconstruct := reconstructFuncOf(columnIndex, Required(node)) + return nextColumnIndex, func(value reflect.Value, levels columnLevels, columns [][]Value) error { + levels.repetitionDepth++ + levels.definitionLevel++ + + // Handle empty groups (no columns) + if len(columns) == 0 || len(columns[0]) == 0 { + setMakeSlice(value, 0) + return nil + } + + if columns[0][0].definitionLevel < levels.definitionLevel { + setMakeSlice(value, 0) + return nil + } + + values := 
make([][]Value, len(columns)) + column := columns[0] + n := 0 + + for i, column := range columns { + values[i] = column[0:0:len(column)] + } + + for i := 0; i < len(column); { + i++ + n++ + + for i < len(column) && column[i].repetitionLevel > levels.repetitionDepth { + i++ + } + } + + value = setMakeSlice(value, n) + + for i := range n { + for j, column := range values { + column = column[:cap(column)] + if len(column) == 0 { + continue + } + + k := 1 + for k < len(column) && column[k].repetitionLevel > levels.repetitionDepth { + k++ + } + + values[j] = column[:k] + } + + if err := reconstruct(value.Index(i), levels, values); err != nil { + return err + } + + for j, column := range values { + values[j] = column[len(column):len(column):cap(column)] + } + + levels.repetitionLevel = levels.repetitionDepth + } + + return nil + } +} + +func reconstructFuncOfRequired(columnIndex int16, node Node) (int16, reconstructFunc) { + switch { + case node.Leaf(): + return reconstructFuncOfLeaf(columnIndex, node) + default: + return reconstructFuncOfGroup(columnIndex, node) + } +} + +func reconstructFuncOfList(columnIndex int16, node Node) (int16, reconstructFunc) { + return reconstructFuncOf(columnIndex, Repeated(listElementOf(node))) +} + +//go:noinline +func reconstructFuncOfMap(columnIndex int16, node Node) (int16, reconstructFunc) { + keyValue := mapKeyValueOf(node) + keyValueType := keyValue.GoType() + keyValueElem := keyValueType.Elem() + nextColumnIndex, reconstruct := reconstructFuncOf(columnIndex, schemaOf(keyValueElem)) + + // Check if the value field of the map is a LIST type + valueNode := fieldByName(keyValue, "value") + valueIsList := valueNode != nil && isList(valueNode) + + return nextColumnIndex, func(value reflect.Value, levels columnLevels, columns [][]Value) error { + levels.repetitionDepth++ + levels.definitionLevel++ + + if columns[0][0].definitionLevel < levels.definitionLevel { + valueType := value.Type() + if valueType.Kind() == reflect.Interface { + 
value.Set(reflect.ValueOf(map[string]any{})) + } else { + value.Set(reflect.MakeMap(valueType)) + } + return nil + } + + values := make([][]Value, len(columns)) + column := columns[0] + t := value.Type() + if t.Kind() == reflect.Interface { + t = reflect.TypeOf((map[string]any)(nil)) + } + k := t.Key() + v := t.Elem() + n := 0 + + for i, column := range columns { + values[i] = column[0:0:len(column)] + } + + for i := 0; i < len(column); { + i++ + n++ + + for i < len(column) && column[i].repetitionLevel > levels.repetitionDepth { + i++ + } + } + + if value.IsNil() { + m := reflect.MakeMapWithSize(t, n) + value.Set(m) + value = m // track map instead of any for read[any]() + } + + elem := reflect.New(keyValueElem).Elem() + for range n { + for j, column := range values { + column = column[:cap(column)] + k := 1 + + for k < len(column) && column[k].repetitionLevel > levels.repetitionDepth { + k++ + } + + values[j] = column[:k] + } + + if err := reconstruct(elem, levels, values); err != nil { + return err + } + + for j, column := range values { + values[j] = column[len(column):len(column):cap(column)] + } + + mapKey := elem.Field(0).Convert(k) + mapValue := elem.Field(1) + + // If the value is a LIST type, we need to extract the elements from the + // wrapper struct. The wrapper struct has a "List" field containing + // []struct { Element T }, and we need to convert it to []T. + if valueIsList && v.Kind() == reflect.Slice { + mapValue = convertListWrapperToSlice(mapValue, v) + } else { + mapValue = mapValue.Convert(v) + } + + value.SetMapIndex(mapKey, mapValue) + elem.SetZero() + levels.repetitionLevel = levels.repetitionDepth + } + + return nil + } +} + +// convertListWrapperToSlice converts a LIST wrapper struct to a slice. +// The wrapper struct has the form: struct { List []struct { Element T } } +// and this function returns a reflect.Value of type []T containing the elements. 
+func convertListWrapperToSlice(wrapper reflect.Value, targetSliceType reflect.Type) reflect.Value { + // Get the "List" field from the wrapper struct + listField := wrapper.FieldByName("List") + if !listField.IsValid() { + // If there's no List field, try direct conversion as fallback + return wrapper.Convert(targetSliceType) + } + + // Create the target slice with the same length + n := listField.Len() + result := reflect.MakeSlice(targetSliceType, n, n) + elemType := targetSliceType.Elem() + + // Copy elements from the wrapper to the result slice + for i := range n { + listElem := listField.Index(i) + // Get the "Element" field from each list element struct + elementField := listElem.FieldByName("Element") + if elementField.IsValid() { + result.Index(i).Set(elementField.Convert(elemType)) + } + } + + return result +} + +//go:noinline +func reconstructFuncOfGroup(columnIndex int16, node Node) (int16, reconstructFunc) { + fields := node.Fields() + funcs := make([]reconstructFunc, len(fields)) + columnOffsets := make([]int16, len(fields)) + firstColumnIndex := columnIndex + + for i, field := range fields { + columnIndex, funcs[i] = reconstructFuncOf(columnIndex, field) + columnOffsets[i] = columnIndex - firstColumnIndex + } + + return columnIndex, func(value reflect.Value, levels columnLevels, columns [][]Value) error { + if value.Kind() == reflect.Interface { + value.Set(reflect.MakeMap(reflect.TypeOf((map[string]any)(nil)))) + value = value.Elem() + } + + if value.Kind() == reflect.Map { + elemType := value.Type().Elem() + name := reflect.New(reflect.TypeOf("")).Elem() + elem := reflect.New(elemType).Elem() + + if value.Len() > 0 { + value.Set(reflect.MakeMap(value.Type())) + } + + off := int16(0) + + for i, f := range funcs { + name.SetString(fields[i].Name()) + end := columnOffsets[i] + err := f(elem, levels, columns[off:end:end]) + if err != nil { + return fmt.Errorf("%s → %w", name, err) + } + off = end + value.SetMapIndex(name, elem) + elem.SetZero() + } + } 
else { + off := int16(0) + + for i, f := range funcs { + end := columnOffsets[i] + err := f(fields[i].Value(value), levels, columns[off:end:end]) + if err != nil { + return fmt.Errorf("%s → %w", fields[i].Name(), err) + } + off = end + } + } + + return nil + } +} + +//go:noinline +func reconstructFuncOfLeaf(columnIndex int16, node Node) (int16, reconstructFunc) { + typ := node.Type() + return columnIndex + 1, func(value reflect.Value, _ columnLevels, columns [][]Value) error { + column := columns[0] + if len(column) == 0 { + return fmt.Errorf("no values found in parquet row for column %d", columnIndex) + } + return typ.AssignValue(value, column[0]) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/row_buffer.go b/vendor/github.com/parquet-go/parquet-go/row_buffer.go new file mode 100644 index 00000000000..9f98e4d057b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/row_buffer.go @@ -0,0 +1,463 @@ +package parquet + +import ( + "io" + "sort" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" +) + +// RowBuffer is an implementation of the RowGroup interface which stores parquet +// rows in memory. +// +// Unlike GenericBuffer which uses a column layout to store values in memory +// buffers, RowBuffer uses a row layout. The use of row layout provides greater +// efficiency when sorting the buffer, which is the primary use case for the +// RowBuffer type. Applications which intend to sort rows prior to writing them +// to a parquet file will often see lower CPU utilization from using a RowBuffer +// than a GenericBuffer. +// +// RowBuffer values are not safe to use concurrently from multiple goroutines. +type RowBuffer[T any] struct { + alloc rowAllocator + schema *Schema + sorting []SortingColumn + rows []Row + values []Value + compare func(Row, Row) int +} + +// NewRowBuffer constructs a new row buffer. 
+func NewRowBuffer[T any](options ...RowGroupOption) *RowBuffer[T] { + config := DefaultRowGroupConfig() + config.Apply(options...) + if err := config.Validate(); err != nil { + panic(err) + } + + t := typeOf[T]() + if config.Schema == nil && t != nil { + config.Schema = schemaOf(dereference(t)) + } + + if config.Schema == nil { + panic("row buffer must be instantiated with schema or concrete type.") + } + + return &RowBuffer[T]{ + schema: config.Schema, + sorting: config.Sorting.SortingColumns, + compare: config.Schema.Comparator(config.Sorting.SortingColumns...), + } +} + +// Reset clears the content of the buffer without releasing its memory. +func (buf *RowBuffer[T]) Reset() { + for i := range buf.rows { + buf.rows[i] = nil + } + for i := range buf.values { + buf.values[i] = Value{} + } + buf.rows = buf.rows[:0] + buf.values = buf.values[:0] + buf.alloc.reset() +} + +// NumRows returns the number of rows currently written to the buffer. +func (buf *RowBuffer[T]) NumRows() int64 { return int64(len(buf.rows)) } + +// ColumnChunks returns a view of the buffer's columns. +// +// Note that reading columns of a RowBuffer will be less efficient than reading +// columns of a GenericBuffer since the latter uses a column layout. This method +// is mainly exposed to satisfy the RowGroup interface, applications which need +// compute-efficient column scans on in-memory buffers should likely use a +// GenericBuffer instead. +// +// The returned column chunks are snapshots at the time the method is called, +// they remain valid until the next call to Reset on the buffer. +func (buf *RowBuffer[T]) ColumnChunks() []ColumnChunk { + columns := buf.schema.Columns() + chunks := make([]rowBufferColumnChunk, len(columns)) + + for i, column := range columns { + leafColumn, _ := buf.schema.Lookup(column...) 
+ chunks[i] = rowBufferColumnChunk{ + page: rowBufferPage{ + rows: buf.rows, + typ: leafColumn.Node.Type(), + column: leafColumn.ColumnIndex, + maxRepetitionLevel: byte(leafColumn.MaxRepetitionLevel), + maxDefinitionLevel: byte(leafColumn.MaxDefinitionLevel), + }, + } + } + + columnChunks := make([]ColumnChunk, len(chunks)) + for i := range chunks { + columnChunks[i] = &chunks[i] + } + return columnChunks +} + +// SortingColumns returns the list of columns that rows are expected to be +// sorted by. +// +// The list of sorting columns is configured when the buffer is created and used +// when it is sorted. +// +// Note that unless the buffer is explicitly sorted, there are no guarantees +// that the rows it contains will be in the order specified by the sorting +// columns. +func (buf *RowBuffer[T]) SortingColumns() []SortingColumn { return buf.sorting } + +// Schema returns the schema of rows in the buffer. +func (buf *RowBuffer[T]) Schema() *Schema { return buf.schema } + +// Len returns the number of rows in the buffer. +// +// The method contributes to satisfying sort.Interface. +func (buf *RowBuffer[T]) Len() int { return len(buf.rows) } + +// Less compares the rows at index i and j according to the sorting columns +// configured on the buffer. +// +// The method contributes to satisfying sort.Interface. +func (buf *RowBuffer[T]) Less(i, j int) bool { + return buf.compare(buf.rows[i], buf.rows[j]) < 0 +} + +// Swap exchanges the rows at index i and j in the buffer. +// +// The method contributes to satisfying sort.Interface. +func (buf *RowBuffer[T]) Swap(i, j int) { + buf.rows[i], buf.rows[j] = buf.rows[j], buf.rows[i] +} + +// Rows returns a Rows instance exposing rows stored in the buffer. +// +// The rows returned are a snapshot at the time the method is called. +// The returned rows and values read from it remain valid until the next call +// to Reset on the buffer. 
+func (buf *RowBuffer[T]) Rows() Rows { + return &rowBufferRows{rows: buf.rows, schema: buf.schema} +} + +// Write writes rows to the buffer, returning the number of rows written. +func (buf *RowBuffer[T]) Write(rows []T) (int, error) { + for i := range rows { + off := len(buf.values) + buf.values = buf.schema.Deconstruct(buf.values, &rows[i]) + end := len(buf.values) + row := buf.values[off:end:end] + buf.alloc.capture(row) + buf.rows = append(buf.rows, row) + } + return len(rows), nil +} + +// WriteRows writes parquet rows to the buffer, returning the number of rows +// written. +func (buf *RowBuffer[T]) WriteRows(rows []Row) (int, error) { + for i := range rows { + off := len(buf.values) + buf.values = append(buf.values, rows[i]...) + end := len(buf.values) + row := buf.values[off:end:end] + buf.alloc.capture(row) + buf.rows = append(buf.rows, row) + } + return len(rows), nil +} + +type rowBufferColumnChunk struct{ page rowBufferPage } + +func (c *rowBufferColumnChunk) Type() Type { return c.page.Type() } + +func (c *rowBufferColumnChunk) Column() int { return c.page.Column() } + +func (c *rowBufferColumnChunk) Pages() Pages { return onePage(&c.page) } + +func (c *rowBufferColumnChunk) ColumnIndex() (ColumnIndex, error) { return nil, nil } + +func (c *rowBufferColumnChunk) OffsetIndex() (OffsetIndex, error) { return nil, nil } + +func (c *rowBufferColumnChunk) BloomFilter() BloomFilter { return nil } + +func (c *rowBufferColumnChunk) NumValues() int64 { return c.page.NumValues() } + +type rowBufferPage struct { + rows []Row + typ Type + column int + maxRepetitionLevel byte + maxDefinitionLevel byte +} + +func (p *rowBufferPage) Type() Type { return p.typ } + +func (p *rowBufferPage) Column() int { return p.column } + +func (p *rowBufferPage) Dictionary() Dictionary { return nil } + +func (p *rowBufferPage) NumRows() int64 { return int64(len(p.rows)) } + +func (p *rowBufferPage) NumValues() int64 { + numValues := int64(0) + p.scan(func(value Value) { + if
!value.isNull() { + numValues++ + } + }) + return numValues +} + +func (p *rowBufferPage) NumNulls() int64 { + numNulls := int64(0) + p.scan(func(value Value) { + if value.isNull() { + numNulls++ + } + }) + return numNulls +} + +func (p *rowBufferPage) Bounds() (min, max Value, ok bool) { + p.scan(func(value Value) { + if !value.IsNull() { + switch { + case !ok: + min, max, ok = value, value, true + case p.typ.Compare(value, min) < 0: + min = value + case p.typ.Compare(value, max) > 0: + max = value + } + } + }) + return min, max, ok +} + +func (p *rowBufferPage) Size() int64 { return 0 } + +func (p *rowBufferPage) Values() ValueReader { + return &rowBufferPageValueReader{ + page: p, + columnIndex: ^int16(p.column), + } +} + +func (p *rowBufferPage) Clone() Page { + rows := make([]Row, len(p.rows)) + for i := range rows { + rows[i] = p.rows[i].Clone() + } + return &rowBufferPage{ + rows: rows, + typ: p.typ, + column: p.column, + } +} + +func (p *rowBufferPage) Slice(i, j int64) Page { + return &rowBufferPage{ + rows: p.rows[i:j], + typ: p.typ, + column: p.column, + } +} + +func (p *rowBufferPage) RepetitionLevels() (repetitionLevels []byte) { + if p.maxRepetitionLevel != 0 { + repetitionLevels = make([]byte, 0, len(p.rows)) + p.scan(func(value Value) { + repetitionLevels = append(repetitionLevels, value.repetitionLevel) + }) + } + return repetitionLevels +} + +func (p *rowBufferPage) DefinitionLevels() (definitionLevels []byte) { + if p.maxDefinitionLevel != 0 { + definitionLevels = make([]byte, 0, len(p.rows)) + p.scan(func(value Value) { + definitionLevels = append(definitionLevels, value.definitionLevel) + }) + } + return definitionLevels +} + +func (p *rowBufferPage) Data() encoding.Values { + switch p.typ.Kind() { + case Boolean: + values := make([]byte, (len(p.rows)+7)/8) + numValues := 0 + p.scanNonNull(func(value Value) { + if value.boolean() { + i := uint(numValues) / 8 + j := uint(numValues) % 8 + values[i] |= 1 << j + } + numValues++ + }) + return 
encoding.BooleanValues(values[:(numValues+7)/8]) + + case Int32: + values := make([]int32, 0, len(p.rows)) + p.scanNonNull(func(value Value) { values = append(values, value.int32()) }) + return encoding.Int32Values(values) + + case Int64: + values := make([]int64, 0, len(p.rows)) + p.scanNonNull(func(value Value) { values = append(values, value.int64()) }) + return encoding.Int64Values(values) + + case Int96: + values := make([]deprecated.Int96, 0, len(p.rows)) + p.scanNonNull(func(value Value) { values = append(values, value.int96()) }) + return encoding.Int96Values(values) + + case Float: + values := make([]float32, 0, len(p.rows)) + p.scanNonNull(func(value Value) { values = append(values, value.float()) }) + return encoding.FloatValues(values) + + case Double: + values := make([]float64, 0, len(p.rows)) + p.scanNonNull(func(value Value) { values = append(values, value.double()) }) + return encoding.DoubleValues(values) + + case ByteArray: + values := make([]byte, 0, p.typ.EstimateSize(len(p.rows))) + offsets := make([]uint32, 0, len(p.rows)) + p.scanNonNull(func(value Value) { + offsets = append(offsets, uint32(len(values))) + values = append(values, value.byteArray()...) + }) + offsets = append(offsets, uint32(len(values))) + return encoding.ByteArrayValues(values, offsets) + + case FixedLenByteArray: + length := p.typ.Length() + values := make([]byte, 0, length*len(p.rows)) + p.scanNonNull(func(value Value) { values = append(values, value.byteArray()...) 
}) + return encoding.FixedLenByteArrayValues(values, length) + + default: + return encoding.Values{} + } +} + +func (p *rowBufferPage) scan(f func(Value)) { + columnIndex := ^int16(p.column) + + for _, row := range p.rows { + for _, value := range row { + if value.columnIndex == columnIndex { + f(value) + } + } + } +} + +func (p *rowBufferPage) scanNonNull(f func(Value)) { + p.scan(func(value Value) { + if !value.isNull() { + f(value) + } + }) +} + +type rowBufferPageValueReader struct { + page *rowBufferPage + rowIndex int + valueIndex int + columnIndex int16 +} + +func (r *rowBufferPageValueReader) ReadValues(values []Value) (n int, err error) { + for n < len(values) && r.rowIndex < len(r.page.rows) { + for n < len(values) && r.valueIndex < len(r.page.rows[r.rowIndex]) { + if v := r.page.rows[r.rowIndex][r.valueIndex]; v.columnIndex == r.columnIndex { + values[n] = v + n++ + } + r.valueIndex++ + } + r.rowIndex++ + r.valueIndex = 0 + } + if r.rowIndex == len(r.page.rows) { + err = io.EOF + } + return n, err +} + +type rowBufferRows struct { + rows []Row + index int + schema *Schema +} + +func (r *rowBufferRows) Close() error { + r.index = -1 + return nil +} + +func (r *rowBufferRows) Schema() *Schema { + return r.schema +} + +func (r *rowBufferRows) SeekToRow(rowIndex int64) error { + if rowIndex < 0 { + return ErrSeekOutOfRange + } + + if r.index < 0 { + return io.ErrClosedPipe + } + + maxRowIndex := int64(len(r.rows)) + if rowIndex > maxRowIndex { + rowIndex = maxRowIndex + } + + r.index = int(rowIndex) + return nil +} + +func (r *rowBufferRows) ReadRows(rows []Row) (n int, err error) { + if r.index < 0 { + return 0, io.EOF + } + + if n = len(r.rows) - r.index; n > len(rows) { + n = len(rows) + } + + for i, row := range r.rows[r.index : r.index+n] { + rows[i] = append(rows[i][:0], row...) 
+ } + + if r.index += n; r.index == len(r.rows) { + err = io.EOF + } + + return n, err +} + +func (r *rowBufferRows) WriteRowsTo(w RowWriter) (int64, error) { + n, err := w.WriteRows(r.rows[r.index:]) + r.index += n + return int64(n), err +} + +var ( + _ RowGroup = (*RowBuffer[any])(nil) + _ RowWriter = (*RowBuffer[any])(nil) + _ sort.Interface = (*RowBuffer[any])(nil) + + _ RowWriterTo = (*rowBufferRows)(nil) +) diff --git a/vendor/github.com/parquet-go/parquet-go/row_builder.go b/vendor/github.com/parquet-go/parquet-go/row_builder.go new file mode 100644 index 00000000000..e7eb96a3791 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/row_builder.go @@ -0,0 +1,202 @@ +package parquet + +// RowBuilder is a type which helps build parquet rows incrementally by adding +// values to columns. +type RowBuilder struct { + columns [][]Value + models []Value + levels []columnLevel + groups []*columnGroup +} + +type columnLevel struct { + repetitionDepth byte + repetitionLevel byte + definitionLevel byte +} + +type columnGroup struct { + baseColumn []Value + members []int16 + startIndex int16 + endIndex int16 + repetitionLevel byte + definitionLevel byte +} + +// NewRowBuilder constructs a RowBuilder which builds rows for the parquet +// schema passed as argument. 
+func NewRowBuilder(schema Node) *RowBuilder { + if schema.Leaf() { + panic("schema of row builder must be a group") + } + n := numLeafColumnsOf(schema) + b := &RowBuilder{ + columns: make([][]Value, n), + models: make([]Value, n), + levels: make([]columnLevel, n), + } + buffers := make([]Value, len(b.columns)) + for i := range b.columns { + b.columns[i] = buffers[i : i : i+1] + } + topGroup := &columnGroup{baseColumn: []Value{{}}} + endIndex := b.configure(schema, 0, columnLevel{}, topGroup) + topGroup.endIndex = endIndex + b.groups = append(b.groups, topGroup) + return b +} + +func (b *RowBuilder) configure(node Node, columnIndex int16, level columnLevel, group *columnGroup) (endIndex int16) { + switch { + case node.Optional(): + level.definitionLevel++ + endIndex = b.configure(Required(node), columnIndex, level, group) + + for i := columnIndex; i < endIndex; i++ { + b.models[i].kind = 0 // null if not set + b.models[i].ptr = nil + b.models[i].u64 = 0 + } + + case node.Repeated(): + level.definitionLevel++ + + group = &columnGroup{ + startIndex: columnIndex, + repetitionLevel: level.repetitionDepth, + definitionLevel: level.definitionLevel, + } + + level.repetitionDepth++ + endIndex = b.configure(Required(node), columnIndex, level, group) + + for i := columnIndex; i < endIndex; i++ { + b.models[i].kind = 0 // null if not set + b.models[i].ptr = nil + b.models[i].u64 = 0 + } + + group.endIndex = endIndex + b.groups = append(b.groups, group) + + case node.Leaf(): + typ := node.Type() + kind := typ.Kind() + model := makeValueKind(kind) + model.repetitionLevel = level.repetitionLevel + model.definitionLevel = level.definitionLevel + // FIXED_LEN_BYTE_ARRAY is the only type which needs to be given a + // non-nil zero-value if the field is required. 
+ if kind == FixedLenByteArray { + zero := make([]byte, typ.Length()) + model.ptr = &zero[0] + model.u64 = uint64(len(zero)) + } + group.members = append(group.members, columnIndex) + b.models[columnIndex] = model + b.levels[columnIndex] = level + endIndex = columnIndex + 1 + + default: + endIndex = columnIndex + + for _, field := range node.Fields() { + endIndex = b.configure(field, endIndex, level, group) + } + } + return endIndex +} + +// Add adds columnValue to the column at columnIndex. +func (b *RowBuilder) Add(columnIndex int, columnValue Value) { + level := &b.levels[columnIndex] + columnValue.repetitionLevel = level.repetitionLevel + columnValue.definitionLevel = level.definitionLevel + columnValue.columnIndex = ^int16(columnIndex) + level.repetitionLevel = level.repetitionDepth + b.columns[columnIndex] = append(b.columns[columnIndex], columnValue) +} + +// Next must be called to indicate the start of a new repeated record for the +// column at the given index. +// +// If the column index is part of a repeated group, the builder automatically +// starts a new record for all adjacent columns, the application does not need +// to call this method for each column of the repeated group. +// +// Next must be called after adding a sequence of records. +func (b *RowBuilder) Next(columnIndex int) { + for _, group := range b.groups { + if group.startIndex <= int16(columnIndex) && int16(columnIndex) < group.endIndex { + for i := group.startIndex; i < group.endIndex; i++ { + if level := &b.levels[i]; level.repetitionLevel != 0 { + level.repetitionLevel = group.repetitionLevel + } + } + break + } + } +} + +// Reset clears the internal state of b, making it possible to reuse while +// retaining the internal buffers. +func (b *RowBuilder) Reset() { + for i, column := range b.columns { + clearValues(column) + b.columns[i] = column[:0] + } + for i := range b.levels { + b.levels[i].repetitionLevel = 0 + } +} + +// Row materializes the current state of b into a parquet row. 
+func (b *RowBuilder) Row() Row { + numValues := 0 + for _, column := range b.columns { + numValues += len(column) + } + return b.AppendRow(make(Row, 0, numValues)) +} + +// AppendRow appends the current state of b to row and returns it. +func (b *RowBuilder) AppendRow(row Row) Row { + for _, group := range b.groups { + maxColumn := group.baseColumn + + for _, columnIndex := range group.members { + if column := b.columns[columnIndex]; len(column) > len(maxColumn) { + maxColumn = column + } + } + + if len(maxColumn) != 0 { + columns := b.columns[group.startIndex:group.endIndex] + + for i, column := range columns { + if len(column) < len(maxColumn) { + n := len(column) + column = append(column, maxColumn[n:]...) + + columnIndex := group.startIndex + int16(i) + model := b.models[columnIndex] + + for n < len(column) { + v := &column[n] + v.kind = model.kind + v.ptr = model.ptr + v.u64 = model.u64 + v.definitionLevel = group.definitionLevel + v.columnIndex = ^columnIndex + n++ + } + + columns[i] = column + } + } + } + } + + return appendRow(row, b.columns) +} diff --git a/vendor/github.com/parquet-go/parquet-go/row_group.go b/vendor/github.com/parquet-go/parquet-go/row_group.go new file mode 100644 index 00000000000..66cc65dca72 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/row_group.go @@ -0,0 +1,493 @@ +package parquet + +import ( + "errors" + "fmt" + "io" + + "github.com/parquet-go/parquet-go/internal/debug" +) + +// RowGroup is an interface representing a parquet row group. From the Parquet +// docs, a RowGroup is "a logical horizontal partitioning of the data into rows. +// There is no physical structure that is guaranteed for a row group. A row +// group consists of a column chunk for each column in the dataset." +// +// https://github.com/apache/parquet-format#glossary +type RowGroup interface { + // Returns the number of rows in the group. + NumRows() int64 + + // Returns the list of column chunks in this row group. 
The chunks are + // ordered in the order of leaf columns from the row group's schema. + // + // If the underlying implementation is not read-only, the returned + // parquet.ColumnChunk may implement other interfaces: for example, + // parquet.ColumnBuffer if the chunk is backed by an in-memory buffer, + // or typed writer interfaces like parquet.Int32Writer depending on the + // underlying type of values that can be written to the chunk. + // + // As an optimization, the row group may return the same slice across + // multiple calls to this method. Applications should treat the returned + // slice as read-only. + ColumnChunks() []ColumnChunk + + // Returns the schema of rows in the group. + Schema() *Schema + + // Returns the list of sorting columns describing how rows are sorted in the + // group. + // + // The method will return an empty slice if the rows are not sorted. + SortingColumns() []SortingColumn + + // Returns a reader exposing the rows of the row group. + // + // As an optimization, the returned parquet.Rows object may implement + // parquet.RowWriterTo, and test the RowWriter it receives for an + // implementation of the parquet.RowGroupWriter interface. + // + // This optimization mechanism is leveraged by the parquet.CopyRows function + // to skip the generic row-by-row copy algorithm and delegate the copy logic + // to the parquet.Rows object. + Rows() Rows +} + +// Rows is an interface implemented by row readers returned by calling the Rows +// method of RowGroup instances. +// +// Applications should call Close when they are done using a Rows instance in +// order to release the underlying resources held by the row sequence. +// +// After calling Close, all attempts to read more rows will return io.EOF. +type Rows interface { + RowReadSeekCloser + Schema() *Schema +} + +// RowGroupReader is an interface implemented by types that expose sequences of +// row groups to the application. 
+type RowGroupReader interface { + ReadRowGroup() (RowGroup, error) +} + +// RowGroupWriter is an interface implemented by types that allow the program +// to write row groups. +type RowGroupWriter interface { + WriteRowGroup(RowGroup) (int64, error) +} + +// SortingColumn represents a column by which a row group is sorted. +type SortingColumn interface { + // Returns the path of the column in the row group schema, omitting the name + // of the root node. + Path() []string + + // Returns true if the column will sort values in descending order. + Descending() bool + + // Returns true if the column will put null values at the beginning. + NullsFirst() bool +} + +// Ascending constructs a SortingColumn value which dictates to sort the column +// at the path given as argument in ascending order. +func Ascending(path ...string) SortingColumn { return ascending(path) } + +// Descending constructs a SortingColumn value which dictates to sort the column +// at the path given as argument in descending order. +func Descending(path ...string) SortingColumn { return descending(path) } + +// NullsFirst wraps the SortingColumn passed as argument so that it instructs +// the row group to place null values first in the column. 
+func NullsFirst(sortingColumn SortingColumn) SortingColumn { return nullsFirst{sortingColumn} } + +type ascending []string + +func (asc ascending) String() string { return fmt.Sprintf("ascending(%s)", columnPath(asc)) } +func (asc ascending) Path() []string { return asc } +func (asc ascending) Descending() bool { return false } +func (asc ascending) NullsFirst() bool { return false } + +type descending []string + +func (desc descending) String() string { return fmt.Sprintf("descending(%s)", columnPath(desc)) } +func (desc descending) Path() []string { return desc } +func (desc descending) Descending() bool { return true } +func (desc descending) NullsFirst() bool { return false } + +type nullsFirst struct{ SortingColumn } + +func (nf nullsFirst) String() string { return fmt.Sprintf("nulls_first+%s", nf.SortingColumn) } +func (nf nullsFirst) NullsFirst() bool { return true } + +func searchSortingColumn(sortingColumns []SortingColumn, path columnPath) int { + // There are usually a few sorting columns in a row group, so the linear + // scan is the fastest option and works whether the sorting column list + // is sorted or not. Please revisit this decision if this code path ends + // up being more costly than necessary. 
+ for i, sorting := range sortingColumns { + if path.equal(sorting.Path()) { + return i + } + } + return len(sortingColumns) +} + +func sortingColumnsHavePrefix(sortingColumns, prefix []SortingColumn) bool { + if len(sortingColumns) < len(prefix) { + return false + } + for i, sortingColumn := range prefix { + if !sortingColumnsAreEqual(sortingColumns[i], sortingColumn) { + return false + } + } + return true +} + +func sortingColumnsAreEqual(s1, s2 SortingColumn) bool { + path1 := columnPath(s1.Path()) + path2 := columnPath(s2.Path()) + return path1.equal(path2) && s1.Descending() == s2.Descending() && s1.NullsFirst() == s2.NullsFirst() +} + +type rowGroup struct { + schema *Schema + numRows int64 + columns []ColumnChunk + sorting []SortingColumn +} + +func (r *rowGroup) NumRows() int64 { return r.numRows } +func (r *rowGroup) ColumnChunks() []ColumnChunk { return r.columns } +func (r *rowGroup) SortingColumns() []SortingColumn { return r.sorting } +func (r *rowGroup) Schema() *Schema { return r.schema } +func (r *rowGroup) Rows() Rows { return NewRowGroupRowReader(r) } + +func AsyncRowGroup(base RowGroup) RowGroup { + columnChunks := base.ColumnChunks() + asyncRowGroup := &rowGroup{ + schema: base.Schema(), + numRows: base.NumRows(), + sorting: base.SortingColumns(), + columns: make([]ColumnChunk, len(columnChunks)), + } + asyncColumnChunks := make([]asyncColumnChunk, len(columnChunks)) + for i, columnChunk := range columnChunks { + asyncColumnChunks[i].ColumnChunk = columnChunk + asyncRowGroup.columns[i] = &asyncColumnChunks[i] + } + return asyncRowGroup +} + +type rowGroupRows struct { + schema *Schema + bufsize int + buffers []Value + columns []columnChunkRows + closed bool + rowIndex int64 +} + +type columnChunkRows struct { + offset int32 + length int32 + reader columnChunkValueReader +} + +func (r *rowGroupRows) buffer(i int) []Value { + j := (i + 0) * r.bufsize + k := (i + 1) * r.bufsize + return r.buffers[j:k:k] +} + +// / NewRowGroupRowReader constructs a 
new row reader for the given row group. +func NewRowGroupRowReader(rowGroup RowGroup) Rows { + return newRowGroupRows(rowGroup.Schema(), rowGroup.ColumnChunks(), defaultValueBufferSize) +} + +func newRowGroupRows(schema *Schema, columns []ColumnChunk, bufferSize int) *rowGroupRows { + r := &rowGroupRows{ + schema: schema, + bufsize: bufferSize, + buffers: make([]Value, len(columns)*bufferSize), + columns: make([]columnChunkRows, len(columns)), + rowIndex: -1, + } + for i, column := range columns { + switch column.Type().Kind() { + case ByteArray, FixedLenByteArray: + // (@mdisibio) - If the column can contain pointers, then we must not repool + // the underlying values buffer because the rows returned to the caller + // reference slices within it. Detaching the entire values buffer is more + // efficient than cloning individual values. + r.columns[i].reader.detach = true + } + r.columns[i].reader.pages = column.Pages() + } + // This finalizer is used to ensure that the goroutines started by calling + // init on the underlying page readers will be shutdown in the event that + // Close isn't called and the rowGroupRows object is garbage collected. + debug.SetFinalizer(r, func(r *rowGroupRows) { r.Close() }) + return r +} + +func (r *rowGroupRows) clear() { + for i, c := range r.columns { + r.columns[i] = columnChunkRows{reader: c.reader} + } + clear(r.buffers) +} + +func (r *rowGroupRows) Reset() { + for i := range r.columns { + r.columns[i].reader.Reset() + } + r.clear() +} + +func (r *rowGroupRows) Close() error { + var errs []error + for i := range r.columns { + c := &r.columns[i] + c.offset = 0 + c.length = 0 + if err := c.reader.Close(); err != nil { + errs = append(errs, err) + } + } + r.clear() + r.closed = true + return errors.Join(errs...) 
+} + +func (r *rowGroupRows) SeekToRow(rowIndex int64) error { + if r.closed { + return io.ErrClosedPipe + } + if rowIndex != r.rowIndex { + for i := range r.columns { + if err := r.columns[i].reader.SeekToRow(rowIndex); err != nil { + return err + } + } + r.clear() + r.rowIndex = rowIndex + } + return nil +} + +func (r *rowGroupRows) ReadRows(rows []Row) (int, error) { + if r.closed { + return 0, io.EOF + } + + // When this is the first call to ReadRows, we issue a seek to the first row + // because this starts prefetching pages asynchronously on columns. + // + // This condition does not apply if SeekToRow was called before ReadRows, + // only when ReadRows is the very first method called on the row reader. + if r.rowIndex < 0 { + if err := r.SeekToRow(0); err != nil { + return 0, err + } + } + + eofCount := 0 + rowCount := 0 + +readColumnValues: + for columnIndex := range r.columns { + c := &r.columns[columnIndex] + b := r.buffer(columnIndex) + eof := false + + for rowIndex := range rows { + numValuesInRow := 1 + + if columnIndex == 0 { + rows[rowIndex] = rows[rowIndex][:0] + } + + for { + if c.offset == c.length { + n, err := c.reader.ReadValues(b) + c.offset = 0 + c.length = int32(n) + + if n == 0 { + if err == io.EOF { + eof = true + eofCount++ + break + } + return 0, err + } + } + + values := b[c.offset:c.length:c.length] + for numValuesInRow < len(values) && values[numValuesInRow].repetitionLevel != 0 { + numValuesInRow++ + } + if numValuesInRow == 0 { + break + } + + rows[rowIndex] = append(rows[rowIndex], values[:numValuesInRow]...) 
+ rowCount = max(rowCount, rowIndex+1) + c.offset += int32(numValuesInRow) + + if numValuesInRow != len(values) { + break + } + if eof { + continue readColumnValues + } + numValuesInRow = 0 + } + } + } + + var err error + if eofCount > 0 { + err = io.EOF + } + r.rowIndex += int64(rowCount) + return rowCount, err +} + +func (r *rowGroupRows) Schema() *Schema { + return r.schema +} + +type seekRowGroup struct { + base RowGroup + seek int64 + columns []ColumnChunk +} + +func (g *seekRowGroup) NumRows() int64 { + return g.base.NumRows() - g.seek +} + +func (g *seekRowGroup) ColumnChunks() []ColumnChunk { + return g.columns +} + +func (g *seekRowGroup) Schema() *Schema { + return g.base.Schema() +} + +func (g *seekRowGroup) SortingColumns() []SortingColumn { + return g.base.SortingColumns() +} + +func (g *seekRowGroup) Rows() Rows { + rows := g.base.Rows() + rows.SeekToRow(g.seek) + return rows +} + +type seekColumnChunk struct { + base ColumnChunk + seek int64 +} + +func (c *seekColumnChunk) Type() Type { + return c.base.Type() +} + +func (c *seekColumnChunk) Column() int { + return c.base.Column() +} + +func (c *seekColumnChunk) Pages() Pages { + pages := c.base.Pages() + pages.SeekToRow(c.seek) + return pages +} + +func (c *seekColumnChunk) ColumnIndex() (ColumnIndex, error) { + return c.base.ColumnIndex() +} + +func (c *seekColumnChunk) OffsetIndex() (OffsetIndex, error) { + return c.base.OffsetIndex() +} + +func (c *seekColumnChunk) BloomFilter() BloomFilter { + return c.base.BloomFilter() +} + +func (c *seekColumnChunk) NumValues() int64 { + return c.base.NumValues() +} + +type emptyRowGroup struct { + schema *Schema + columns []ColumnChunk +} + +func newEmptyRowGroup(schema *Schema) *emptyRowGroup { + columns := schema.Columns() + rowGroup := &emptyRowGroup{ + schema: schema, + columns: make([]ColumnChunk, len(columns)), + } + emptyColumnChunks := make([]emptyColumnChunk, len(columns)) + for i, column := range schema.Columns() { + leaf, _ := 
schema.Lookup(column...) + emptyColumnChunks[i].typ = leaf.Node.Type() + emptyColumnChunks[i].column = int16(leaf.ColumnIndex) + rowGroup.columns[i] = &emptyColumnChunks[i] + } + return rowGroup +} + +func (g *emptyRowGroup) NumRows() int64 { return 0 } +func (g *emptyRowGroup) ColumnChunks() []ColumnChunk { return g.columns } +func (g *emptyRowGroup) Schema() *Schema { return g.schema } +func (g *emptyRowGroup) SortingColumns() []SortingColumn { return nil } +func (g *emptyRowGroup) Rows() Rows { return emptyRows{g.schema} } + +type emptyColumnChunk struct { + typ Type + column int16 +} + +func (c *emptyColumnChunk) Type() Type { return c.typ } +func (c *emptyColumnChunk) Column() int { return int(c.column) } +func (c *emptyColumnChunk) Pages() Pages { return emptyPages{} } +func (c *emptyColumnChunk) ColumnIndex() (ColumnIndex, error) { return emptyColumnIndex{}, nil } +func (c *emptyColumnChunk) OffsetIndex() (OffsetIndex, error) { return emptyOffsetIndex{}, nil } +func (c *emptyColumnChunk) BloomFilter() BloomFilter { return emptyBloomFilter{} } +func (c *emptyColumnChunk) NumValues() int64 { return 0 } + +type emptyBloomFilter struct{} + +func (emptyBloomFilter) ReadAt([]byte, int64) (int, error) { return 0, io.EOF } +func (emptyBloomFilter) Size() int64 { return 0 } +func (emptyBloomFilter) Check(Value) (bool, error) { return false, nil } + +type emptyRows struct{ schema *Schema } + +func (r emptyRows) Close() error { return nil } +func (r emptyRows) Schema() *Schema { return r.schema } +func (r emptyRows) ReadRows([]Row) (int, error) { return 0, io.EOF } +func (r emptyRows) SeekToRow(int64) error { return nil } +func (r emptyRows) WriteRowsTo(RowWriter) (int64, error) { return 0, nil } + +type emptyPages struct{} + +func (emptyPages) ReadPage() (Page, error) { return nil, io.EOF } +func (emptyPages) SeekToRow(int64) error { return nil } +func (emptyPages) Close() error { return nil } + +var ( + _ RowReaderWithSchema = (*rowGroupRows)(nil) + //_ RowWriterTo = 
(*rowGroupRows)(nil) + + _ RowReaderWithSchema = emptyRows{} + _ RowWriterTo = emptyRows{} +) diff --git a/vendor/github.com/parquet-go/parquet-go/scan.go b/vendor/github.com/parquet-go/parquet-go/scan.go new file mode 100644 index 00000000000..abc287e7045 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/scan.go @@ -0,0 +1,33 @@ +package parquet + +import "io" + +// ScanRowReader constructs a RowReader which exposes rows from reader until +// the predicate returns false for one of the rows, or EOF is reached. +func ScanRowReader(reader RowReader, predicate func(Row, int64) bool) RowReader { + return &scanRowReader{reader: reader, predicate: predicate} +} + +type scanRowReader struct { + reader RowReader + predicate func(Row, int64) bool + rowIndex int64 +} + +func (s *scanRowReader) ReadRows(rows []Row) (int, error) { + if s.rowIndex < 0 { + return 0, io.EOF + } + + n, err := s.reader.ReadRows(rows) + + for i, row := range rows[:n] { + if !s.predicate(row, s.rowIndex) { + s.rowIndex = -1 + return i, io.EOF + } + s.rowIndex++ + } + + return n, err +} diff --git a/vendor/github.com/parquet-go/parquet-go/schema.go b/vendor/github.com/parquet-go/parquet-go/schema.go new file mode 100644 index 00000000000..6a7afd5ada4 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/schema.go @@ -0,0 +1,1214 @@ +package parquet + +import ( + "encoding/json" + "fmt" + "hash/maphash" + "maps" + "math" + "reflect" + "slices" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/google/uuid" + "github.com/parquet-go/parquet-go/compress" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/memory" +) + +// Schema represents a parquet schema created from a Go value. +// +// Schema implements the Node interface to represent the root node of a parquet +// schema. 
+// +// Schema values are safe to use concurrently from multiple goroutines but must +// be passed by referenced after being created because their internal state +// contains synchronization primitives that are not safe to copy. +type Schema struct { + name string + root Node + funcs onceValue[schemaFuncs] + state onceValue[schemaState] + cache onceValue[schemaCache] +} + +type schemaFuncs struct { + deconstruct deconstructFunc + reconstruct reconstructFunc +} + +type schemaState struct { + mapping columnMapping + columns [][]string +} + +type schemaCache struct { + hashSeed maphash.Seed + writeValue cacheMap[uint64, writeValueFunc] +} + +type cacheMap[K comparable, V any] struct { + value atomic.Value // map[K]V +} + +func (c *cacheMap[K, V]) load(k K, f func() V) V { + oldMap, _ := c.value.Load().(map[K]V) + value, ok := oldMap[k] + if ok { + return value + } + value = f() + newMap := make(map[K]V, len(oldMap)+1) + maps.Copy(newMap, oldMap) + newMap[k] = value + c.value.Store(newMap) + return value +} + +type onceValue[T any] struct { + once sync.Once + value *T +} + +func (v *onceValue[T]) load(f func() *T) *T { + v.once.Do(func() { v.value = f() }) + return v.value +} + +// SchemaOf constructs a parquet schema from a Go value. +// +// The function can construct parquet schemas from struct or pointer-to-struct +// values only. A panic is raised if a Go value of a different type is passed +// to this function. +// +// When creating a parquet Schema from a Go value, the struct fields may contain +// a "parquet" tag to describe properties of the parquet node. The "parquet" tag +// follows the conventional format of Go struct tags: a comma-separated list of +// values describe the options, with the first one defining the name of the +// parquet column. 
+// +// The following options are also supported in the "parquet" struct tag: +// +// optional | make the parquet column optional +// snappy | sets the parquet column compression codec to snappy +// gzip | sets the parquet column compression codec to gzip +// brotli | sets the parquet column compression codec to brotli +// lz4 | sets the parquet column compression codec to lz4 +// zstd | sets the parquet column compression codec to zstd +// plain | enables the plain encoding (no-op default) +// dict | enables dictionary encoding on the parquet column +// delta | enables delta encoding on the parquet column +// list | for slice types, use the parquet LIST logical type +// enum | for string types, use the parquet ENUM logical type +// bytes | for string types, use no parquet logical type +// string | for []byte types, use the parquet STRING logical type +// uuid | for string and [16]byte types, use the parquet UUID logical type +// decimal | for int32, int64 and [n]byte types, use the parquet DECIMAL logical type +// date | for int32 types use the DATE logical type +// time | for int32 and int64 types use the TIME logical type +// timestamp | for int64 types use the TIMESTAMP logical type with, by default, millisecond precision +// split | for float32/float64, use the BYTE_STREAM_SPLIT encoding +// id(n) | where n is int denoting a column field id. Example id(2) for a column with field id of 2 +// +// # The date logical type is an int32 value of the number of days since the unix epoch +// +// The timestamp precision can be changed by defining which precision to use as an argument. +// Supported precisions are: nanosecond, millisecond and microsecond. Example: +// +// type Message struct { +// TimestampMicros int64 `parquet:"timestamp_micros,timestamp(microsecond)" +// } +// +// Both the time and timestamp tags accept an optional second parameter +// to set the `isAdjustedToUTC` annotation of the parquet logical type. +// Valid values are "utc" or "local". 
If not specified, the default value +// for this annotation will be "utc", which will set the `isAdjustedToUTC` annotation +// value to true. Example: +// +// type Message struct { +// TimestampMicrosAdjusted int64 `parquet:"timestamp_micros_adjusted,timestamp(microsecond:utc)" +// TimestampMicrosNotAdjusted int64 `parquet:"timestamp_micros_not_adjusted,timestamp(microsecond:local)" +// } +// +// The decimal tag must be followed by two integer parameters, the first integer +// representing the scale and the second the precision; for example: +// +// type Item struct { +// Cost int64 `parquet:"cost,decimal(0:3)"` +// } +// +// Invalid combination of struct tags and Go types, or repeating options will +// cause the function to panic. +// +// As a special case, if the field tag is "-", the field is omitted from the schema +// and the data will not be written into the parquet file(s). +// Note that a field with name "-" can still be generated using the tag "-,". +// +// The configuration of Parquet maps are done via two tags: +// - The `parquet-key` tag allows to configure the key of a map. +// - The `parquet-value` tag allows users to configure a map's values, for example to declare their native Parquet types. +// +// When configuring a Parquet map, the `parquet` tag will configure the map itself. +// +// For example, the following will set the int64 key of the map to be a timestamp: +// +// type Actions struct { +// Action map[int64]string `parquet:"," parquet-key:",timestamp"` +// } +// +// To configure the element of a list, use the `parquet-element` tag. For example, the following will +// set the id of the element field to 2: +// +// type Item struct { +// Attributes []string `parquet:",id(1),list" parquet-element:",id(2)"` +// } +// +// Note that the name of the element cannot be changed. +// +// The schema name is the Go type name of the value. 
+func SchemaOf(model any, opts ...SchemaOption) *Schema { + cfg := SchemaConfig{} + for _, opt := range opts { + opt.ConfigureSchema(&cfg) + } + return schemaOf(dereference(reflect.TypeOf(model)), cfg.StructTags...) +} + +var cachedSchemas sync.Map // map[reflect.Type]*Schema + +func schemaOf(model reflect.Type, tagReplacements ...StructTagOption) *Schema { + cacheable := len(tagReplacements) == 0 + + if cacheable { + cached, _ := cachedSchemas.Load(model) + schema, _ := cached.(*Schema) + if schema != nil { + return schema + } + } + + if model.Kind() != reflect.Struct { + panic("cannot construct parquet schema from value of type " + model.String()) + } + + schema := NewSchema(model.Name(), nodeOf(nil, model, noTags, tagReplacements)) + + if cacheable { + if actual, loaded := cachedSchemas.LoadOrStore(model, schema); loaded { + schema = actual.(*Schema) + } + } + return schema +} + +// NewSchema constructs a new Schema object with the given name and root node. +// +// The function panics if Node contains more leaf columns than supported by the +// package (see parquet.MaxColumnIndex). 
+func NewSchema(name string, root Node) *Schema { + return &Schema{name: name, root: root} +} + +func dereference(t reflect.Type) reflect.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func makeDeconstructFunc(node Node) (deconstruct deconstructFunc) { + if schema, _ := node.(*Schema); schema != nil { + return schema.lazyLoadFuncs().deconstruct + } + if !node.Leaf() { + _, deconstruct = deconstructFuncOf(0, node) + } + return deconstruct +} + +func makeReconstructFunc(node Node) (reconstruct reconstructFunc) { + if schema, _ := node.(*Schema); schema != nil { + return schema.lazyLoadFuncs().reconstruct + } + if !node.Leaf() { + _, reconstruct = reconstructFuncOf(0, node) + } + return reconstruct +} + +func (s *Schema) lazyLoadFuncs() *schemaFuncs { + return s.funcs.load(func() *schemaFuncs { + return &schemaFuncs{ + deconstruct: makeDeconstructFunc(s.root), + reconstruct: makeReconstructFunc(s.root), + } + }) +} + +func (s *Schema) lazyLoadState() *schemaState { + return s.state.load(func() *schemaState { + mapping, columns := columnMappingOf(s.root) + return &schemaState{ + mapping: mapping, + columns: columns, + } + }) +} + +func (s *Schema) lazyLoadCache() *schemaCache { + return s.cache.load(func() *schemaCache { + return &schemaCache{ + hashSeed: maphash.MakeSeed(), + } + }) +} + +// ConfigureRowGroup satisfies the RowGroupOption interface, allowing Schema +// instances to be passed to row group constructors to pre-declare the schema of +// the output parquet file. +func (s *Schema) ConfigureRowGroup(config *RowGroupConfig) { config.Schema = s } + +// ConfigureReader satisfies the ReaderOption interface, allowing Schema +// instances to be passed to NewReader to pre-declare the schema of rows +// read from the reader. 
+func (s *Schema) ConfigureReader(config *ReaderConfig) { config.Schema = s } + +// ConfigureWriter satisfies the WriterOption interface, allowing Schema +// instances to be passed to NewWriter to pre-declare the schema of the +// output parquet file. +func (s *Schema) ConfigureWriter(config *WriterConfig) { config.Schema = s } + +// ID returns field id of the root node. +func (s *Schema) ID() int { return s.root.ID() } + +// String returns a parquet schema representation of s. +func (s *Schema) String() string { return sprint(s.name, s.root) } + +// Name returns the name of s. +func (s *Schema) Name() string { return s.name } + +// Type returns the parquet type of s. +func (s *Schema) Type() Type { return s.root.Type() } + +// Optional returns false since the root node of a parquet schema is always required. +func (s *Schema) Optional() bool { return s.root.Optional() } + +// Repeated returns false since the root node of a parquet schema is always required. +func (s *Schema) Repeated() bool { return s.root.Repeated() } + +// Required returns true since the root node of a parquet schema is always required. +func (s *Schema) Required() bool { return s.root.Required() } + +// Leaf returns true if the root node of the parquet schema is a leaf column. +func (s *Schema) Leaf() bool { return s.root.Leaf() } + +// Fields returns the list of fields on the root node of the parquet schema. +func (s *Schema) Fields() []Field { return s.root.Fields() } + +// Encoding returns the encoding set on the root node of the parquet schema. +func (s *Schema) Encoding() encoding.Encoding { return s.root.Encoding() } + +// Compression returns the compression codec set on the root node of the parquet +// schema. +func (s *Schema) Compression() compress.Codec { return s.root.Compression() } + +// GoType returns the Go type that best represents the schema. +func (s *Schema) GoType() reflect.Type { return s.root.GoType() } + +// Deconstruct deconstructs a Go value and appends it to a row. 
+// +// The method panics is the structure of the go value does not match the +// parquet schema. +func (s *Schema) Deconstruct(row Row, value any) Row { + state := s.lazyLoadState() + funcs := s.lazyLoadFuncs() + columns := make([][]Value, len(state.columns)) + values := make([]Value, len(state.columns)) + + for i := range columns { + columns[i] = values[i : i : i+1] + } + + v := reflect.ValueOf(value) + for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + if v.IsNil() { + v = reflect.Value{} + break + } + v = v.Elem() + } + funcs.deconstruct(columns, columnLevels{}, v) + return appendRow(row, columns) +} + +// Reconstruct reconstructs a Go value from a row. +// +// The go value passed as first argument must be a non-nil pointer for the +// row to be decoded into. +// +// The method panics if the structure of the go value and parquet row do not +// match. +func (s *Schema) Reconstruct(value any, row Row) error { + v := reflect.ValueOf(value) + if !v.IsValid() { + panic("cannot reconstruct row into go value of type ") + } + if v.Kind() != reflect.Ptr { + panic("cannot reconstruct row into go value of non-pointer type " + v.Type().String()) + } + if v.IsNil() { + panic("cannot reconstruct row into nil pointer of type " + v.Type().String()) + } + for v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + b := valuesSliceBufferPool.Get( + func() *valuesSliceBuffer { + return &valuesSliceBuffer{ + values: make([][]Value, 0, 64), + } + }, + func(v *valuesSliceBuffer) { v.values = v.values[:0] }, + ) + + state := s.lazyLoadState() + funcs := s.lazyLoadFuncs() + columns := b.reserve(len(state.columns)) + row.Range(func(columnIndex int, columnValues []Value) bool { + if columnIndex < len(columns) { + columns[columnIndex] = columnValues + } + return true + }) + // we avoid the defer penalty by releasing b manually + err := funcs.reconstruct(v, columnLevels{}, columns) + b.release() + return err +} + +type 
valuesSliceBuffer struct { + values [][]Value +} + +func (v *valuesSliceBuffer) reserve(n int) [][]Value { + if n <= cap(v.values) { + return v.values[:n] + } + // we can try to keep growing by the power of two, but we care more about the + // memory footprint so this should suffice. + // + // The nature of reads tends to be from similar number of columns.The less work + // we do here the better performance we can get. + v.values = make([][]Value, n) + return v.values +} + +func (v *valuesSliceBuffer) release() { + valuesSliceBufferPool.Put(v) +} + +var valuesSliceBufferPool memory.Pool[valuesSliceBuffer] + +// Lookup returns the leaf column at the given path. +// +// The path is the sequence of column names identifying a leaf column (not +// including the root). +// +// If the path was not found in the mapping, or if it did not represent a +// leaf column of the parquet schema, the boolean will be false. +func (s *Schema) Lookup(path ...string) (LeafColumn, bool) { + leaf := s.lazyLoadState().mapping.lookup(path) + return LeafColumn{ + Node: leaf.node, + Path: leaf.path, + ColumnIndex: int(leaf.columnIndex), + MaxRepetitionLevel: int(leaf.maxRepetitionLevel), + MaxDefinitionLevel: int(leaf.maxDefinitionLevel), + }, leaf.node != nil +} + +// Columns returns the list of column paths available in the schema. +// +// The method always returns the same slice value across calls to ColumnPaths, +// applications should treat it as immutable. +func (s *Schema) Columns() [][]string { return s.lazyLoadState().columns } + +// Comparator constructs a comparator function which orders rows according to +// the list of sorting columns passed as arguments. 
+func (s *Schema) Comparator(sortingColumns ...SortingColumn) func(Row, Row) int { + return compareRowsFuncOf(s, sortingColumns) +} + +func (s *Schema) forEachNode(do func(name string, node Node)) { + forEachNodeOf(s.Name(), s, do) +} + +type structNode struct { + gotype reflect.Type + fields []structField +} + +func structNodeOf(path []string, t reflect.Type, tagReplacements []StructTagOption) *structNode { + // Collect struct fields first so we can order them before generating the + // column indexes. + fields := structFieldsOf(path, t, tagReplacements) + + s := &structNode{ + gotype: t, + fields: make([]structField, len(fields)), + } + + for i := range fields { + field := structField{name: fields[i].Name, index: fields[i].Index} + tags := fromStructTag(fields[i].Tag) + field.Node = makeNodeOf(append(path, fields[i].Name), fields[i].Type, fields[i].Name, tags, tagReplacements) + + s.fields[i] = field + } + + return s +} + +// structFieldsOf returns the list of fields for the given path and type. Struct tags are replaced +// and fields potentially renamed using the provided options. +func structFieldsOf(path []string, t reflect.Type, tagReplacements []StructTagOption) []reflect.StructField { + return appendStructFields(path, t, nil, nil, 0, tagReplacements) +} + +func appendStructFields(path []string, t reflect.Type, fields []reflect.StructField, index []int, offset uintptr, tagReplacements []StructTagOption) []reflect.StructField { + for i, n := 0, t.NumField(); i < n; i++ { + f := t.Field(i) + + // Tag replacements if present. + // Embedded anonymous fields do not extend the + // column path and tags are not used. 
+ if !f.Anonymous { + fpath := append(path, f.Name) + for _, opt := range tagReplacements { + if slices.Equal(fpath, opt.ColumnPath) { + f.Tag = opt.StructTag + } + } + } + + ftags := fromStructTag(f.Tag) + + if tag := ftags.parquet; tag != "" { + name, _ := split(tag) + if tag != "-," && name == "-" { + continue + } + if name != "" { + f.Name = name + } + } + + fieldIndex := index[:len(index):len(index)] + fieldIndex = append(fieldIndex, i) + + f.Offset += offset + + if f.Anonymous { + fields = appendStructFields(path, f.Type, fields, fieldIndex, f.Offset, tagReplacements) + } else if f.IsExported() { + f.Index = fieldIndex + fields = append(fields, f) + } + } + return fields +} + +func (s *structNode) Optional() bool { return false } + +func (s *structNode) Repeated() bool { return false } + +func (s *structNode) Required() bool { return true } + +func (s *structNode) Leaf() bool { return false } + +func (s *structNode) Encoding() encoding.Encoding { return nil } + +func (s *structNode) Compression() compress.Codec { return nil } + +func (s *structNode) GoType() reflect.Type { return s.gotype } + +func (s *structNode) ID() int { return 0 } + +func (s *structNode) String() string { return sprint("", s) } + +func (s *structNode) Type() Type { return groupType{} } + +func (s *structNode) Fields() []Field { + fields := make([]Field, len(s.fields)) + for i := range s.fields { + fields[i] = &s.fields[i] + } + return fields +} + +// fieldByIndex is like reflect.Value.FieldByIndex but returns the zero-value of +// reflect.Value if one of the fields was a nil pointer instead of panicking. 
+func fieldByIndex(v reflect.Value, index []int) reflect.Value { + for _, i := range index { + if v = v.Field(i); v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + v = v.Elem() + break + } else { + v = v.Elem() + } + } + } + return v +} + +type structField struct { + Node + name string + index []int +} + +func (f *structField) Name() string { return f.name } + +func (f *structField) Value(base reflect.Value) reflect.Value { + switch base.Kind() { + case reflect.Map: + return base.MapIndex(reflect.ValueOf(&f.name).Elem()) + case reflect.Ptr: + if base.IsNil() { + base.Set(reflect.New(base.Type().Elem())) + } + return fieldByIndex(base.Elem(), f.index) + default: + if len(f.index) == 1 { + return base.Field(f.index[0]) + } else { + return fieldByIndex(base, f.index) + } + } +} + +func nodeString(t reflect.Type, name string, tag ...string) string { + return fmt.Sprintf("%s %s %v", name, t.String(), tag) +} + +func throwInvalidTag(t reflect.Type, name string, tag string) { + panic(tag + " is an invalid parquet tag: " + nodeString(t, name, tag)) +} + +func throwUnknownTag(t reflect.Type, name string, tag string) { + panic(tag + " is an unrecognized parquet tag: " + nodeString(t, name, tag)) +} + +func throwInvalidNode(t reflect.Type, msg, name string, parquetTags parquetTags) { + tags := make([]string, 0, 3) // A node can have at most 3 tags. + if parquetTags.parquet != "" { + tags = append(tags, parquetTags.parquet) + } + if parquetTags.parquetKey != "" { + tags = append(tags, parquetTags.parquetKey) + } + if parquetTags.parquetValue != "" { + tags = append(tags, parquetTags.parquetValue) + } + if parquetTags.parquetElement != "" { + tags = append(tags, parquetTags.parquetElement) + } + panic(msg + ": " + nodeString(t, name, tags...)) +} + +// FixedLenByteArray decimals are sized based on precision +// this function calculates the necessary byte array size. 
+func decimalFixedLenByteArraySize(precision int) int { + return int(math.Ceil((math.Log10(2) + float64(precision)) / math.Log10(256))) +} + +func forEachStructTagOption(sf reflect.StructField, do func(t reflect.Type, option, args string)) { + if tag := fromStructTag(sf.Tag).parquet; tag != "" { + _, tag = split(tag) // skip the field name + for tag != "" { + option := "" + args := "" + option, tag = split(tag) + option, args = splitOptionArgs(option) + ft := sf.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + do(ft, option, args) + } + } +} + +func nodeOf(path []string, t reflect.Type, tags parquetTags, tagReplacements []StructTagOption) Node { + switch t { + case reflect.TypeFor[deprecated.Int96](): + return Leaf(Int96Type) + case reflect.TypeFor[uuid.UUID](): + return UUID() + case reflect.TypeFor[time.Time](): + return Timestamp(Nanosecond) + } + + var n Node + switch t.Kind() { + case reflect.Bool: + n = Leaf(BooleanType) + + case reflect.Int, reflect.Int64: + n = Int(64) + + case reflect.Int8, reflect.Int16, reflect.Int32: + n = Int(t.Bits()) + + case reflect.Uint, reflect.Uintptr, reflect.Uint64: + n = Uint(64) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + n = Uint(t.Bits()) + + case reflect.Float32: + n = Leaf(FloatType) + + case reflect.Float64: + n = Leaf(DoubleType) + + case reflect.String: + n = String() + + case reflect.Ptr: + n = Optional(nodeOf(path, t.Elem(), noTags, tagReplacements)) + + case reflect.Slice: + if elem := t.Elem(); elem.Kind() == reflect.Uint8 { // []byte? 
+ n = Leaf(ByteArrayType) + } else { + n = Repeated(nodeOf(path, elem, noTags, tagReplacements)) + } + + case reflect.Array: + if t.Elem().Kind() == reflect.Uint8 { + n = Leaf(FixedLenByteArrayType(t.Len())) + } + + case reflect.Map: + mapTag := tags.parquet + if strings.Contains(mapTag, "json") { + n = JSON() + } else { + n = Map( + makeNodeOf(append(path, "key_value", "key"), t.Key(), t.Name(), tags.getMapKeyNodeTags(), tagReplacements), + makeNodeOf(append(path, "key_value", "value"), t.Elem(), t.Name(), tags.getMapValueNodeTags(), tagReplacements), + ) + } + + forEachTagOption([]string{mapTag}, func(option, args string) { + switch option { + case "", "json": + return + case "optional": + n = Optional(n) + case "id": + id, err := parseIDArgs(args) + if err != nil { + throwInvalidTag(t, "map", option) + } + n = FieldID(n, id) + default: + throwUnknownTag(t, "map", option) + } + }) + + case reflect.Struct: + return structNodeOf(path, t, tagReplacements) + + case reflect.Interface: + return (Group)(nil) + } + + if n == nil { + panic("cannot create parquet node from go value of type " + t.String()) + } + + return &goNode{Node: n, gotype: t} +} + +func split(s string) (head, tail string) { + if i := strings.IndexByte(s, ','); i < 0 { + head = s + } else { + head, tail = s[:i], s[i+1:] + } + return +} + +func splitOptionArgs(s string) (option, args string) { + if i := strings.IndexByte(s, '('); i >= 0 { + option = s[:i] + args = s[i:] + } else { + option = s + args = "()" + } + return +} + +func parseDecimalArgs(args string) (scale, precision int, err error) { + if !strings.HasPrefix(args, "(") || !strings.HasSuffix(args, ")") { + return 0, 0, fmt.Errorf("malformed decimal args: %s", args) + } + args = strings.TrimPrefix(args, "(") + args = strings.TrimSuffix(args, ")") + parts := strings.Split(args, ":") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("malformed decimal args: (%s)", args) + } + s, err := strconv.ParseInt(parts[0], 10, 32) + if err != nil { + return 
0, 0, err + } + p, err := strconv.ParseInt(parts[1], 10, 32) + if err != nil { + return 0, 0, err + } + return int(s), int(p), nil +} + +func parseIDArgs(args string) (int, error) { + if !strings.HasPrefix(args, "(") || !strings.HasSuffix(args, ")") { + return 0, fmt.Errorf("malformed id args: %s", args) + } + args = strings.TrimPrefix(args, "(") + args = strings.TrimSuffix(args, ")") + return strconv.Atoi(args) +} + +func parseTimestampArgs(args string) (unit TimeUnit, isUTCNormalized bool, err error) { + if !strings.HasPrefix(args, "(") || !strings.HasSuffix(args, ")") { + return nil, false, fmt.Errorf("malformed timestamp args: %s", args) + } + + args = strings.TrimPrefix(args, "(") + args = strings.TrimSuffix(args, ")") + + if len(args) == 0 { + return Millisecond, true, nil + } + + parts := strings.Split(args, ":") + if len(parts) > 2 { + return nil, false, fmt.Errorf("malformed timestamp args: (%s)", args) + } + + unit, err = parseTimeUnit(parts[0]) + if err != nil { + return nil, false, err + } + + adjusted := true + if len(parts) > 1 { + adjusted, err = parseUTCNormalization(parts[1]) + if err != nil { + return nil, false, err + } + } + + return unit, adjusted, nil +} + +func parseTimeUnit(arg string) (TimeUnit, error) { + switch arg { + case "millisecond": + return Millisecond, nil + case "microsecond": + return Microsecond, nil + case "nanosecond": + return Nanosecond, nil + default: + } + + return nil, fmt.Errorf("unknown time unit: %s", arg) +} + +func parseUTCNormalization(arg string) (isUTCNormalized bool, err error) { + switch arg { + case "utc": + return true, nil + case "local": + return false, nil + default: + return false, fmt.Errorf("unknown utc normalization: %s", arg) + } +} + +type goNode struct { + Node + gotype reflect.Type +} + +func (n *goNode) GoType() reflect.Type { return n.gotype } + +var ( + _ RowGroupOption = (*Schema)(nil) + _ ReaderOption = (*Schema)(nil) + _ WriterOption = (*Schema)(nil) +) + +func makeNodeOf(path []string, t 
reflect.Type, name string, tags parquetTags, tagReplacements []StructTagOption) Node { + var ( + node Node + optional bool + list bool + encoded encoding.Encoding + compressed compress.Codec + fieldID int + ) + + setNode := func(n Node) { + if node != nil { + throwInvalidNode(t, "struct field has multiple logical parquet types declared", name, tags) + } + node = n + } + + setOptional := func() { + if optional { + throwInvalidNode(t, "struct field has multiple declaration of the optional tag", name, tags) + } + optional = true + } + + setList := func() { + if list { + throwInvalidNode(t, "struct field has multiple declaration of the list tag", name, tags) + } + list = true + } + + setEncoding := func(e encoding.Encoding) { + if encoded != nil { + throwInvalidNode(t, "struct field has encoding declared multiple time", name, tags) + } + encoded = e + } + + setCompression := func(c compress.Codec) { + if compressed != nil { + throwInvalidNode(t, "struct field has compression codecs declared multiple times", name, tags) + } + compressed = c + } + + if t.Kind() == reflect.Map { + node = nodeOf(path, t, tags, tagReplacements) + } else { + forEachTagOption([]string{tags.parquet}, func(option, args string) { + switch option { + case "": + return + case "optional": + setOptional() + + case "snappy": + setCompression(&Snappy) + + case "gzip": + setCompression(&Gzip) + + case "brotli": + setCompression(&Brotli) + + case "lz4": + setCompression(&Lz4Raw) + + case "zstd": + setCompression(&Zstd) + + case "uncompressed": + setCompression(&Uncompressed) + + case "plain": + setEncoding(&Plain) + + case "dict": + setEncoding(&RLEDictionary) + + case "json": + setNode(JSON()) + + case "delta": + switch t.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint32, reflect.Uint64: + setEncoding(&DeltaBinaryPacked) + case reflect.String: + setEncoding(&DeltaByteArray) + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { // []byte? 
+ setEncoding(&DeltaByteArray) + } else { + throwInvalidTag(t, name, option) + } + case reflect.Array: + if t.Elem().Kind() == reflect.Uint8 { // [N]byte? + setEncoding(&DeltaByteArray) + } else { + throwInvalidTag(t, name, option) + } + default: + switch t { + case reflect.TypeFor[time.Time](): + setEncoding(&DeltaBinaryPacked) + default: + throwInvalidTag(t, name, option) + } + } + + case "split": + switch t.Kind() { + case reflect.Float32, reflect.Float64: + setEncoding(&ByteStreamSplit) + default: + throwInvalidTag(t, name, option) + } + + case "list": + switch t.Kind() { + case reflect.Slice: + if t == reflect.TypeFor[json.RawMessage]() { + throwInvalidTag(t, name, option) + } + element := makeNodeOf(append(path, "list", "element"), t.Elem(), t.Name(), tags.getListElementNodeTags(), tagReplacements) + setNode(element) + setList() + default: + throwInvalidTag(t, name, option) + } + + case "enum": + switch t.Kind() { + case reflect.String: + setNode(Enum()) + default: + throwInvalidTag(t, name, option) + } + + case "uuid": + switch t.Kind() { + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 || t.Len() != 16 { + throwInvalidTag(t, name, option) + } + setNode(UUID()) + case reflect.String: + setNode(UUID()) + default: + throwInvalidTag(t, name, option) + } + + case "decimal": + scale, precision, err := parseDecimalArgs(args) + if err != nil { + throwInvalidTag(t, name, option+args) + } + var baseType Type + switch t.Kind() { + case reflect.Int32: + baseType = Int32Type + case reflect.Int64: + baseType = Int64Type + case reflect.Array, reflect.Slice: + baseType = FixedLenByteArrayType(decimalFixedLenByteArraySize(precision)) + default: + throwInvalidTag(t, name, option) + } + + setNode(Decimal(scale, precision, baseType)) + + case "string": + switch { + case t.Kind() == reflect.String: + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + default: + throwInvalidTag(t, name, option) + } + setNode(String()) + + case "bytes": + switch { + 
case t.Kind() == reflect.String: + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + default: + throwInvalidTag(t, name, option) + } + setNode(Leaf(ByteArrayType)) + + case "date": + switch t.Kind() { + case reflect.Int32: + setNode(Date()) + case reflect.Ptr: + // Support *time.Time with date tag + if t.Elem() == reflect.TypeFor[time.Time]() { + setNode(Optional(Date())) + } else { + throwInvalidTag(t, name, option) + } + default: + switch t { + case reflect.TypeFor[time.Time](): + setNode(Date()) + default: + throwInvalidTag(t, name, option) + } + } + + case "time": + // Handle time.Duration specially - it can use any time unit + // (millisecond, microsecond, or nanosecond) and TimeAdjusted() + // will automatically select the correct physical type + if t == reflect.TypeFor[time.Duration]() { + timeUnit, adjusted, err := parseTimestampArgs(args) + if err != nil { + throwInvalidTag(t, name, option+args) + } + // If no args provided, default to nanosecond for time.Duration + if args == "()" { + timeUnit = Nanosecond + adjusted = true + } + setNode(TimeAdjusted(timeUnit, adjusted)) + } else { + switch t.Kind() { + case reflect.Int32: + timeUnit, adjusted, err := parseTimestampArgs(args) + if err != nil || timeUnit.Duration() < time.Millisecond { + throwInvalidTag(t, name, option+args) + } + setNode(TimeAdjusted(timeUnit, adjusted)) + case reflect.Int64: + timeUnit, adjusted, err := parseTimestampArgs(args) + if err != nil || timeUnit.Duration() == time.Millisecond { + throwInvalidTag(t, name, option+args) + } + setNode(TimeAdjusted(timeUnit, adjusted)) + default: + throwInvalidTag(t, name, option) + } + } + + case "timestamp": + switch t.Kind() { + case reflect.Int64: + timeUnit, adjusted, err := parseTimestampArgs(args) + if err != nil { + throwInvalidTag(t, name, option+args) + } + setNode(TimestampAdjusted(timeUnit, adjusted)) + case reflect.Ptr: + // Support *time.Time with timestamp tags + if t.Elem() == reflect.TypeFor[time.Time]() { + 
timeUnit, adjusted, err := parseTimestampArgs(args) + if err != nil { + throwInvalidTag(t, name, option+args) + } + // Wrap in Optional for schema correctness (nil pointers = NULL values) + setNode(Optional(TimestampAdjusted(timeUnit, adjusted))) + } else { + throwInvalidTag(t, name, option) + } + default: + switch t { + case reflect.TypeFor[time.Time](): + timeUnit, adjusted, err := parseTimestampArgs(args) + if err != nil { + throwInvalidTag(t, name, option+args) + } + setNode(TimestampAdjusted(timeUnit, adjusted)) + default: + throwInvalidTag(t, name, option) + } + } + + case "id": + id, err := parseIDArgs(args) + if err != nil { + throwInvalidNode(t, "struct field has field id that is not a valid int", name, tags) + } + fieldID = id + } + }) + } + + // Special case: an "optional" struct tag on a slice applies to the + // individual items, not the overall list. The least messy way to + // deal with this is at this level, instead of passing down optional + // information into the nodeOf function, and then passing back whether an + // optional tag was applied. + if node == nil && t.Kind() == reflect.Slice { + isUint8 := t.Elem().Kind() == reflect.Uint8 + // Note for strings "optional" applies only to the entire BYTE_ARRAY and + // not each individual byte. + if optional && !isUint8 { + node = Repeated(Optional(nodeOf(path, t.Elem(), tags, tagReplacements))) + // Don't also apply "optional" to the whole list. 
+ optional = false + } + } + + if node == nil { + node = nodeOf(path, t, tags, tagReplacements) + } + + if compressed != nil { + node = Compressed(node, compressed) + } + + if encoded != nil { + node = Encoded(node, encoded) + } + + if list { + node = List(node) + } + + if node.Repeated() && !list { + repeated := node.GoType().Elem() + if repeated.Kind() == reflect.Slice { + // Special case: allow [][]uint8 as seen in a logical map of strings + if repeated.Elem().Kind() != reflect.Uint8 { + panic("unhandled nested slice on parquet schema without list tag") + } + } + } + + if optional { + node = Optional(node) + } + if fieldID != 0 { + node = FieldID(node, fieldID) + } + return node +} + +func forEachTagOption(tags []string, do func(option, args string)) { + for _, tag := range tags { + _, tag = split(tag) // skip the field name + for tag != "" { + option := "" + option, tag = split(tag) + var args string + option, args = splitOptionArgs(option) + do(option, args) + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/search.go b/vendor/github.com/parquet-go/parquet-go/search.go new file mode 100644 index 00000000000..49c5eb22ae2 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/search.go @@ -0,0 +1,95 @@ +package parquet + +// Search is like Find, but uses the default ordering of the given type. Search +// and Find are scoped to a given ColumnChunk and find the pages within a +// ColumnChunk which might contain the result. See Find for more details. +func Search(index ColumnIndex, value Value, typ Type) int { + return Find(index, value, CompareNullsLast(typ.Compare)) +} + +// Find uses the ColumnIndex passed as argument to find the page in a column +// chunk (determined by the given ColumnIndex) that the given value is expected +// to be found in. +// +// The function returns the index of the first page that might contain the +// value. If the function determines that the value does not exist in the +// index, NumPages is returned. 
+// +// If you want to search the entire parquet file, you must iterate over the +// RowGroups and search each one individually, if there are multiple in the +// file. If you call writer.Flush before closing the file, then you will have +// multiple RowGroups to iterate over, otherwise Flush is called once on Close. +// +// The comparison function passed as last argument is used to determine the +// relative order of values. This should generally be the Compare method of +// the column type, but can sometimes be customized to modify how null values +// are interpreted, for example: +// +// pageIndex := parquet.Find(columnIndex, value, +// parquet.CompareNullsFirst(typ.Compare), +// ) +func Find(index ColumnIndex, value Value, cmp func(Value, Value) int) int { + switch { + case index.IsAscending(): + return binarySearch(index, value, cmp) + default: + return linearSearch(index, value, cmp) + } +} + +func binarySearch(index ColumnIndex, value Value, cmp func(Value, Value) int) int { + numPages := index.NumPages() + topIndex := numPages + currentIndex := 0 + + // while there's at least one more page to check + for currentIndex < topIndex { + // nextIndex is set to halfway between currentIndex and topIndex + nextIndex := ((topIndex - currentIndex) / 2) + currentIndex + + // Compare against both min and max to handle overlapping page bounds. + // When page bounds overlap due to truncation, we need to search left + // to find the first page that might contain the value. 
+ switch { + case cmp(value, index.MinValue(nextIndex)) < 0: + // value < min: can't be in this page or any after it + topIndex = nextIndex + case cmp(value, index.MaxValue(nextIndex)) > 0: + // value > max: can't be in this page or any before it (including nextIndex) + currentIndex = nextIndex + 1 + default: + // min <= value <= max: value might be in this page or an earlier one + // with overlapping bounds, so search left to find the first occurrence + topIndex = nextIndex + } + } + + // After the loop, currentIndex == topIndex points to the candidate page. + // Verify the value is actually within the page bounds. + if currentIndex < numPages { + minValue := index.MinValue(currentIndex) + maxValue := index.MaxValue(currentIndex) + + // If value is not in pages[currentIndex], then it's not in this columnChunk + if cmp(value, minValue) < 0 || cmp(value, maxValue) > 0 { + return numPages + } + } + + return currentIndex +} + +func linearSearch(index ColumnIndex, value Value, cmp func(Value, Value) int) int { + n := index.NumPages() + + for i := range n { + min := index.MinValue(i) + max := index.MaxValue(i) + + if cmp(min, value) <= 0 && cmp(value, max) <= 0 { + return i + } + } + + return n +} diff --git a/vendor/github.com/parquet-go/parquet-go/sorting.go b/vendor/github.com/parquet-go/parquet-go/sorting.go new file mode 100644 index 00000000000..e4087b6d316 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/sorting.go @@ -0,0 +1,270 @@ +package parquet + +import ( + "io" + "slices" + "sort" +) + +// SortingWriter is a type similar to GenericWriter but it ensures that rows +// are sorted according to the sorting columns configured on the writer. +// +// The writer accumulates rows in an in-memory buffer which is sorted when it +// reaches the target number of rows, then written to a temporary row group. 
+// When the writer is flushed or closed, the temporary row groups are merged +// into a row group in the output file, ensuring that rows remain sorted in the +// final row group. +// +// Because row groups get encoded and compressed, they hold a lot less memory +// than if all rows were retained in memory. Sorting then merging rows chunks +// also tends to be a lot more efficient than sorting all rows in memory as it +// results in better CPU cache utilization since sorting multi-megabyte arrays +// causes a lot of cache misses since the data set cannot be held in CPU caches. +type SortingWriter[T any] struct { + rowbuf *RowBuffer[T] + writer *GenericWriter[T] + output *GenericWriter[T] + buffer io.ReadWriteSeeker + maxRows int64 + numRows int64 + sorting SortingConfig + dedupe dedupe +} + +// NewSortingWriter constructs a new sorting writer which writes a parquet file +// where rows of each row group are ordered according to the sorting columns +// configured on the writer. +// +// The sortRowCount argument defines the target number of rows that will be +// sorted in memory before being written to temporary row groups. The greater +// this value the more memory is needed to buffer rows in memory. Choosing a +// value that is too small limits the maximum number of rows that can exist in +// the output file since the writer cannot create more than 32K temporary row +// groups to hold the sorted row chunks. +func NewSortingWriter[T any](output io.Writer, sortRowCount int64, options ...WriterOption) *SortingWriter[T] { + config, err := NewWriterConfig(options...) 
+ if err != nil { + panic(err) + } + return &SortingWriter[T]{ + rowbuf: NewRowBuffer[T](&RowGroupConfig{ + Schema: config.Schema, + Sorting: config.Sorting, + }), + writer: NewGenericWriter[T](io.Discard, &WriterConfig{ + CreatedBy: config.CreatedBy, + ColumnPageBuffers: config.ColumnPageBuffers, + ColumnIndexSizeLimit: config.ColumnIndexSizeLimit, + PageBufferSize: config.PageBufferSize, + WriteBufferSize: config.WriteBufferSize, + DataPageVersion: config.DataPageVersion, + Schema: config.Schema, + Compression: config.Compression, + Sorting: config.Sorting, + Encodings: config.Encodings, + }), + output: NewGenericWriter[T](output, config), + maxRows: sortRowCount, + sorting: config.Sorting, + } +} + +func (w *SortingWriter[T]) Close() error { + if err := w.Flush(); err != nil { + return err + } + return w.output.Close() +} + +func (w *SortingWriter[T]) Flush() error { + defer w.resetSortingBuffer() + + if err := w.sortAndWriteBufferedRows(); err != nil { + return err + } + + if w.numRows == 0 { + return nil + } + + if err := w.writer.Close(); err != nil { + return err + } + + size, err := w.buffer.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + + f, err := OpenFile(newReaderAt(w.buffer), size, + &FileConfig{ + SkipPageIndex: true, + SkipBloomFilters: true, + ReadBufferSize: defaultReadBufferSize, + }, + ) + if err != nil { + return err + } + + m, err := MergeRowGroups(f.RowGroups(), + &RowGroupConfig{ + Schema: w.Schema(), + Sorting: w.sorting, + }, + ) + if err != nil { + return err + } + + rows := m.Rows() + defer rows.Close() + + reader := RowReader(rows) + if w.sorting.DropDuplicatedRows { + reader = DedupeRowReader(rows, w.rowbuf.compare) + } + + if _, err := CopyRows(w.output, reader); err != nil { + return err + } + + return w.output.Flush() +} + +func (w *SortingWriter[T]) Reset(output io.Writer) { + w.output.Reset(output) + w.rowbuf.Reset() + w.resetSortingBuffer() +} + +func (w *SortingWriter[T]) resetSortingBuffer() { + 
w.writer.Reset(io.Discard) + w.numRows = 0 + + if w.buffer != nil { + w.sorting.SortingBuffers.PutBuffer(w.buffer) + w.buffer = nil + } +} + +func (w *SortingWriter[T]) Write(rows []T) (int, error) { + return w.writeRows(len(rows), func(i, j int) (int, error) { return w.rowbuf.Write(rows[i:j]) }) +} + +func (w *SortingWriter[T]) WriteRows(rows []Row) (int, error) { + return w.writeRows(len(rows), func(i, j int) (int, error) { return w.rowbuf.WriteRows(rows[i:j]) }) +} + +func (w *SortingWriter[T]) writeRows(numRows int, writeRows func(i, j int) (int, error)) (int, error) { + wn := 0 + + for wn < numRows { + if w.rowbuf.NumRows() >= w.maxRows { + if err := w.sortAndWriteBufferedRows(); err != nil { + return wn, err + } + } + + n := int(w.maxRows - w.rowbuf.NumRows()) + n += wn + if n > numRows { + n = numRows + } + + n, err := writeRows(wn, n) + wn += n + + if err != nil { + return wn, err + } + } + + return wn, nil +} + +func (w *SortingWriter[T]) SetKeyValueMetadata(key, value string) { + w.output.SetKeyValueMetadata(key, value) +} + +func (w *SortingWriter[T]) Schema() *Schema { + return w.output.Schema() +} + +func (w *SortingWriter[T]) sortAndWriteBufferedRows() error { + if w.rowbuf.Len() == 0 { + return nil + } + + defer w.rowbuf.Reset() + sort.Sort(w.rowbuf) + + if w.sorting.DropDuplicatedRows { + w.rowbuf.rows = w.rowbuf.rows[:w.dedupe.deduplicate(w.rowbuf.rows, w.rowbuf.compare)] + defer w.dedupe.reset() + } + + rows := w.rowbuf.Rows() + defer rows.Close() + + if w.buffer == nil { + w.buffer = w.sorting.SortingBuffers.GetBuffer() + w.writer.Reset(w.buffer) + } + + n, err := CopyRows(w.writer, rows) + if err != nil { + return err + } + + if err := w.writer.Flush(); err != nil { + return err + } + + w.numRows += n + return nil +} + +// File returns a FileView of the written parquet file. +// Only available after Close is called. 
+func (w *SortingWriter[T]) File() FileView { + return w.output.File() +} + +// EqualSortingColumns compares two slices of sorting columns for equality. +// +// Two sorting column slices are considered equal if they have the same length +// and each corresponding pair of sorting columns is equal. Two sorting columns +// are equal if they have: +// - The same column path (including nested field paths) +// - The same sort direction (ascending or descending) +// - The same nulls handling (nulls first or nulls last) +// +// The comparison is order-sensitive, meaning that [A, B] is not equal to [B, A]. +// Both nil and empty slices are considered equal. +// +// This function is useful for: +// - Validating that merged row groups maintain expected sorting +// - Comparing sorting configurations between different row groups +// - Testing sorting column propagation in merge operations +// +// Example: +// +// cols1 := []SortingColumn{Ascending("name"), Descending("age")} +// cols2 := []SortingColumn{Ascending("name"), Descending("age")} +// equal := EqualSortingColumns(cols1, cols2) // returns true +// +// cols3 := []SortingColumn{Descending("age"), Ascending("name")} +// equal = EqualSortingColumns(cols1, cols3) // returns false (different order) +// +// cols4 := []SortingColumn{Ascending("name"), Ascending("age")} +// equal = EqualSortingColumns(cols1, cols4) // returns false (different direction) +func EqualSortingColumns(a, b []SortingColumn) bool { + return len(a) == len(b) && slices.EqualFunc(a, b, equalSortingColumn) +} + +// equalSortingColumn compares two individual sorting columns for equality. +// Two sorting columns are equal if they have the same path, direction, and nulls handling. 
+func equalSortingColumn(a, b SortingColumn) bool { + return slices.Equal(a.Path(), b.Path()) && a.Descending() == b.Descending() && a.NullsFirst() == b.NullsFirst() +} diff --git a/vendor/github.com/parquet-go/parquet-go/sparse/array.go b/vendor/github.com/parquet-go/parquet-go/sparse/array.go new file mode 100644 index 00000000000..fecfb4dc4d7 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/sparse/array.go @@ -0,0 +1,320 @@ +package sparse + +import ( + "time" + "unsafe" +) + +type Array struct{ array } + +func UnsafeArray(base unsafe.Pointer, length int, offset uintptr) Array { + return Array{unsafeArray(base, length, offset)} +} + +func (a Array) Len() int { return int(a.len) } +func (a Array) Index(i int) unsafe.Pointer { return a.index(i) } +func (a Array) Slice(i, j int) Array { return Array{a.slice(i, j)} } +func (a Array) Offset(off uintptr) Array { return Array{a.offset(off)} } +func (a Array) BoolArray() BoolArray { return BoolArray{a.array} } +func (a Array) Int8Array() Int8Array { return Int8Array{a.array} } +func (a Array) Int16Array() Int16Array { return Int16Array{a.array} } +func (a Array) Int32Array() Int32Array { return Int32Array{a.array} } +func (a Array) Int64Array() Int64Array { return Int64Array{a.array} } +func (a Array) Float32Array() Float32Array { return Float32Array{a.array} } +func (a Array) Float64Array() Float64Array { return Float64Array{a.array} } +func (a Array) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a Array) Uint16Array() Uint16Array { return Uint16Array{a.array} } +func (a Array) Uint32Array() Uint32Array { return Uint32Array{a.array} } +func (a Array) Uint64Array() Uint64Array { return Uint64Array{a.array} } +func (a Array) Uint128Array() Uint128Array { return Uint128Array{a.array} } +func (a Array) StringArray() StringArray { return StringArray{a.array} } +func (a Array) TimeArray() TimeArray { return TimeArray{a.array} } + +type array struct { + ptr unsafe.Pointer + len uintptr + off uintptr 
+} + +func makeArray[T any](base []T) array { + var z T + return array{ + ptr: unsafe.Pointer(unsafe.SliceData(base)), + len: uintptr(len(base)), + off: unsafe.Sizeof(z), + } +} + +func unsafeArray(base unsafe.Pointer, length int, offset uintptr) array { + return array{ptr: base, len: uintptr(length), off: offset} +} + +func (a array) index(i int) unsafe.Pointer { + if uintptr(i) >= a.len { + panic("index out of bounds") + } + return unsafe.Add(a.ptr, a.off*uintptr(i)) +} + +func (a array) slice(i, j int) array { + if uintptr(i) > a.len || uintptr(j) > a.len || i > j { + panic("slice index out of bounds") + } + return array{ + ptr: unsafe.Add(a.ptr, a.off*uintptr(i)), + len: uintptr(j - i), + off: a.off, + } +} + +func (a array) offset(off uintptr) array { + if a.ptr == nil { + panic("offset of nil array") + } + return array{ + ptr: unsafe.Add(a.ptr, off), + len: a.len, + off: a.off, + } +} + +type BoolArray struct{ array } + +func MakeBoolArray(values []bool) BoolArray { + return BoolArray{makeArray(values)} +} + +func UnsafeBoolArray(base unsafe.Pointer, length int, offset uintptr) BoolArray { + return BoolArray{unsafeArray(base, length, offset)} +} + +func (a BoolArray) Len() int { return int(a.len) } +func (a BoolArray) Index(i int) bool { return *(*byte)(a.index(i)) != 0 } +func (a BoolArray) Slice(i, j int) BoolArray { return BoolArray{a.slice(i, j)} } +func (a BoolArray) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a BoolArray) UnsafeArray() Array { return Array{a.array} } + +type Int8Array struct{ array } + +func MakeInt8Array(values []int8) Int8Array { + return Int8Array{makeArray(values)} +} + +func UnsafeInt8Array(base unsafe.Pointer, length int, offset uintptr) Int8Array { + return Int8Array{unsafeArray(base, length, offset)} +} + +func (a Int8Array) Len() int { return int(a.len) } +func (a Int8Array) Index(i int) int8 { return *(*int8)(a.index(i)) } +func (a Int8Array) Slice(i, j int) Int8Array { return Int8Array{a.slice(i, j)} } +func 
(a Int8Array) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a Int8Array) UnsafeArray() Array { return Array{a.array} } + +type Int16Array struct{ array } + +func MakeInt16Array(values []int16) Int16Array { + return Int16Array{makeArray(values)} +} + +func UnsafeInt16Array(base unsafe.Pointer, length int, offset uintptr) Int16Array { + return Int16Array{unsafeArray(base, length, offset)} +} + +func (a Int16Array) Len() int { return int(a.len) } +func (a Int16Array) Index(i int) int16 { return *(*int16)(a.index(i)) } +func (a Int16Array) Slice(i, j int) Int16Array { return Int16Array{a.slice(i, j)} } +func (a Int16Array) Int8Array() Int8Array { return Int8Array{a.array} } +func (a Int16Array) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a Int16Array) Uint16Array() Uint16Array { return Uint16Array{a.array} } +func (a Int16Array) UnsafeArray() Array { return Array{a.array} } + +type Int32Array struct{ array } + +func MakeInt32Array(values []int32) Int32Array { + return Int32Array{makeArray(values)} +} + +func UnsafeInt32Array(base unsafe.Pointer, length int, offset uintptr) Int32Array { + return Int32Array{unsafeArray(base, length, offset)} +} + +func (a Int32Array) Len() int { return int(a.len) } +func (a Int32Array) Index(i int) int32 { return *(*int32)(a.index(i)) } +func (a Int32Array) Slice(i, j int) Int32Array { return Int32Array{a.slice(i, j)} } +func (a Int32Array) Int8Array() Int8Array { return Int8Array{a.array} } +func (a Int32Array) Int16Array() Int16Array { return Int16Array{a.array} } +func (a Int32Array) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a Int32Array) Uint16Array() Uint16Array { return Uint16Array{a.array} } +func (a Int32Array) Uint32Array() Uint32Array { return Uint32Array{a.array} } +func (a Int32Array) UnsafeArray() Array { return Array{a.array} } + +type Int64Array struct{ array } + +func MakeInt64Array(values []int64) Int64Array { + return Int64Array{makeArray(values)} +} + +func 
UnsafeInt64Array(base unsafe.Pointer, length int, offset uintptr) Int64Array { + return Int64Array{unsafeArray(base, length, offset)} +} + +func (a Int64Array) Len() int { return int(a.len) } +func (a Int64Array) Index(i int) int64 { return *(*int64)(a.index(i)) } +func (a Int64Array) Slice(i, j int) Int64Array { return Int64Array{a.slice(i, j)} } +func (a Int64Array) Int8Array() Int8Array { return Int8Array{a.array} } +func (a Int64Array) Int16Array() Int16Array { return Int16Array{a.array} } +func (a Int64Array) Int32Array() Int32Array { return Int32Array{a.array} } +func (a Int64Array) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a Int64Array) Uint16Array() Uint16Array { return Uint16Array{a.array} } +func (a Int64Array) Uint32Array() Uint32Array { return Uint32Array{a.array} } +func (a Int64Array) Uint64Array() Uint64Array { return Uint64Array{a.array} } +func (a Int64Array) UnsafeArray() Array { return Array{a.array} } + +type Float32Array struct{ array } + +func MakeFloat32Array(values []float32) Float32Array { + return Float32Array{makeArray(values)} +} + +func UnsafeFloat32Array(base unsafe.Pointer, length int, offset uintptr) Float32Array { + return Float32Array{unsafeArray(base, length, offset)} +} + +func (a Float32Array) Len() int { return int(a.len) } +func (a Float32Array) Index(i int) float32 { return *(*float32)(a.index(i)) } +func (a Float32Array) Slice(i, j int) Float32Array { return Float32Array{a.slice(i, j)} } +func (a Float32Array) Array() Array { return Array{a.array} } +func (a Float32Array) Uint32Array() Uint32Array { return Uint32Array{a.array} } +func (a Float32Array) UnsafeArray() Array { return Array{a.array} } + +type Float64Array struct{ array } + +func MakeFloat64Array(values []float64) Float64Array { + return Float64Array{makeArray(values)} +} + +func UnsafeFloat64Array(base unsafe.Pointer, length int, offset uintptr) Float64Array { + return Float64Array{unsafeArray(base, length, offset)} +} + +func (a Float64Array) 
Len() int { return int(a.len) } +func (a Float64Array) Index(i int) float64 { return *(*float64)(a.index(i)) } +func (a Float64Array) Slice(i, j int) Float64Array { return Float64Array{a.slice(i, j)} } +func (a Float64Array) Uint64Array() Uint64Array { return Uint64Array{a.array} } +func (a Float64Array) UnsafeArray() Array { return Array{a.array} } + +type Uint8Array struct{ array } + +func MakeUint8Array(values []uint8) Uint8Array { + return Uint8Array{makeArray(values)} +} + +func UnsafeUint8Array(base unsafe.Pointer, length int, offset uintptr) Uint8Array { + return Uint8Array{unsafeArray(base, length, offset)} +} + +func (a Uint8Array) Len() int { return int(a.len) } +func (a Uint8Array) Index(i int) uint8 { return *(*uint8)(a.index(i)) } +func (a Uint8Array) Slice(i, j int) Uint8Array { return Uint8Array{a.slice(i, j)} } +func (a Uint8Array) UnsafeArray() Array { return Array{a.array} } + +type Uint16Array struct{ array } + +func MakeUint16Array(values []uint16) Uint16Array { + return Uint16Array{makeArray(values)} +} + +func UnsafeUint16Array(base unsafe.Pointer, length int, offset uintptr) Uint16Array { + return Uint16Array{unsafeArray(base, length, offset)} +} + +func (a Uint16Array) Len() int { return int(a.len) } +func (a Uint16Array) Index(i int) uint16 { return *(*uint16)(a.index(i)) } +func (a Uint16Array) Slice(i, j int) Uint16Array { return Uint16Array{a.slice(i, j)} } +func (a Uint16Array) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a Uint16Array) UnsafeArray() Array { return Array{a.array} } + +type Uint32Array struct{ array } + +func MakeUint32Array(values []uint32) Uint32Array { + return Uint32Array{makeArray(values)} +} + +func UnsafeUint32Array(base unsafe.Pointer, length int, offset uintptr) Uint32Array { + return Uint32Array{unsafeArray(base, length, offset)} +} + +func (a Uint32Array) Len() int { return int(a.len) } +func (a Uint32Array) Index(i int) uint32 { return *(*uint32)(a.index(i)) } +func (a Uint32Array) Slice(i, j 
int) Uint32Array { return Uint32Array{a.slice(i, j)} } +func (a Uint32Array) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a Uint32Array) Uint16Array() Uint16Array { return Uint16Array{a.array} } +func (a Uint32Array) UnsafeArray() Array { return Array{a.array} } + +type Uint64Array struct{ array } + +func MakeUint64Array(values []uint64) Uint64Array { + return Uint64Array{makeArray(values)} +} + +func UnsafeUint64Array(base unsafe.Pointer, length int, offset uintptr) Uint64Array { + return Uint64Array{unsafeArray(base, length, offset)} +} + +func (a Uint64Array) Len() int { return int(a.len) } +func (a Uint64Array) Index(i int) uint64 { return *(*uint64)(a.index(i)) } +func (a Uint64Array) Slice(i, j int) Uint64Array { return Uint64Array{a.slice(i, j)} } +func (a Uint64Array) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a Uint64Array) Uint16Array() Uint16Array { return Uint16Array{a.array} } +func (a Uint64Array) Uint32Array() Uint32Array { return Uint32Array{a.array} } +func (a Uint64Array) UnsafeArray() Array { return Array{a.array} } + +type Uint128Array struct{ array } + +func MakeUint128Array(values [][16]byte) Uint128Array { + return Uint128Array{makeArray(values)} +} + +func UnsafeUint128Array(base unsafe.Pointer, length int, offset uintptr) Uint128Array { + return Uint128Array{unsafeArray(base, length, offset)} +} + +func (a Uint128Array) Len() int { return int(a.len) } +func (a Uint128Array) Index(i int) [16]byte { return *(*[16]byte)(a.index(i)) } +func (a Uint128Array) Slice(i, j int) Uint128Array { return Uint128Array{a.slice(i, j)} } +func (a Uint128Array) Uint8Array() Uint8Array { return Uint8Array{a.array} } +func (a Uint128Array) Uint16Array() Uint16Array { return Uint16Array{a.array} } +func (a Uint128Array) Uint32Array() Uint32Array { return Uint32Array{a.array} } +func (a Uint128Array) Uint64Array() Uint64Array { return Uint64Array{a.array} } +func (a Uint128Array) UnsafeArray() Array { return Array{a.array} } + 
+type StringArray struct{ array } + +func MakeStringArray(values []string) StringArray { + const sizeOfString = unsafe.Sizeof("") + return StringArray{makeArray(values)} +} + +func UnsafeStringArray(base unsafe.Pointer, length int, offset uintptr) StringArray { + return StringArray{unsafeArray(base, length, offset)} +} + +func (a StringArray) Len() int { return int(a.len) } +func (a StringArray) Index(i int) string { return *(*string)(a.index(i)) } +func (a StringArray) Slice(i, j int) StringArray { return StringArray{a.slice(i, j)} } +func (a StringArray) UnsafeArray() Array { return Array{a.array} } + +type TimeArray struct{ array } + +func MakeTimeArray(values []time.Time) TimeArray { + return TimeArray{makeArray(values)} +} + +func UnsafeTimeArray(base unsafe.Pointer, length int, offset uintptr) TimeArray { + return TimeArray{unsafeArray(base, length, offset)} +} + +func (a TimeArray) Len() int { return int(a.len) } +func (a TimeArray) Index(i int) time.Time { return *(*time.Time)(a.index(i)) } +func (a TimeArray) Slice(i, j int) TimeArray { return TimeArray{a.slice(i, j)} } +func (a TimeArray) UnsafeArray() Array { return Array{a.array} } diff --git a/vendor/github.com/parquet-go/parquet-go/sparse/gather.go b/vendor/github.com/parquet-go/parquet-go/sparse/gather.go new file mode 100644 index 00000000000..5ead58aa22c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/sparse/gather.go @@ -0,0 +1,37 @@ +package sparse + +import "github.com/parquet-go/bitpack/unsafecast" + +func GatherInt32(dst []int32, src Int32Array) int { + return GatherUint32(unsafecast.Slice[uint32](dst), src.Uint32Array()) +} + +func GatherInt64(dst []int64, src Int64Array) int { + return GatherUint64(unsafecast.Slice[uint64](dst), src.Uint64Array()) +} + +func GatherFloat32(dst []float32, src Float32Array) int { + return GatherUint32(unsafecast.Slice[uint32](dst), src.Uint32Array()) +} + +func GatherFloat64(dst []float64, src Float64Array) int { + return 
GatherUint64(unsafecast.Slice[uint64](dst), src.Uint64Array()) +} + +func GatherBits(dst []byte, src Uint8Array) int { return gatherBits(dst, src) } + +func GatherUint32(dst []uint32, src Uint32Array) int { return gather32(dst, src) } + +func GatherUint64(dst []uint64, src Uint64Array) int { return gather64(dst, src) } + +func GatherUint128(dst [][16]byte, src Uint128Array) int { return gather128(dst, src) } + +func GatherString(dst []string, src StringArray) int { + n := min(len(dst), src.Len()) + + for i := range dst[:n] { + dst[i] = src.Index(i) + } + + return n +} diff --git a/vendor/github.com/parquet-go/parquet-go/sparse/gather_amd64.go b/vendor/github.com/parquet-go/parquet-go/sparse/gather_amd64.go new file mode 100644 index 00000000000..427abbccbf5 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/sparse/gather_amd64.go @@ -0,0 +1,85 @@ +//go:build !purego + +package sparse + +import ( + "golang.org/x/sys/cpu" +) + +func gatherBits(dst []byte, src Uint8Array) int { + n := min(len(dst)*8, src.Len()) + i := 0 + + if n >= 8 { + i = (n / 8) * 8 + // Make sure `offset` is at least 4 bytes, otherwise VPGATHERDD may read + // data beyond the end of the program memory and trigger a fault. + // + // If the boolean values do not have enough padding we must fallback to + // the scalar algorithm to be able to load single bytes from memory. 
+ if src.off >= 4 && cpu.X86.HasAVX2 { + gatherBitsAVX2(dst, src.Slice(0, i)) + } else { + gatherBitsDefault(dst, src.Slice(0, i)) + } + } + + for i < n { + x := i / 8 + y := i % 8 + b := src.Index(i) + dst[x] = ((b & 1) << y) | (dst[x] & ^(1 << y)) + i++ + } + + return n +} + +func gather32(dst []uint32, src Uint32Array) int { + n := min(len(dst), src.Len()) + i := 0 + + if n >= 16 && cpu.X86.HasAVX2 { + i = (n / 8) * 8 + gather32AVX2(dst[:i:i], src) + } + + for i < n { + dst[i] = src.Index(i) + i++ + } + + return n +} + +func gather64(dst []uint64, src Uint64Array) int { + n := min(len(dst), src.Len()) + i := 0 + + if n >= 8 && cpu.X86.HasAVX2 { + i = (n / 4) * 4 + gather64AVX2(dst[:i:i], src) + } + + for i < n { + dst[i] = src.Index(i) + i++ + } + + return n +} + +//go:noescape +func gatherBitsAVX2(dst []byte, src Uint8Array) + +//go:noescape +func gatherBitsDefault(dst []byte, src Uint8Array) + +//go:noescape +func gather32AVX2(dst []uint32, src Uint32Array) + +//go:noescape +func gather64AVX2(dst []uint64, src Uint64Array) + +//go:noescape +func gather128(dst [][16]byte, src Uint128Array) int diff --git a/vendor/github.com/parquet-go/parquet-go/sparse/gather_amd64.s b/vendor/github.com/parquet-go/parquet-go/sparse/gather_amd64.s new file mode 100644 index 00000000000..e87dee82af4 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/sparse/gather_amd64.s @@ -0,0 +1,193 @@ +//go:build !purego + +#include "textflag.h" + +// func gatherBitsAVX2(dst []byte, src Uint8Array) +TEXT ·gatherBitsAVX2(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), AX + MOVQ src_array_ptr+24(FP), BX + MOVQ src_array_len+32(FP), CX + MOVQ src_array_off+40(FP), DX + XORQ SI, SI + SHRQ $3, CX + + VPBROADCASTD src_array_off+40(FP), Y0 + VPMULLD range0n7<>(SB), Y0, Y0 + VPCMPEQD Y1, Y1, Y1 + VPCMPEQD Y2, Y2, Y2 +loop: + VPGATHERDD Y1, (BX)(Y0*1), Y3 + VMOVDQU Y2, Y1 + VPSLLD $31, Y3, Y3 + VMOVMSKPS Y3, DI + + MOVB DI, (AX)(SI*1) + + LEAQ (BX)(DX*8), BX + INCQ SI + CMPQ SI, CX + JNE loop + 
VZEROUPPER + RET + +// func gatherBitsDefault(dst []byte, src Uint8Array) +TEXT ·gatherBitsDefault(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), AX + MOVQ src_array_ptr+24(FP), BX + MOVQ src_array_len+32(FP), CX + MOVQ src_array_off+40(FP), DX + XORQ SI, SI + SHRQ $3, CX +loop: + LEAQ (BX)(DX*2), DI + MOVBQZX (BX), R8 + MOVBQZX (BX)(DX*1), R9 + MOVBQZX (DI), R10 + MOVBQZX (DI)(DX*1), R11 + LEAQ (BX)(DX*4), BX + LEAQ (DI)(DX*4), DI + MOVBQZX (BX), R12 + MOVBQZX (BX)(DX*1), R13 + MOVBQZX (DI), R14 + MOVBQZX (DI)(DX*1), R15 + LEAQ (BX)(DX*4), BX + + ANDQ $1, R8 + ANDQ $1, R9 + ANDQ $1, R10 + ANDQ $1, R11 + ANDQ $1, R12 + ANDQ $1, R13 + ANDQ $1, R14 + ANDQ $1, R15 + + SHLQ $1, R9 + SHLQ $2, R10 + SHLQ $3, R11 + SHLQ $4, R12 + SHLQ $5, R13 + SHLQ $6, R14 + SHLQ $7, R15 + + ORQ R9, R8 + ORQ R11, R10 + ORQ R13, R12 + ORQ R15, R14 + ORQ R10, R8 + ORQ R12, R8 + ORQ R14, R8 + + MOVB R8, (AX)(SI*1) + + INCQ SI + CMPQ SI, CX + JNE loop + RET + +// func gather32AVX2(dst []uint32, src Uint32Array) +TEXT ·gather32AVX2(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_array_ptr+24(FP), BX + MOVQ src_array_off+40(FP), DX + XORQ SI, SI + + VPBROADCASTD src_array_off+40(FP), Y0 + VPMULLD range0n7<>(SB), Y0, Y0 + VPCMPEQD Y1, Y1, Y1 + VPCMPEQD Y2, Y2, Y2 +loop: + VPGATHERDD Y1, (BX)(Y0*1), Y3 + VMOVDQU Y3, (AX)(SI*4) + VMOVDQU Y2, Y1 + + LEAQ (BX)(DX*8), BX + ADDQ $8, SI + CMPQ SI, CX + JNE loop + VZEROUPPER + RET + +// func gather64AVX2(dst []uint64, src Uint64Array) +TEXT ·gather64AVX2(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_array_ptr+24(FP), BX + MOVQ src_array_off+40(FP), DX + XORQ SI, SI + + VPBROADCASTQ src_array_off+40(FP), Y0 + VPMULLD range0n3<>(SB), Y0, Y0 + VPCMPEQQ Y1, Y1, Y1 + VPCMPEQQ Y2, Y2, Y2 +loop: + VPGATHERQQ Y1, (BX)(Y0*1), Y3 + VMOVDQU Y3, (AX)(SI*8) + VMOVDQU Y2, Y1 + + LEAQ (BX)(DX*4), BX + ADDQ $4, SI + CMPQ SI, CX + JNE loop + VZEROUPPER + RET + +// func gather128(dst [][16]byte, src 
Uint128Array) int +TEXT ·gather128(SB), NOSPLIT, $0-56 + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_array_ptr+24(FP), BX + MOVQ src_array_len+32(FP), DI + MOVQ src_array_off+40(FP), DX + XORQ SI, SI + + CMPQ DI, CX + CMOVQLT DI, CX + + CMPQ CX, $0 + JE done + + CMPQ CX, $1 + JE tail + + XORQ SI, SI + MOVQ CX, DI + SHRQ $1, DI + SHLQ $1, DI +loop: + MOVOU (BX), X0 + MOVOU (BX)(DX*1), X1 + + MOVOU X0, (AX) + MOVOU X1, 16(AX) + + LEAQ (BX)(DX*2), BX + ADDQ $32, AX + ADDQ $2, SI + CMPQ SI, DI + JNE loop + + CMPQ SI, CX + JE done +tail: + MOVOU (BX), X0 + MOVOU X0, (AX) +done: + MOVQ CX, ret+48(FP) + RET + +GLOBL range0n3<>(SB), RODATA|NOPTR, $32 +DATA range0n3<>+0(SB)/8, $0 +DATA range0n3<>+8(SB)/8, $1 +DATA range0n3<>+16(SB)/8, $2 +DATA range0n3<>+24(SB)/8, $3 + +GLOBL range0n7<>(SB), RODATA|NOPTR, $32 +DATA range0n7<>+0(SB)/4, $0 +DATA range0n7<>+4(SB)/4, $1 +DATA range0n7<>+8(SB)/4, $2 +DATA range0n7<>+12(SB)/4, $3 +DATA range0n7<>+16(SB)/4, $4 +DATA range0n7<>+20(SB)/4, $5 +DATA range0n7<>+24(SB)/4, $6 +DATA range0n7<>+28(SB)/4, $7 diff --git a/vendor/github.com/parquet-go/parquet-go/sparse/gather_purego.go b/vendor/github.com/parquet-go/parquet-go/sparse/gather_purego.go new file mode 100644 index 00000000000..2f25c4486f0 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/sparse/gather_purego.go @@ -0,0 +1,72 @@ +//go:build purego || !amd64 + +package sparse + +func gatherBits(dst []byte, src Uint8Array) int { + n := min(len(dst)*8, src.Len()) + i := 0 + + if k := (n / 8) * 8; k > 0 { + for j := 0; i < k; j++ { + b0 := src.Index(i + 0) + b1 := src.Index(i + 1) + b2 := src.Index(i + 2) + b3 := src.Index(i + 3) + b4 := src.Index(i + 4) + b5 := src.Index(i + 5) + b6 := src.Index(i + 6) + b7 := src.Index(i + 7) + + dst[j] = (b0 & 1) | + ((b1 & 1) << 1) | + ((b2 & 1) << 2) | + ((b3 & 1) << 3) | + ((b4 & 1) << 4) | + ((b5 & 1) << 5) | + ((b6 & 1) << 6) | + ((b7 & 1) << 7) + + i += 8 + } + } + + for i < n { + x := i / 8 + y := i % 8 + b := 
src.Index(i) + dst[x] = ((b & 1) << y) | (dst[x] & ^(1 << y)) + i++ + } + + return n +} + +func gather32(dst []uint32, src Uint32Array) int { + n := min(len(dst), src.Len()) + + for i := range dst[:n] { + dst[i] = src.Index(i) + } + + return n +} + +func gather64(dst []uint64, src Uint64Array) int { + n := min(len(dst), src.Len()) + + for i := range dst[:n] { + dst[i] = src.Index(i) + } + + return n +} + +func gather128(dst [][16]byte, src Uint128Array) int { + n := min(len(dst), src.Len()) + + for i := range dst[:n] { + dst[i] = src.Index(i) + } + + return n +} diff --git a/vendor/github.com/parquet-go/parquet-go/sparse/sparse.go b/vendor/github.com/parquet-go/parquet-go/sparse/sparse.go new file mode 100644 index 00000000000..b2680bf554b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/sparse/sparse.go @@ -0,0 +1,20 @@ +// Package sparse contains abstractions to help work on arrays of values in +// sparse memory locations. +// +// Conversion between array types is supported when converting integers to a +// lower size (e.g. int32 to int16, or uint64 to uint8), or converting from +// signed integers to unsigned. Float types can also be converted to unsigned +// integers of the same size, in which case the conversion is similar to using +// the standard library's math.Float32bits and math.Float64bits functions. +// +// All array types can be converted to a generic Array type that can be used to erase +// type information and bypass type conversion rules. This conversion is similar +// to using Go's unsafe package to bypass Go's type system and should usually be +// avoided and a sign that the application is attempting to break type safety +// boundaries. +// +// The package provides Gather* functions which retrieve values from sparse +// arrays into contiguous memory buffers. On platforms that support it, these +// operations are implemented using SIMD gather instructions (e.g. VPGATHER on +// Intel CPUs). 
+package sparse diff --git a/vendor/github.com/parquet-go/parquet-go/tags.go b/vendor/github.com/parquet-go/parquet-go/tags.go new file mode 100644 index 00000000000..26f62a8d467 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/tags.go @@ -0,0 +1,54 @@ +package parquet + +import "reflect" + +var noTags = parquetTags{} + +// parquetTags represents the superset of all the parquet struct tags that can be used +// to configure a field. +type parquetTags struct { + parquet string + parquetKey string + parquetValue string + parquetElement string +} + +// fromStructTag parses the parquet struct tags from a reflect.StructTag and returns +// a parquetTags struct. +func fromStructTag(tag reflect.StructTag) parquetTags { + parquetTags := parquetTags{} + if val := tag.Get("parquet"); val != "" { + parquetTags.parquet = val + } + if val := tag.Get("parquet-key"); val != "" { + parquetTags.parquetKey = val + } + if val := tag.Get("parquet-value"); val != "" { + parquetTags.parquetValue = val + } + if val := tag.Get("parquet-element"); val != "" { + parquetTags.parquetElement = val + } + return parquetTags +} + +// getMapKeyNodeTags returns the parquet tags for configuring the keys of a map. +func (p parquetTags) getMapKeyNodeTags() parquetTags { + return parquetTags{ + parquet: p.parquetKey, + } +} + +// getMapValueNodeTags returns the parquet tags for configuring the values of a map. +func (p parquetTags) getMapValueNodeTags() parquetTags { + return parquetTags{ + parquet: p.parquetValue, + } +} + +// getListElementNodeTags returns the parquet tags for configuring the elements of a list. 
+func (p parquetTags) getListElementNodeTags() parquetTags { + return parquetTags{ + parquet: p.parquetElement, + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/transform.go b/vendor/github.com/parquet-go/parquet-go/transform.go new file mode 100644 index 00000000000..318e27c90a1 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/transform.go @@ -0,0 +1,140 @@ +package parquet + +// TransformRowReader constructs a RowReader which applies the given transform +// to each row read from reader. +// +// The transformation function appends the transformed src row to dst, returning +// dst and any error that occurred during the transformation. If dst is returned +// unchanged, the row is skipped. +func TransformRowReader(reader RowReader, transform func(dst, src Row) (Row, error)) RowReader { + return &transformRowReader{reader: reader, transform: transform} +} + +type transformRowReader struct { + reader RowReader + transform func(Row, Row) (Row, error) + rows []Row + offset int + length int +} + +func (t *transformRowReader) ReadRows(rows []Row) (n int, err error) { + if len(t.rows) == 0 { + t.rows = makeRows(len(rows)) + } + + for { + for n < len(rows) && t.offset < t.length { + dst := rows[n][:0] + src := t.rows[t.offset] + rows[n], err = t.transform(dst, src) + if err != nil { + return n, err + } + clearValues(src) + t.rows[t.offset] = src[:0] + t.offset++ + n++ + } + + if n == len(rows) { + return n, nil + } + + r, err := t.reader.ReadRows(t.rows) + if r == 0 && err != nil { + return n, err + } + t.offset = 0 + t.length = r + } +} + +type transformRowBuffer struct { + buffer []Row + offset int32 + length int32 +} + +func (b *transformRowBuffer) init(n int) { + b.buffer = makeRows(n) + b.offset = 0 + b.length = 0 +} + +func (b *transformRowBuffer) discard() { + row := b.buffer[b.offset] + clearValues(row) + b.buffer[b.offset] = row[:0] + + if b.offset++; b.offset == b.length { + b.reset(0) + } +} + +func (b *transformRowBuffer) reset(n int) { 
b.offset = 0 + b.length = int32(n) +} + +func (b *transformRowBuffer) rows() []Row { + return b.buffer[b.offset:b.length] +} + +func (b *transformRowBuffer) cap() int { + return len(b.buffer) +} + +func (b *transformRowBuffer) len() int { + return int(b.length - b.offset) +} + +// TransformRowWriter constructs a RowWriter which applies the given transform +// to each row written to writer. +// +// The transformation function appends the transformed src row to dst, returning +// dst and any error that occurred during the transformation. If dst is returned +// unchanged, the row is skipped. +func TransformRowWriter(writer RowWriter, transform func(dst, src Row) (Row, error)) RowWriter { + return &transformRowWriter{writer: writer, transform: transform} +} + +type transformRowWriter struct { + writer RowWriter + transform func(Row, Row) (Row, error) + rows []Row +} + +func (t *transformRowWriter) WriteRows(rows []Row) (n int, err error) { + if len(t.rows) == 0 { + t.rows = makeRows(len(rows)) + } + + for n < len(rows) { + numRows := min(len(rows)-n, len(t.rows)) + if err := t.writeRows(rows[n : n+numRows]); err != nil { + return n, err + } + n += numRows + } + + return n, nil +} + +func (t *transformRowWriter) writeRows(rows []Row) (err error) { + numRows := 0 + defer func() { clearRows(t.rows[:numRows]) }() + + for _, row := range rows { + t.rows[numRows], err = t.transform(t.rows[numRows][:0], row) + if err != nil { + return err + } + if len(t.rows[numRows]) != 0 { + numRows++ + } + } + + _, err = t.writer.WriteRows(t.rows[:numRows]) + return err +} diff --git a/vendor/github.com/parquet-go/parquet-go/type.go b/vendor/github.com/parquet-go/parquet-go/type.go new file mode 100644 index 00000000000..54b9186802d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type.go @@ -0,0 +1,284 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + 
"github.com/parquet-go/parquet-go/format" +) + +// Kind is an enumeration type representing the physical types supported by the +// parquet type system. +type Kind int8 + +const ( + Boolean Kind = Kind(format.Boolean) + Int32 Kind = Kind(format.Int32) + Int64 Kind = Kind(format.Int64) + Int96 Kind = Kind(format.Int96) + Float Kind = Kind(format.Float) + Double Kind = Kind(format.Double) + ByteArray Kind = Kind(format.ByteArray) + FixedLenByteArray Kind = Kind(format.FixedLenByteArray) +) + +// String returns a human-readable representation of the physical type. +func (k Kind) String() string { return format.Type(k).String() } + +// Value constructs a value from k and v. +// +// The method panics if the data is not a valid representation of the value +// kind; for example, if the kind is Int32 but the data is not 4 bytes long. +func (k Kind) Value(v []byte) Value { + x, err := parseValue(k, v) + if err != nil { + panic(err) + } + return x +} + +// The Type interface represents logical types of the parquet type system. +// +// Types are immutable and therefore safe to access from multiple goroutines. +type Type interface { + // Returns a human-readable representation of the parquet type. + String() string + + // Returns the Kind value representing the underlying physical type. + // + // The method panics if it is called on a group type. + Kind() Kind + + // For integer and floating point physical types, the method returns the + // size of values in bits. + // + // For fixed-length byte arrays, the method returns the size of elements + // in bytes. + // + // For other types, the value is zero. + Length() int + + // Returns an estimation of the number of bytes required to hold the given + // number of values of this type in memory. + // + // The method returns zero for group types. + EstimateSize(numValues int) int + + // Returns an estimation of the number of values of this type that can be + // held in the given byte size. 
+ // + // The method returns zero for group types. + EstimateNumValues(size int) int + + // Compares two values and returns a negative integer if a < b, positive if + // a > b, or zero if a == b. + // + // The values' Kind must match the type, otherwise the result is undefined. + // + // The method panics if it is called on a group type. + Compare(a, b Value) int + + // ColumnOrder returns the type's column order. For group types, this method + // returns nil. + // + // The order describes the comparison logic implemented by the Less method. + // + // As an optimization, the method may return the same pointer across + // multiple calls. Applications must treat the returned value as immutable, + // mutating the value will result in undefined behavior. + ColumnOrder() *format.ColumnOrder + + // Returns the physical type as a *format.Type value. For group types, this + // method returns nil. + // + // As an optimization, the method may return the same pointer across + // multiple calls. Applications must treat the returned value as immutable, + // mutating the value will result in undefined behavior. + PhysicalType() *format.Type + + // Returns the logical type as a *format.LogicalType value. When the logical + // type is unknown, the method returns nil. + // + // As an optimization, the method may return the same pointer across + // multiple calls. Applications must treat the returned value as immutable, + // mutating the value will result in undefined behavior. + LogicalType() *format.LogicalType + + // Returns the logical type's equivalent converted type. When there are + // no equivalent converted type, the method returns nil. + // + // As an optimization, the method may return the same pointer across + // multiple calls. Applications must treat the returned value as immutable, + // mutating the value will result in undefined behavior. + ConvertedType() *deprecated.ConvertedType + + // Creates a column indexer for values of this type. 
+ // + // The size limit is a hint to the column indexer that it is allowed to + // truncate the page boundaries to the given size. Only BYTE_ARRAY and + // FIXED_LEN_BYTE_ARRAY types currently take this value into account. + // + // A value of zero or less means no limits. + // + // The method panics if it is called on a group type. + NewColumnIndexer(sizeLimit int) ColumnIndexer + + // Creates a row group buffer column for values of this type. + // + // Column buffers are created using the index of the column they are + // accumulating values in memory for (relative to the parent schema), + // and the size of their memory buffer. + // + // The application may give an estimate of the number of values it expects + // to write to the buffer as second argument. This estimate helps set the + // initial buffer capacity but is not a hard limit, the underlying memory + // buffer will grow as needed to allow more values to be written. Programs + // may use the Size method of the column buffer (or the parent row group, + // when relevant) to determine how many bytes are being used, and perform a + // flush of the buffers to a storage layer. + // + // The method panics if it is called on a group type. + NewColumnBuffer(columnIndex, numValues int) ColumnBuffer + + // Creates a dictionary holding values of this type. + // + // The dictionary retains the data buffer, it does not make a copy of it. + // If the application needs to share ownership of the memory buffer, it must + // ensure that it will not be modified while the page is in use, or it must + // make a copy of it prior to creating the dictionary. + // + // The method panics if the data type does not correspond to the parquet + // type it is called on. + NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary + + // Creates a page belonging to a column at the given index, backed by the + // data buffer. + // + // The page retains the data buffer, it does not make a copy of it. 
If the + // application needs to share ownership of the memory buffer, it must ensure + // that it will not be modified while the page is in use, or it must make a + // copy of it prior to creating the page. + // + // The method panics if the data type does not correspond to the parquet + // type it is called on. + NewPage(columnIndex, numValues int, data encoding.Values) Page + + // Creates an encoding.Values instance backed by the given buffers. + // + // The offsets is only used by BYTE_ARRAY types, where it represents the + // positions of each variable length value in the values buffer. + // + // The following expression creates an empty instance for any type: + // + // values := typ.NewValues(nil, nil) + // + // The method panics if it is called on group types. + NewValues(values []byte, offsets []uint32) encoding.Values + + // Assuming the src buffer contains PLAIN encoded values of the type it is + // called on, applies the given encoding and produces the output to the dst + // buffer passed as first argument by dispatching the call to one of the + // encoding methods. + Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) + + // Assuming the src buffer contains values encoding in the given encoding, + // decodes the input and produces the encoded values into the dst output + // buffer passed as first argument by dispatching the call to one of the + // encoding methods. + Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) + + // Returns an estimation of the output size after decoding the values passed + // as first argument with the given encoding. + // + // For most types, this is similar to calling EstimateSize with the known + // number of encoded values. For variable size types, using this method may + // provide a more precise result since it can inspect the input buffer. + EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int + + // Assigns a Parquet value to a Go value. 
Returns an error if assignment is + // not possible. The source Value must be an expected logical type for the + // receiver. This can be accomplished using ConvertValue. + AssignValue(dst reflect.Value, src Value) error + + // Convert a Parquet Value of the given Type into a Parquet Value that is + // compatible with the receiver. The returned Value is suitable to be passed + // to AssignValue. + ConvertValue(val Value, typ Type) (Value, error) +} + +// EqualTypes returns true if type1 and type2 are equal. +// +// Types are considered equal if they have the same Kind, Length, and LogicalType. +// The comparison uses reflect.DeepEqual for LogicalType comparison. +// +// Note: This function is designed for leaf types. For complex group types like +// MAP and LIST, use EqualNodes instead, as those types require structural comparison +// of their nested fields. +func EqualTypes(type1, type2 Type) bool { + return equalKind(type1, type2) && equalLength(type1, type2) && equalLogicalTypes(type1, type2) +} + +func equalKind(type1, type2 Type) bool { + return type1.Kind() == type2.Kind() +} + +func equalLength(type1, type2 Type) bool { + return type1.Length() == type2.Length() +} + +func equalLogicalTypes(type1, type2 Type) bool { + return reflect.DeepEqual(type1.LogicalType(), type2.LogicalType()) +} + +var ( + BooleanType Type = booleanType{} + Int32Type Type = int32Type{} + Int64Type Type = int64Type{} + Int96Type Type = int96Type{} + FloatType Type = floatType{} + DoubleType Type = doubleType{} + ByteArrayType Type = byteArrayType{} +) + +// In the current parquet version supported by this library, only type-defined +// orders are supported. 
+var typeDefinedColumnOrder = format.ColumnOrder{ + TypeOrder: new(format.TypeDefinedOrder), +} + +var physicalTypes = [...]format.Type{ + 0: format.Boolean, + 1: format.Int32, + 2: format.Int64, + 3: format.Int96, + 4: format.Float, + 5: format.Double, + 6: format.ByteArray, + 7: format.FixedLenByteArray, +} + +var convertedTypes = [...]deprecated.ConvertedType{ + 0: deprecated.UTF8, + 1: deprecated.Map, + 2: deprecated.MapKeyValue, + 3: deprecated.List, + 4: deprecated.Enum, + 5: deprecated.Decimal, + 6: deprecated.Date, + 7: deprecated.TimeMillis, + 8: deprecated.TimeMicros, + 9: deprecated.TimestampMillis, + 10: deprecated.TimestampMicros, + 11: deprecated.Uint8, + 12: deprecated.Uint16, + 13: deprecated.Uint32, + 14: deprecated.Uint64, + 15: deprecated.Int8, + 16: deprecated.Int16, + 17: deprecated.Int32, + 18: deprecated.Int64, + 19: deprecated.Json, + 20: deprecated.Bson, + 21: deprecated.Interval, +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_boolean.go b/vendor/github.com/parquet-go/parquet-go/type_boolean.go new file mode 100644 index 00000000000..bb173ff2629 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_boolean.go @@ -0,0 +1,90 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type booleanType struct{} + +func (t booleanType) String() string { return "BOOLEAN" } +func (t booleanType) Kind() Kind { return Boolean } +func (t booleanType) Length() int { return 1 } +func (t booleanType) EstimateSize(n int) int { return (n + 7) / 8 } +func (t booleanType) EstimateNumValues(n int) int { return 8 * n } +func (t booleanType) Compare(a, b Value) int { return compareBool(a.boolean(), b.boolean()) } +func (t booleanType) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } +func (t booleanType) LogicalType() *format.LogicalType { return nil } +func (t booleanType) 
ConvertedType() *deprecated.ConvertedType { return nil } +func (t booleanType) PhysicalType() *format.Type { return &physicalTypes[Boolean] } + +func (t booleanType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newBooleanColumnIndexer() +} + +func (t booleanType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newBooleanColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t booleanType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newBooleanDictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t booleanType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newBooleanPage(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t booleanType) NewValues(values []byte, _ []uint32) encoding.Values { + return encoding.BooleanValues(values) +} + +func (t booleanType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeBoolean(dst, src, enc) +} + +func (t booleanType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeBoolean(dst, src, enc) +} + +func (t booleanType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return t.EstimateSize(numValues) +} + +func (t booleanType) AssignValue(dst reflect.Value, src Value) error { + v := src.boolean() + switch dst.Kind() { + case reflect.Bool: + dst.SetBool(v) + default: + dst.Set(reflect.ValueOf(v)) + } + return nil +} + +func (t booleanType) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case *stringType: + return convertStringToBoolean(val) + } + switch typ.Kind() { + case Boolean: + return val, nil + case Int32: + return convertInt32ToBoolean(val) + case Int64: + return convertInt64ToBoolean(val) + case Int96: + return convertInt96ToBoolean(val) + case Float: + return 
convertFloatToBoolean(val) + case Double: + return convertDoubleToBoolean(val) + case ByteArray, FixedLenByteArray: + return convertByteArrayToBoolean(val) + default: + return makeValueKind(Boolean), nil + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_bson.go b/vendor/github.com/parquet-go/parquet-go/type_bson.go new file mode 100644 index 00000000000..687e5d1f7d0 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_bson.go @@ -0,0 +1,87 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// BSON constructs a leaf node of BSON logical type. +// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#bson +func BSON() Node { return Leaf(&bsonType{}) } + +var bsonLogicalType = format.LogicalType{ + Bson: new(format.BsonType), +} + +type bsonType format.BsonType + +func (t *bsonType) String() string { return (*format.BsonType)(t).String() } + +func (t *bsonType) Kind() Kind { return byteArrayType{}.Kind() } + +func (t *bsonType) Length() int { return byteArrayType{}.Length() } + +func (t *bsonType) EstimateSize(n int) int { return byteArrayType{}.EstimateSize(n) } + +func (t *bsonType) EstimateNumValues(n int) int { return byteArrayType{}.EstimateNumValues(n) } + +func (t *bsonType) Compare(a, b Value) int { return byteArrayType{}.Compare(a, b) } + +func (t *bsonType) ColumnOrder() *format.ColumnOrder { return byteArrayType{}.ColumnOrder() } + +func (t *bsonType) PhysicalType() *format.Type { return byteArrayType{}.PhysicalType() } + +func (t *bsonType) LogicalType() *format.LogicalType { return &bsonLogicalType } + +func (t *bsonType) ConvertedType() *deprecated.ConvertedType { + return &convertedTypes[deprecated.Bson] +} + +func (t *bsonType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return byteArrayType{}.NewColumnIndexer(sizeLimit) +} + +func (t *bsonType) 
NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return byteArrayType{}.NewDictionary(columnIndex, numValues, data) +} + +func (t *bsonType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return byteArrayType{}.NewColumnBuffer(columnIndex, numValues) +} + +func (t *bsonType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return byteArrayType{}.NewPage(columnIndex, numValues, data) +} + +func (t *bsonType) NewValues(values []byte, offsets []uint32) encoding.Values { + return byteArrayType{}.NewValues(values, offsets) +} + +func (t *bsonType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return byteArrayType{}.Encode(dst, src, enc) +} + +func (t *bsonType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return byteArrayType{}.Decode(dst, src, enc) +} + +func (t *bsonType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return byteArrayType{}.EstimateDecodeSize(numValues, src, enc) +} + +func (t *bsonType) AssignValue(dst reflect.Value, src Value) error { + return byteArrayType{}.AssignValue(dst, src) +} + +func (t *bsonType) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case byteArrayType, *bsonType: + return val, nil + default: + return val, invalidConversion(val, "BSON", typ.String()) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_byte_array.go b/vendor/github.com/parquet-go/parquet-go/type_byte_array.go new file mode 100644 index 00000000000..fc685d3e141 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_byte_array.go @@ -0,0 +1,102 @@ +package parquet + +import ( + "bytes" + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type byteArrayType struct{} + +func (t byteArrayType) String() string { return "BYTE_ARRAY" } +func (t 
byteArrayType) Kind() Kind { return ByteArray } +func (t byteArrayType) Length() int { return 0 } +func (t byteArrayType) EstimateSize(n int) int { return estimatedSizeOfByteArrayValues * n } +func (t byteArrayType) EstimateNumValues(n int) int { return n / estimatedSizeOfByteArrayValues } +func (t byteArrayType) Compare(a, b Value) int { return bytes.Compare(a.byteArray(), b.byteArray()) } +func (t byteArrayType) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } +func (t byteArrayType) LogicalType() *format.LogicalType { return nil } +func (t byteArrayType) ConvertedType() *deprecated.ConvertedType { return nil } +func (t byteArrayType) PhysicalType() *format.Type { return &physicalTypes[ByteArray] } + +func (t byteArrayType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newByteArrayColumnIndexer(sizeLimit) +} + +func (t byteArrayType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newByteArrayColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t byteArrayType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newByteArrayDictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t byteArrayType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newByteArrayPage(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t byteArrayType) NewValues(values []byte, offsets []uint32) encoding.Values { + return encoding.ByteArrayValues(values, offsets) +} + +func (t byteArrayType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeByteArray(dst, src, enc) +} + +func (t byteArrayType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeByteArray(dst, src, enc) +} + +func (t byteArrayType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return 
enc.EstimateDecodeByteArraySize(src) +} + +func (t byteArrayType) AssignValue(dst reflect.Value, src Value) error { + v := src.byteArray() + switch dst.Kind() { + case reflect.String: + dst.SetString(string(v)) + case reflect.Slice: + dst.SetBytes(copyBytes(v)) + case reflect.Ptr: + // Handle pointer types like *string + if src.IsNull() { + dst.Set(reflect.Zero(dst.Type())) + } else { + // Allocate a new value of the element type + elem := reflect.New(dst.Type().Elem()) + if err := t.AssignValue(elem.Elem(), src); err != nil { + return err + } + dst.Set(elem) + } + default: + val := reflect.ValueOf(string(v)) + dst.Set(val) + } + return nil +} + +func (t byteArrayType) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.Kind() { + case Boolean: + return convertBooleanToByteArray(val) + case Int32: + return convertInt32ToByteArray(val) + case Int64: + return convertInt64ToByteArray(val) + case Int96: + return convertInt96ToByteArray(val) + case Float: + return convertFloatToByteArray(val) + case Double: + return convertDoubleToByteArray(val) + case ByteArray, FixedLenByteArray: + return val, nil + default: + return makeValueKind(ByteArray), nil + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_date.go b/vendor/github.com/parquet-go/parquet-go/type_date.go new file mode 100644 index 00000000000..e408be207db --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_date.go @@ -0,0 +1,117 @@ +package parquet + +import ( + "reflect" + "time" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// Date constructs a leaf node of DATE logical type. 
+// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#date +func Date() Node { return Leaf(&dateType{}) } + +var dateLogicalType = format.LogicalType{ + Date: new(format.DateType), +} + +type dateType format.DateType + +func (t *dateType) String() string { return (*format.DateType)(t).String() } + +func (t *dateType) Kind() Kind { return int32Type{}.Kind() } + +func (t *dateType) Length() int { return int32Type{}.Length() } + +func (t *dateType) EstimateSize(n int) int { return int32Type{}.EstimateSize(n) } + +func (t *dateType) EstimateNumValues(n int) int { return int32Type{}.EstimateNumValues(n) } + +func (t *dateType) Compare(a, b Value) int { return int32Type{}.Compare(a, b) } + +func (t *dateType) ColumnOrder() *format.ColumnOrder { return int32Type{}.ColumnOrder() } + +func (t *dateType) PhysicalType() *format.Type { return int32Type{}.PhysicalType() } + +func (t *dateType) LogicalType() *format.LogicalType { return &dateLogicalType } + +func (t *dateType) ConvertedType() *deprecated.ConvertedType { + return &convertedTypes[deprecated.Date] +} + +func (t *dateType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return int32Type{}.NewColumnIndexer(sizeLimit) +} + +func (t *dateType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return int32Type{}.NewDictionary(columnIndex, numValues, data) +} + +func (t *dateType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return int32Type{}.NewColumnBuffer(columnIndex, numValues) +} + +func (t *dateType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return int32Type{}.NewPage(columnIndex, numValues, data) +} + +func (t *dateType) NewValues(values []byte, offsets []uint32) encoding.Values { + return int32Type{}.NewValues(values, offsets) +} + +func (t *dateType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return int32Type{}.Encode(dst, src, enc) +} + +func (t *dateType) Decode(dst 
encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return int32Type{}.Decode(dst, src, enc) +} + +func (t *dateType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return int32Type{}.EstimateDecodeSize(numValues, src, enc) +} + +func (t *dateType) AssignValue(dst reflect.Value, src Value) error { + switch dst.Type() { + case reflect.TypeOf(time.Time{}): + // Check if the value is NULL - if so, assign zero time.Time + if src.IsNull() { + dst.Set(reflect.ValueOf(time.Time{})) + return nil + } + + // DATE is stored as days since Unix epoch (January 1, 1970) + days := src.int32() + val := time.Unix(int64(days)*86400, 0).UTC() + dst.Set(reflect.ValueOf(val)) + return nil + case reflect.TypeOf((*time.Time)(nil)): + // Handle *time.Time (pointer to time.Time) + if src.IsNull() { + // For NULL values, set the pointer to nil + dst.Set(reflect.Zero(dst.Type())) + return nil + } + + // DATE is stored as days since Unix epoch (January 1, 1970) + days := src.int32() + val := time.Unix(int64(days)*86400, 0).UTC() + ptr := &val + dst.Set(reflect.ValueOf(ptr)) + return nil + } + return int32Type{}.AssignValue(dst, src) +} + +func (t *dateType) ConvertValue(val Value, typ Type) (Value, error) { + switch src := typ.(type) { + case *stringType: + return convertStringToDate(val, time.UTC) + case *timestampType: + return convertTimestampToDate(val, src.Unit, src.tz()) + } + return int32Type{}.ConvertValue(val, typ) +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_decimal.go b/vendor/github.com/parquet-go/parquet-go/type_decimal.go new file mode 100644 index 00000000000..5f57557aa2c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_decimal.go @@ -0,0 +1,40 @@ +package parquet + +import ( + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/format" +) + +// Decimal constructs a leaf node of decimal logical type with the given +// scale, precision, and underlying type. 
+// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#decimal +func Decimal(scale, precision int, typ Type) Node { + switch typ.Kind() { + case Int32, Int64, ByteArray, FixedLenByteArray: + default: + panic("DECIMAL node must annotate Int32, Int64, ByteArray or FixedLenByteArray but got " + typ.String()) + } + return Leaf(&decimalType{ + decimal: format.DecimalType{ + Scale: int32(scale), + Precision: int32(precision), + }, + Type: typ, + }) +} + +type decimalType struct { + decimal format.DecimalType + Type +} + +func (t *decimalType) String() string { return t.decimal.String() } + +func (t *decimalType) LogicalType() *format.LogicalType { + return &format.LogicalType{Decimal: &t.decimal} +} + +func (t *decimalType) ConvertedType() *deprecated.ConvertedType { + return &convertedTypes[deprecated.Decimal] +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_double.go b/vendor/github.com/parquet-go/parquet-go/type_double.go new file mode 100644 index 00000000000..a94ed8654f8 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_double.go @@ -0,0 +1,90 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type doubleType struct{} + +func (t doubleType) String() string { return "DOUBLE" } +func (t doubleType) Kind() Kind { return Double } +func (t doubleType) Length() int { return 64 } +func (t doubleType) EstimateSize(n int) int { return 8 * n } +func (t doubleType) EstimateNumValues(n int) int { return n / 8 } +func (t doubleType) Compare(a, b Value) int { return compareFloat64(a.double(), b.double()) } +func (t doubleType) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } +func (t doubleType) LogicalType() *format.LogicalType { return nil } +func (t doubleType) ConvertedType() *deprecated.ConvertedType { return nil } +func (t doubleType) PhysicalType() *format.Type { 
return &physicalTypes[Double] } + +func (t doubleType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newDoubleColumnIndexer() +} + +func (t doubleType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newDoubleColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t doubleType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newDoubleDictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t doubleType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newDoublePage(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t doubleType) NewValues(values []byte, _ []uint32) encoding.Values { + return encoding.DoubleValuesFromBytes(values) +} + +func (t doubleType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeDouble(dst, src, enc) +} + +func (t doubleType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeDouble(dst, src, enc) +} + +func (t doubleType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return t.EstimateSize(numValues) +} + +func (t doubleType) AssignValue(dst reflect.Value, src Value) error { + v := src.double() + switch dst.Kind() { + case reflect.Float32, reflect.Float64: + dst.SetFloat(v) + default: + dst.Set(reflect.ValueOf(v)) + } + return nil +} + +func (t doubleType) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case *stringType: + return convertStringToDouble(val) + } + switch typ.Kind() { + case Boolean: + return convertBooleanToDouble(val) + case Int32: + return convertInt32ToDouble(val) + case Int64: + return convertInt64ToDouble(val) + case Int96: + return convertInt96ToDouble(val) + case Float: + return convertFloatToDouble(val) + case Double: + return val, nil + case ByteArray, 
FixedLenByteArray: + return convertByteArrayToDouble(val) + default: + return makeValueKind(Double), nil + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_enum.go b/vendor/github.com/parquet-go/parquet-go/type_enum.go new file mode 100644 index 00000000000..d47c7bbb2fe --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_enum.go @@ -0,0 +1,87 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// Enum constructs a leaf node with a logical type representing enumerations. +// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#enum +func Enum() Node { return Leaf(&enumType{}) } + +var enumLogicalType = format.LogicalType{ + Enum: new(format.EnumType), +} + +type enumType format.EnumType + +func (t *enumType) String() string { return (*format.EnumType)(t).String() } + +func (t *enumType) Kind() Kind { return new(stringType).Kind() } + +func (t *enumType) Length() int { return new(stringType).Length() } + +func (t *enumType) EstimateSize(n int) int { return new(stringType).EstimateSize(n) } + +func (t *enumType) EstimateNumValues(n int) int { return new(stringType).EstimateNumValues(n) } + +func (t *enumType) Compare(a, b Value) int { return new(stringType).Compare(a, b) } + +func (t *enumType) ColumnOrder() *format.ColumnOrder { return new(stringType).ColumnOrder() } + +func (t *enumType) PhysicalType() *format.Type { return new(stringType).PhysicalType() } + +func (t *enumType) LogicalType() *format.LogicalType { return &enumLogicalType } + +func (t *enumType) ConvertedType() *deprecated.ConvertedType { + return &convertedTypes[deprecated.Enum] +} + +func (t *enumType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return new(stringType).NewColumnIndexer(sizeLimit) +} + +func (t *enumType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return 
new(stringType).NewDictionary(columnIndex, numValues, data) +} + +func (t *enumType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return new(stringType).NewColumnBuffer(columnIndex, numValues) +} + +func (t *enumType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return new(stringType).NewPage(columnIndex, numValues, data) +} + +func (t *enumType) NewValues(values []byte, offsets []uint32) encoding.Values { + return new(stringType).NewValues(values, offsets) +} + +func (t *enumType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return new(stringType).Encode(dst, src, enc) +} + +func (t *enumType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return new(stringType).Decode(dst, src, enc) +} + +func (t *enumType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return new(stringType).EstimateDecodeSize(numValues, src, enc) +} + +func (t *enumType) AssignValue(dst reflect.Value, src Value) error { + return new(stringType).AssignValue(dst, src) +} + +func (t *enumType) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case byteArrayType, *stringType, *enumType: + return val, nil + default: + return val, invalidConversion(val, "ENUM", typ.String()) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_fixed_len_byte_array.go b/vendor/github.com/parquet-go/parquet-go/type_fixed_len_byte_array.go new file mode 100644 index 00000000000..43b58c1dd2c --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_fixed_len_byte_array.go @@ -0,0 +1,234 @@ +package parquet + +import ( + "bytes" + "fmt" + "reflect" + "unsafe" + + "github.com/google/uuid" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type fixedLenByteArrayType struct { + length int + isUUID bool +} + +func (t fixedLenByteArrayType) 
String() string { + return fmt.Sprintf("FIXED_LEN_BYTE_ARRAY(%d)", t.length) +} + +func (t fixedLenByteArrayType) Kind() Kind { return FixedLenByteArray } + +func (t fixedLenByteArrayType) Length() int { return t.length } + +func (t fixedLenByteArrayType) EstimateSize(n int) int { return t.length * n } + +func (t fixedLenByteArrayType) EstimateNumValues(n int) int { return n / t.length } + +func (t fixedLenByteArrayType) Compare(a, b Value) int { + return bytes.Compare(a.byteArray(), b.byteArray()) +} + +func (t fixedLenByteArrayType) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } + +func (t fixedLenByteArrayType) LogicalType() *format.LogicalType { return nil } + +func (t fixedLenByteArrayType) ConvertedType() *deprecated.ConvertedType { return nil } + +func (t fixedLenByteArrayType) PhysicalType() *format.Type { return &physicalTypes[FixedLenByteArray] } + +func (t fixedLenByteArrayType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newFixedLenByteArrayColumnIndexer(t.length, sizeLimit) +} + +func (t fixedLenByteArrayType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newFixedLenByteArrayColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t fixedLenByteArrayType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newFixedLenByteArrayDictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t fixedLenByteArrayType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newFixedLenByteArrayPage(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t fixedLenByteArrayType) NewValues(values []byte, _ []uint32) encoding.Values { + return encoding.FixedLenByteArrayValues(values, t.length) +} + +func (t fixedLenByteArrayType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeFixedLenByteArray(dst, src, enc) +} + +func (t 
fixedLenByteArrayType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeFixedLenByteArray(dst, src, enc) +} + +func (t fixedLenByteArrayType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return t.EstimateSize(numValues) +} + +func (t fixedLenByteArrayType) AssignValue(dst reflect.Value, src Value) error { + v := src.byteArray() + switch dst.Kind() { + case reflect.Array: + if dst.Type().Elem().Kind() == reflect.Uint8 && dst.Len() == len(v) { + // This code could be implemented as a call to reflect.Copy but + // it would require creating a reflect.Value from v which causes + // the heap allocation to pack the []byte value. To avoid this + // overhead we instead convert the reflect.Value holding the + // destination array into a byte slice which allows us to use + // a more efficient call to copy. + d := unsafe.Slice((*byte)(reflectValueData(dst)), len(v)) + copy(d, v) + return nil + } + case reflect.Slice: + dst.SetBytes(copyBytes(v)) + return nil + case reflect.String: + if t.isUUID { + dst.SetString(uuid.UUID(v).String()) + return nil + } + } + + val := reflect.ValueOf(copyBytes(v)) + dst.Set(val) + return nil +} + +func reflectValueData(v reflect.Value) unsafe.Pointer { + return (*[2]unsafe.Pointer)(unsafe.Pointer(&v))[1] +} + +func reflectValuePointer(v reflect.Value) unsafe.Pointer { + if v.Kind() == reflect.Map { + // Map values are inlined in the reflect.Value data area, + // because they are a reference type and their pointer is + // packed in the interface. However, we need to get an + // address to the pointer itself, so we extract it and + // return the address of this pointer. It causes a heap + // allocation, which is unfortunate, and we would probably + // want to optimize away eventually.
+ p := v.UnsafePointer() + return unsafe.Pointer(&p) + } + return reflectValueData(v) +} + +func (t fixedLenByteArrayType) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case *stringType: + return convertStringToFixedLenByteArray(val, t.length) + } + switch typ.Kind() { + case Boolean: + return convertBooleanToFixedLenByteArray(val, t.length) + case Int32: + return convertInt32ToFixedLenByteArray(val, t.length) + case Int64: + return convertInt64ToFixedLenByteArray(val, t.length) + case Int96: + return convertInt96ToFixedLenByteArray(val, t.length) + case Float: + return convertFloatToFixedLenByteArray(val, t.length) + case Double: + return convertDoubleToFixedLenByteArray(val, t.length) + case ByteArray, FixedLenByteArray: + return convertByteArrayToFixedLenByteArray(val, t.length) + default: + return makeValueBytes(FixedLenByteArray, make([]byte, t.length)), nil + } +} + +// BE128 stands for "big-endian 128 bits". This type is used as a special case +// for fixed-length byte arrays of 16 bytes, which are commonly used to +// represent columns of random unique identifiers such as UUIDs. +// +// Comparisons of BE128 values use the natural byte order, the zeroth byte is +// the most significant byte. +// +// The special case is intended to provide optimizations based on the knowledge +// that the values are 16 bytes long. Stronger type checking can also be applied +// by the compiler when using [16]byte values rather than []byte, reducing the +// risk of errors on these common code paths. 
+type be128Type struct { + isUUID bool +} + +func (t be128Type) String() string { return "FIXED_LEN_BYTE_ARRAY(16)" } + +func (t be128Type) Kind() Kind { return FixedLenByteArray } + +func (t be128Type) Length() int { return 16 } + +func (t be128Type) EstimateSize(n int) int { return 16 * n } + +func (t be128Type) EstimateNumValues(n int) int { return n / 16 } + +func (t be128Type) Compare(a, b Value) int { return compareBE128(a.be128(), b.be128()) } + +func (t be128Type) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } + +func (t be128Type) LogicalType() *format.LogicalType { return nil } + +func (t be128Type) ConvertedType() *deprecated.ConvertedType { return nil } + +func (t be128Type) PhysicalType() *format.Type { return &physicalTypes[FixedLenByteArray] } + +func (t be128Type) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newBE128ColumnIndexer() +} + +func (t be128Type) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newBE128ColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t be128Type) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newBE128Dictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t be128Type) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newBE128Page(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t be128Type) NewValues(values []byte, _ []uint32) encoding.Values { + return encoding.FixedLenByteArrayValues(values, 16) +} + +func (t be128Type) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeFixedLenByteArray(dst, src, enc) +} + +func (t be128Type) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeFixedLenByteArray(dst, src, enc) +} + +func (t be128Type) EstimateDecodeSize(numValues int, src []byte, enc 
encoding.Encoding) int { + return t.EstimateSize(numValues) +} + +func (t be128Type) AssignValue(dst reflect.Value, src Value) error { + return fixedLenByteArrayType{length: 16, isUUID: t.isUUID}.AssignValue(dst, src) +} + +func (t be128Type) ConvertValue(val Value, typ Type) (Value, error) { + return fixedLenByteArrayType{length: 16, isUUID: t.isUUID}.ConvertValue(val, typ) +} + +// FixedLenByteArrayType constructs a type for fixed-length values of the given +// size (in bytes). +func FixedLenByteArrayType(length int) Type { + switch length { + case 16: + return be128Type{} + default: + return fixedLenByteArrayType{length: length} + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_float.go b/vendor/github.com/parquet-go/parquet-go/type_float.go new file mode 100644 index 00000000000..10420562127 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_float.go @@ -0,0 +1,90 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type floatType struct{} + +func (t floatType) String() string { return "FLOAT" } +func (t floatType) Kind() Kind { return Float } +func (t floatType) Length() int { return 32 } +func (t floatType) EstimateSize(n int) int { return 4 * n } +func (t floatType) EstimateNumValues(n int) int { return n / 4 } +func (t floatType) Compare(a, b Value) int { return compareFloat32(a.float(), b.float()) } +func (t floatType) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } +func (t floatType) LogicalType() *format.LogicalType { return nil } +func (t floatType) ConvertedType() *deprecated.ConvertedType { return nil } +func (t floatType) PhysicalType() *format.Type { return &physicalTypes[Float] } + +func (t floatType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newFloatColumnIndexer() +} + +func (t floatType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { 
+ return newFloatColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t floatType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newFloatDictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t floatType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newFloatPage(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t floatType) NewValues(values []byte, _ []uint32) encoding.Values { + return encoding.FloatValuesFromBytes(values) +} + +func (t floatType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeFloat(dst, src, enc) +} + +func (t floatType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeFloat(dst, src, enc) +} + +func (t floatType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return t.EstimateSize(numValues) +} + +func (t floatType) AssignValue(dst reflect.Value, src Value) error { + v := src.float() + switch dst.Kind() { + case reflect.Float32, reflect.Float64: + dst.SetFloat(float64(v)) + default: + dst.Set(reflect.ValueOf(v)) + } + return nil +} + +func (t floatType) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case *stringType: + return convertStringToFloat(val) + } + switch typ.Kind() { + case Boolean: + return convertBooleanToFloat(val) + case Int32: + return convertInt32ToFloat(val) + case Int64: + return convertInt64ToFloat(val) + case Int96: + return convertInt96ToFloat(val) + case Float: + return val, nil + case Double: + return convertDoubleToFloat(val) + case ByteArray, FixedLenByteArray: + return convertByteArrayToFloat(val) + default: + return makeValueKind(Float), nil + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_group.go b/vendor/github.com/parquet-go/parquet-go/type_group.go new file mode 
100644 index 00000000000..df9610a9a50 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_group.go @@ -0,0 +1,75 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type groupType struct{} + +func (groupType) String() string { return "group" } + +func (groupType) Kind() Kind { + panic("cannot call Kind on parquet group") +} + +func (groupType) Compare(Value, Value) int { + panic("cannot compare values on parquet group") +} + +func (groupType) NewColumnIndexer(int) ColumnIndexer { + panic("cannot create column indexer from parquet group") +} + +func (groupType) NewDictionary(int, int, encoding.Values) Dictionary { + panic("cannot create dictionary from parquet group") +} + +func (t groupType) NewColumnBuffer(int, int) ColumnBuffer { + panic("cannot create column buffer from parquet group") +} + +func (t groupType) NewPage(int, int, encoding.Values) Page { + panic("cannot create page from parquet group") +} + +func (t groupType) NewValues(_ []byte, _ []uint32) encoding.Values { + panic("cannot create values from parquet group") +} + +func (groupType) Encode(_ []byte, _ encoding.Values, _ encoding.Encoding) ([]byte, error) { + panic("cannot encode parquet group") +} + +func (groupType) Decode(_ encoding.Values, _ []byte, _ encoding.Encoding) (encoding.Values, error) { + panic("cannot decode parquet group") +} + +func (groupType) EstimateDecodeSize(_ int, _ []byte, _ encoding.Encoding) int { + panic("cannot estimate decode size of parquet group") +} + +func (groupType) AssignValue(reflect.Value, Value) error { + panic("cannot assign value to a parquet group") +} + +func (t groupType) ConvertValue(Value, Type) (Value, error) { + panic("cannot convert value to a parquet group") +} + +func (groupType) Length() int { return 0 } + +func (groupType) EstimateSize(int) int { return 0 } + +func (groupType) EstimateNumValues(int) 
int { return 0 } + +func (groupType) ColumnOrder() *format.ColumnOrder { return nil } + +func (groupType) PhysicalType() *format.Type { return nil } + +func (groupType) LogicalType() *format.LogicalType { return nil } + +func (groupType) ConvertedType() *deprecated.ConvertedType { return nil } diff --git a/vendor/github.com/parquet-go/parquet-go/type_int32.go b/vendor/github.com/parquet-go/parquet-go/type_int32.go new file mode 100644 index 00000000000..f6497c76d0d --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_int32.go @@ -0,0 +1,119 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type int32Type struct{} + +func (t int32Type) String() string { return "INT32" } +func (t int32Type) Kind() Kind { return Int32 } +func (t int32Type) Length() int { return 32 } +func (t int32Type) EstimateSize(n int) int { return 4 * n } +func (t int32Type) EstimateNumValues(n int) int { return n / 4 } +func (t int32Type) Compare(a, b Value) int { return compareInt32(a.int32(), b.int32()) } +func (t int32Type) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } +func (t int32Type) LogicalType() *format.LogicalType { + return &format.LogicalType{Integer: &format.IntType{ + BitWidth: 32, + IsSigned: true, + }} +} +func (t int32Type) ConvertedType() *deprecated.ConvertedType { return nil } +func (t int32Type) PhysicalType() *format.Type { return &physicalTypes[Int32] } + +func (t int32Type) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newInt32ColumnIndexer() +} + +func (t int32Type) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newInt32ColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t int32Type) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newInt32Dictionary(t, makeColumnIndex(columnIndex), 
makeNumValues(numValues), data) +} + +func (t int32Type) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newInt32Page(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t int32Type) NewValues(values []byte, _ []uint32) encoding.Values { + return encoding.Int32ValuesFromBytes(values) +} + +func (t int32Type) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeInt32(dst, src, enc) +} + +func (t int32Type) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeInt32(dst, src, enc) +} + +func (t int32Type) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return t.EstimateSize(numValues) +} + +func (t int32Type) AssignValue(dst reflect.Value, src Value) error { + v := src.int32() + switch dst.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32: + dst.SetInt(int64(v)) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + dst.SetUint(uint64(v)) + default: + dst.Set(reflect.ValueOf(v)) + } + return nil +} + +func (t int32Type) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case *stringType: + return convertStringToInt32(val) + } + switch typ.Kind() { + case Boolean: + return convertBooleanToInt32(val) + case Int32: + return val, nil + case Int64: + return convertInt64ToInt32(val) + case Int96: + return convertInt96ToInt32(val) + case Float: + return convertFloatToInt32(val) + case Double: + return convertDoubleToInt32(val) + case ByteArray, FixedLenByteArray: + return convertByteArrayToInt32(val) + default: + return makeValueKind(Int32), nil + } +} + +type uint32Type struct{ int32Type } + +func (t uint32Type) Compare(a, b Value) int { + return compareUint32(a.uint32(), b.uint32()) +} + +func (t uint32Type) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newUint32ColumnIndexer() +} + +func (t uint32Type) NewColumnBuffer(columnIndex, 
numValues int) ColumnBuffer { + return newUint32ColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t uint32Type) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newUint32Dictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t uint32Type) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newUint32Page(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_int64.go b/vendor/github.com/parquet-go/parquet-go/type_int64.go new file mode 100644 index 00000000000..9742a7f14b0 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_int64.go @@ -0,0 +1,119 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type int64Type struct{} + +func (t int64Type) String() string { return "INT64" } +func (t int64Type) Kind() Kind { return Int64 } +func (t int64Type) Length() int { return 64 } +func (t int64Type) EstimateSize(n int) int { return 8 * n } +func (t int64Type) EstimateNumValues(n int) int { return n / 8 } +func (t int64Type) Compare(a, b Value) int { return compareInt64(a.int64(), b.int64()) } +func (t int64Type) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } +func (t int64Type) LogicalType() *format.LogicalType { + return &format.LogicalType{Integer: &format.IntType{ + BitWidth: 64, + IsSigned: true, + }} +} +func (t int64Type) ConvertedType() *deprecated.ConvertedType { return nil } +func (t int64Type) PhysicalType() *format.Type { return &physicalTypes[Int64] } + +func (t int64Type) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newInt64ColumnIndexer() +} + +func (t int64Type) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newInt64ColumnBuffer(t, makeColumnIndex(columnIndex), 
makeNumValues(numValues)) +} + +func (t int64Type) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newInt64Dictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t int64Type) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newInt64Page(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t int64Type) NewValues(values []byte, _ []uint32) encoding.Values { + return encoding.Int64ValuesFromBytes(values) +} + +func (t int64Type) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeInt64(dst, src, enc) +} + +func (t int64Type) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeInt64(dst, src, enc) +} + +func (t int64Type) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return t.EstimateSize(numValues) +} + +func (t int64Type) AssignValue(dst reflect.Value, src Value) error { + v := src.int64() + switch dst.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + dst.SetInt(v) + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + dst.SetUint(uint64(v)) + default: + dst.Set(reflect.ValueOf(v)) + } + return nil +} + +func (t int64Type) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case *stringType: + return convertStringToInt64(val) + } + switch typ.Kind() { + case Boolean: + return convertBooleanToInt64(val) + case Int32: + return convertInt32ToInt64(val) + case Int64: + return val, nil + case Int96: + return convertInt96ToInt64(val) + case Float: + return convertFloatToInt64(val) + case Double: + return convertDoubleToInt64(val) + case ByteArray, FixedLenByteArray: + return convertByteArrayToInt64(val) + default: + return makeValueKind(Int64), nil + } +} + +type uint64Type struct{ int64Type } + +func 
(t uint64Type) Compare(a, b Value) int { + return compareUint64(a.uint64(), b.uint64()) +} + +func (t uint64Type) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newUint64ColumnIndexer() +} + +func (t uint64Type) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newUint64ColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t uint64Type) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newUint64Dictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t uint64Type) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newUint64Page(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_int96.go b/vendor/github.com/parquet-go/parquet-go/type_int96.go new file mode 100644 index 00000000000..9abb6dfd1eb --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_int96.go @@ -0,0 +1,86 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type int96Type struct{} + +func (t int96Type) String() string { return "INT96" } + +func (t int96Type) Kind() Kind { return Int96 } +func (t int96Type) Length() int { return 96 } +func (t int96Type) EstimateSize(n int) int { return 12 * n } +func (t int96Type) EstimateNumValues(n int) int { return n / 12 } +func (t int96Type) Compare(a, b Value) int { return compareInt96(a.int96(), b.int96()) } +func (t int96Type) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } +func (t int96Type) LogicalType() *format.LogicalType { return nil } +func (t int96Type) ConvertedType() *deprecated.ConvertedType { return nil } +func (t int96Type) PhysicalType() *format.Type { return &physicalTypes[Int96] } + +func (t int96Type) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return 
newInt96ColumnIndexer() +} + +func (t int96Type) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newInt96ColumnBuffer(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t int96Type) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newInt96Dictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t int96Type) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newInt96Page(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t int96Type) NewValues(values []byte, _ []uint32) encoding.Values { + return encoding.Int96ValuesFromBytes(values) +} + +func (t int96Type) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeInt96(dst, src, enc) +} + +func (t int96Type) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeInt96(dst, src, enc) +} + +func (t int96Type) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return t.EstimateSize(numValues) +} + +func (t int96Type) AssignValue(dst reflect.Value, src Value) error { + v := src.Int96() + dst.Set(reflect.ValueOf(v)) + return nil +} + +func (t int96Type) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case *stringType: + return convertStringToInt96(val) + } + switch typ.Kind() { + case Boolean: + return convertBooleanToInt96(val) + case Int32: + return convertInt32ToInt96(val) + case Int64: + return convertInt64ToInt96(val) + case Int96: + return val, nil + case Float: + return convertFloatToInt96(val) + case Double: + return convertDoubleToInt96(val) + case ByteArray, FixedLenByteArray: + return convertByteArrayToInt96(val) + default: + return makeValueKind(Int96), nil + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_int_logical.go b/vendor/github.com/parquet-go/parquet-go/type_int_logical.go 
new file mode 100644 index 00000000000..16c258f4ee1 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_int_logical.go @@ -0,0 +1,206 @@ +package parquet + +import ( + "fmt" + "math/bits" + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// Int constructs a leaf node of signed integer logical type of the given bit +// width. +// +// The bit width must be one of 8, 16, 32, 64, or the function will panic. +func Int(bitWidth int) Node { + return Leaf(integerType(bitWidth, &signedIntTypes)) +} + +// Uint constructs a leaf node of unsigned integer logical type of the given +// bit width. +// +// The bit width must be one of 8, 16, 32, 64, or the function will panic. +func Uint(bitWidth int) Node { + return Leaf(integerType(bitWidth, &unsignedIntTypes)) +} + +func integerType(bitWidth int, types *[4]intType) *intType { + switch bitWidth { + case 8: + return &types[0] + case 16: + return &types[1] + case 32: + return &types[2] + case 64: + return &types[3] + default: + panic(fmt.Sprintf("cannot create a %d bits parquet integer node", bitWidth)) + } +} + +var signedIntTypes = [...]intType{ + {BitWidth: 8, IsSigned: true}, + {BitWidth: 16, IsSigned: true}, + {BitWidth: 32, IsSigned: true}, + {BitWidth: 64, IsSigned: true}, +} + +var unsignedIntTypes = [...]intType{ + {BitWidth: 8, IsSigned: false}, + {BitWidth: 16, IsSigned: false}, + {BitWidth: 32, IsSigned: false}, + {BitWidth: 64, IsSigned: false}, +} + +var signedLogicalIntTypes = [...]format.LogicalType{ + {Integer: (*format.IntType)(&signedIntTypes[0])}, + {Integer: (*format.IntType)(&signedIntTypes[1])}, + {Integer: (*format.IntType)(&signedIntTypes[2])}, + {Integer: (*format.IntType)(&signedIntTypes[3])}, +} + +var unsignedLogicalIntTypes = [...]format.LogicalType{ + {Integer: (*format.IntType)(&unsignedIntTypes[0])}, + {Integer: (*format.IntType)(&unsignedIntTypes[1])}, + {Integer: 
(*format.IntType)(&unsignedIntTypes[2])}, + {Integer: (*format.IntType)(&unsignedIntTypes[3])}, +} + +type intType format.IntType + +func (t *intType) baseType() Type { + if t.IsSigned { + if t.BitWidth == 64 { + return int64Type{} + } else { + return int32Type{} + } + } else { + if t.BitWidth == 64 { + return uint64Type{} + } else { + return uint32Type{} + } + } +} + +func (t *intType) String() string { return (*format.IntType)(t).String() } + +func (t *intType) Kind() Kind { return t.baseType().Kind() } + +func (t *intType) Length() int { return int(t.BitWidth) } + +func (t *intType) EstimateSize(n int) int { return (int(t.BitWidth) / 8) * n } + +func (t *intType) EstimateNumValues(n int) int { return n / (int(t.BitWidth) / 8) } + +func (t *intType) Compare(a, b Value) int { + // This code is similar to t.baseType().Compare(a,b) but comparison methods + // tend to be invoked a lot (e.g. when sorting) so avoiding the interface + // indirection in this case yields much better throughput in some cases. 
+ if t.BitWidth == 64 { + i1 := a.int64() + i2 := b.int64() + if t.IsSigned { + return compareInt64(i1, i2) + } else { + return compareUint64(uint64(i1), uint64(i2)) + } + } else { + i1 := a.int32() + i2 := b.int32() + if t.IsSigned { + return compareInt32(i1, i2) + } else { + return compareUint32(uint32(i1), uint32(i2)) + } + } +} + +func (t *intType) ColumnOrder() *format.ColumnOrder { return t.baseType().ColumnOrder() } + +func (t *intType) PhysicalType() *format.Type { return t.baseType().PhysicalType() } + +func (t *intType) LogicalType() *format.LogicalType { + switch t { + case &signedIntTypes[0]: + return &signedLogicalIntTypes[0] + case &signedIntTypes[1]: + return &signedLogicalIntTypes[1] + case &signedIntTypes[2]: + return &signedLogicalIntTypes[2] + case &signedIntTypes[3]: + return &signedLogicalIntTypes[3] + case &unsignedIntTypes[0]: + return &unsignedLogicalIntTypes[0] + case &unsignedIntTypes[1]: + return &unsignedLogicalIntTypes[1] + case &unsignedIntTypes[2]: + return &unsignedLogicalIntTypes[2] + case &unsignedIntTypes[3]: + return &unsignedLogicalIntTypes[3] + default: + return &format.LogicalType{Integer: (*format.IntType)(t)} + } +} + +func (t *intType) ConvertedType() *deprecated.ConvertedType { + convertedType := bits.Len8(uint8(t.BitWidth)/8) - 1 // 8=>0, 16=>1, 32=>2, 64=>4 + if t.IsSigned { + convertedType += int(deprecated.Int8) + } else { + convertedType += int(deprecated.Uint8) + } + return &convertedTypes[convertedType] +} + +func (t *intType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return t.baseType().NewColumnIndexer(sizeLimit) +} + +func (t *intType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return t.baseType().NewColumnBuffer(columnIndex, numValues) +} + +func (t *intType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return t.baseType().NewDictionary(columnIndex, numValues, data) +} + +func (t *intType) NewPage(columnIndex, numValues int, data encoding.Values) Page { 
+ return t.baseType().NewPage(columnIndex, numValues, data) +} + +func (t *intType) NewValues(values []byte, offsets []uint32) encoding.Values { + return t.baseType().NewValues(values, offsets) +} + +func (t *intType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return t.baseType().Encode(dst, src, enc) +} + +func (t *intType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return t.baseType().Decode(dst, src, enc) +} + +func (t *intType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return t.baseType().EstimateDecodeSize(numValues, src, enc) +} + +func (t *intType) AssignValue(dst reflect.Value, src Value) error { + if t.BitWidth == 64 { + return int64Type{}.AssignValue(dst, src) + } else { + return int32Type{}.AssignValue(dst, src) + } +} + +func (t *intType) ConvertValue(val Value, typ Type) (Value, error) { + if t.BitWidth == 64 { + return int64Type{}.ConvertValue(val, typ) + } else { + return int32Type{}.ConvertValue(val, typ) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_json.go b/vendor/github.com/parquet-go/parquet-go/type_json.go new file mode 100644 index 00000000000..abf452f7fb9 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_json.go @@ -0,0 +1,106 @@ +package parquet + +import ( + "encoding/json" + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// JSON constructs a leaf node of JSON logical type. 
+// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#json +func JSON() Node { return Leaf(&jsonType{}) } + +var jsonLogicalType = format.LogicalType{ + Json: new(format.JsonType), +} + +type jsonType format.JsonType + +func (t *jsonType) String() string { return (*format.JsonType)(t).String() } + +func (t *jsonType) Kind() Kind { return byteArrayType{}.Kind() } + +func (t *jsonType) Length() int { return byteArrayType{}.Length() } + +func (t *jsonType) EstimateSize(n int) int { return byteArrayType{}.EstimateSize(n) } + +func (t *jsonType) EstimateNumValues(n int) int { return byteArrayType{}.EstimateNumValues(n) } + +func (t *jsonType) Compare(a, b Value) int { return byteArrayType{}.Compare(a, b) } + +func (t *jsonType) ColumnOrder() *format.ColumnOrder { return byteArrayType{}.ColumnOrder() } + +func (t *jsonType) PhysicalType() *format.Type { return byteArrayType{}.PhysicalType() } + +func (t *jsonType) LogicalType() *format.LogicalType { return &jsonLogicalType } + +func (t *jsonType) ConvertedType() *deprecated.ConvertedType { + return &convertedTypes[deprecated.Json] +} + +func (t *jsonType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return byteArrayType{}.NewColumnIndexer(sizeLimit) +} + +func (t *jsonType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return byteArrayType{}.NewDictionary(columnIndex, numValues, data) +} + +func (t *jsonType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return byteArrayType{}.NewColumnBuffer(columnIndex, numValues) +} + +func (t *jsonType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return byteArrayType{}.NewPage(columnIndex, numValues, data) +} + +func (t *jsonType) NewValues(values []byte, offsets []uint32) encoding.Values { + return byteArrayType{}.NewValues(values, offsets) +} + +func (t *jsonType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return byteArrayType{}.Encode(dst, src, 
enc) +} + +func (t *jsonType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return byteArrayType{}.Decode(dst, src, enc) +} + +func (t *jsonType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return byteArrayType{}.EstimateDecodeSize(numValues, src, enc) +} + +func (t *jsonType) AssignValue(dst reflect.Value, src Value) error { + // Assign value using ByteArrayType for BC... + switch dst.Kind() { + case reflect.String: + return byteArrayType{}.AssignValue(dst, src) + case reflect.Slice: + if dst.Type().Elem().Kind() == reflect.Uint8 { + return byteArrayType{}.AssignValue(dst, src) + } + } + + // Otherwise handle with json.Unmarshal + b := src.byteArray() + val := reflect.New(dst.Type()).Elem() + err := json.Unmarshal(b, val.Addr().Interface()) + if err != nil { + return err + } + dst.Set(val) + return nil +} + +func (t *jsonType) ConvertValue(val Value, typ Type) (Value, error) { + switch typ.(type) { + case byteArrayType, *stringType, *jsonType: + return val, nil + default: + return val, invalidConversion(val, "JSON", typ.String()) + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_list.go b/vendor/github.com/parquet-go/parquet-go/type_list.go new file mode 100644 index 00000000000..20a1bbf8663 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_list.go @@ -0,0 +1,86 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// List constructs a node of LIST logical type. 
+// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#lists +func List(of Node) Node { + return listNode{Group{"list": Repeated(Group{"element": of})}} +} + +type listNode struct{ Group } + +func (listNode) Type() Type { return &listType{} } + +type listType format.ListType + +func (t *listType) String() string { return (*format.ListType)(t).String() } + +func (t *listType) Kind() Kind { panic("cannot call Kind on parquet LIST type") } + +func (t *listType) Length() int { return 0 } + +func (t *listType) EstimateSize(int) int { return 0 } + +func (t *listType) EstimateNumValues(int) int { return 0 } + +func (t *listType) Compare(Value, Value) int { panic("cannot compare values on parquet LIST type") } + +func (t *listType) ColumnOrder() *format.ColumnOrder { return nil } + +func (t *listType) PhysicalType() *format.Type { return nil } + +func (t *listType) LogicalType() *format.LogicalType { + return &format.LogicalType{List: (*format.ListType)(t)} +} + +func (t *listType) ConvertedType() *deprecated.ConvertedType { + return &convertedTypes[deprecated.List] +} + +func (t *listType) NewColumnIndexer(int) ColumnIndexer { + panic("create create column indexer from parquet LIST type") +} + +func (t *listType) NewDictionary(int, int, encoding.Values) Dictionary { + panic("cannot create dictionary from parquet LIST type") +} + +func (t *listType) NewColumnBuffer(int, int) ColumnBuffer { + panic("cannot create column buffer from parquet LIST type") +} + +func (t *listType) NewPage(int, int, encoding.Values) Page { + panic("cannot create page from parquet LIST type") +} + +func (t *listType) NewValues(values []byte, _ []uint32) encoding.Values { + panic("cannot create values from parquet LIST type") +} + +func (t *listType) Encode(_ []byte, _ encoding.Values, _ encoding.Encoding) ([]byte, error) { + panic("cannot encode parquet LIST type") +} + +func (t *listType) Decode(_ encoding.Values, _ []byte, _ encoding.Encoding) (encoding.Values, error) { + 
panic("cannot decode parquet LIST type") +} + +func (t *listType) EstimateDecodeSize(_ int, _ []byte, _ encoding.Encoding) int { + panic("cannot estimate decode size of parquet LIST type") +} + +func (t *listType) AssignValue(reflect.Value, Value) error { + panic("cannot assign value to a parquet LIST type") +} + +func (t *listType) ConvertValue(Value, Type) (Value, error) { + panic("cannot convert value to a parquet LIST type") +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_map.go b/vendor/github.com/parquet-go/parquet-go/type_map.go new file mode 100644 index 00000000000..d5f6a95cc55 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_map.go @@ -0,0 +1,91 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// Map constructs a node of MAP logical type. +// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#maps +func Map(key, value Node) Node { + return mapNode{Group{ + "key_value": Repeated(Group{ + "key": Required(key), + "value": value, + }), + }} +} + +type mapNode struct{ Group } + +func (mapNode) Type() Type { return &mapType{} } + +type mapType format.MapType + +func (t *mapType) String() string { return (*format.MapType)(t).String() } + +func (t *mapType) Kind() Kind { panic("cannot call Kind on parquet MAP type") } + +func (t *mapType) Length() int { return 0 } + +func (t *mapType) EstimateSize(int) int { return 0 } + +func (t *mapType) EstimateNumValues(int) int { return 0 } + +func (t *mapType) Compare(Value, Value) int { panic("cannot compare values on parquet MAP type") } + +func (t *mapType) ColumnOrder() *format.ColumnOrder { return nil } + +func (t *mapType) PhysicalType() *format.Type { return nil } + +func (t *mapType) LogicalType() *format.LogicalType { + return &format.LogicalType{Map: (*format.MapType)(t)} +} + +func (t *mapType) ConvertedType() 
*deprecated.ConvertedType { + return &convertedTypes[deprecated.Map] +} + +func (t *mapType) NewColumnIndexer(int) ColumnIndexer { + panic("create create column indexer from parquet MAP type") +} + +func (t *mapType) NewDictionary(int, int, encoding.Values) Dictionary { + panic("cannot create dictionary from parquet MAP type") +} + +func (t *mapType) NewColumnBuffer(int, int) ColumnBuffer { + panic("cannot create column buffer from parquet MAP type") +} + +func (t *mapType) NewPage(int, int, encoding.Values) Page { + panic("cannot create page from parquet MAP type") +} + +func (t *mapType) NewValues(values []byte, _ []uint32) encoding.Values { + panic("cannot create values from parquet MAP type") +} + +func (t *mapType) Encode(_ []byte, _ encoding.Values, _ encoding.Encoding) ([]byte, error) { + panic("cannot encode parquet MAP type") +} + +func (t *mapType) Decode(_ encoding.Values, _ []byte, _ encoding.Encoding) (encoding.Values, error) { + panic("cannot decode parquet MAP type") +} + +func (t *mapType) EstimateDecodeSize(_ int, _ []byte, _ encoding.Encoding) int { + panic("cannot estimate decode size of parquet MAP type") +} + +func (t *mapType) AssignValue(reflect.Value, Value) error { + panic("cannot assign value to a parquet MAP type") +} + +func (t *mapType) ConvertValue(Value, Type) (Value, error) { + panic("cannot convert value to a parquet MAP type") +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_null.go b/vendor/github.com/parquet-go/parquet-go/type_null.go new file mode 100644 index 00000000000..ca1947d9cea --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_null.go @@ -0,0 +1,73 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +type nullType format.NullType + +func (t *nullType) String() string { return (*format.NullType)(t).String() } + +func (t *nullType) Kind() Kind { return -1 } + 
+func (t *nullType) Length() int { return 0 } + +func (t *nullType) EstimateSize(int) int { return 0 } + +func (t *nullType) EstimateNumValues(int) int { return 0 } + +func (t *nullType) Compare(Value, Value) int { panic("cannot compare values on parquet NULL type") } + +func (t *nullType) ColumnOrder() *format.ColumnOrder { return nil } + +func (t *nullType) PhysicalType() *format.Type { return nil } + +func (t *nullType) LogicalType() *format.LogicalType { + return &format.LogicalType{Unknown: (*format.NullType)(t)} +} + +func (t *nullType) ConvertedType() *deprecated.ConvertedType { return nil } + +func (t *nullType) NewColumnIndexer(int) ColumnIndexer { + panic("create create column indexer from parquet NULL type") +} + +func (t *nullType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newNullDictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t *nullType) NewColumnBuffer(int, int) ColumnBuffer { + panic("cannot create column buffer from parquet NULL type") +} + +func (t *nullType) NewPage(columnIndex, numValues int, _ encoding.Values) Page { + return newNullPage(t, makeColumnIndex(columnIndex), makeNumValues(numValues)) +} + +func (t *nullType) NewValues(_ []byte, _ []uint32) encoding.Values { + return encoding.Values{} +} + +func (t *nullType) Encode(dst []byte, _ encoding.Values, _ encoding.Encoding) ([]byte, error) { + return dst[:0], nil +} + +func (t *nullType) Decode(dst encoding.Values, _ []byte, _ encoding.Encoding) (encoding.Values, error) { + return dst, nil +} + +func (t *nullType) EstimateDecodeSize(_ int, _ []byte, _ encoding.Encoding) int { + return 0 +} + +func (t *nullType) AssignValue(reflect.Value, Value) error { + return nil +} + +func (t *nullType) ConvertValue(val Value, _ Type) (Value, error) { + return val, nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_string.go b/vendor/github.com/parquet-go/parquet-go/type_string.go new file mode 100644 index 
00000000000..cdd9afb7873 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_string.go @@ -0,0 +1,121 @@ +package parquet + +import ( + "bytes" + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// String constructs a leaf node of UTF8 logical type. +// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#string +func String() Node { return Leaf(&stringType{}) } + +var stringLogicalType = format.LogicalType{ + UTF8: new(format.StringType), +} + +type stringType format.StringType + +func (t *stringType) String() string { return (*format.StringType)(t).String() } + +func (t *stringType) Kind() Kind { return ByteArray } + +func (t *stringType) Length() int { return 0 } + +func (t *stringType) EstimateSize(n int) int { return byteArrayType{}.EstimateSize(n) } + +func (t *stringType) EstimateNumValues(n int) int { return byteArrayType{}.EstimateNumValues(n) } + +func (t *stringType) Compare(a, b Value) int { + return bytes.Compare(a.byteArray(), b.byteArray()) +} + +func (t *stringType) ColumnOrder() *format.ColumnOrder { + return &typeDefinedColumnOrder +} + +func (t *stringType) PhysicalType() *format.Type { + return &physicalTypes[ByteArray] +} + +func (t *stringType) LogicalType() *format.LogicalType { + return &stringLogicalType +} + +func (t *stringType) ConvertedType() *deprecated.ConvertedType { + return &convertedTypes[deprecated.UTF8] +} + +func (t *stringType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return newByteArrayColumnIndexer(sizeLimit) +} + +func (t *stringType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return newByteArrayDictionary(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t *stringType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return newByteArrayColumnBuffer(t, makeColumnIndex(columnIndex), 
makeNumValues(numValues)) +} + +func (t *stringType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return newByteArrayPage(t, makeColumnIndex(columnIndex), makeNumValues(numValues), data) +} + +func (t *stringType) NewValues(values []byte, offsets []uint32) encoding.Values { + return encoding.ByteArrayValues(values, offsets) +} + +func (t *stringType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return encoding.EncodeByteArray(dst, src, enc) +} + +func (t *stringType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return encoding.DecodeByteArray(dst, src, enc) +} + +func (t *stringType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return byteArrayType{}.EstimateDecodeSize(numValues, src, enc) +} + +func (t *stringType) AssignValue(dst reflect.Value, src Value) error { + return byteArrayType{}.AssignValue(dst, src) +} + +func (t *stringType) ConvertValue(val Value, typ Type) (Value, error) { + switch t2 := typ.(type) { + case *dateType: + return convertDateToString(val) + case *timeType: + tz := t2.tz() + if t2.Unit.Micros != nil { + return convertTimeMicrosToString(val, tz) + } else { + return convertTimeMillisToString(val, tz) + } + } + switch typ.Kind() { + case Boolean: + return convertBooleanToString(val) + case Int32: + return convertInt32ToString(val) + case Int64: + return convertInt64ToString(val) + case Int96: + return convertInt96ToString(val) + case Float: + return convertFloatToString(val) + case Double: + return convertDoubleToString(val) + case ByteArray: + return val, nil + case FixedLenByteArray: + return convertFixedLenByteArrayToString(val) + default: + return makeValueKind(ByteArray), nil + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_time.go b/vendor/github.com/parquet-go/parquet-go/type_time.go new file mode 100644 index 00000000000..327ba8bf0ef --- /dev/null +++ 
b/vendor/github.com/parquet-go/parquet-go/type_time.go @@ -0,0 +1,279 @@ +package parquet + +import ( + "reflect" + "time" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// TimeUnit represents units of time in the parquet type system. +type TimeUnit interface { + // Returns the precision of the time unit as a time.Duration value. + Duration() time.Duration + // Converts the TimeUnit value to its representation in the parquet thrift + // format. + TimeUnit() format.TimeUnit +} + +var ( + Millisecond TimeUnit = &millisecond{} + Microsecond TimeUnit = µsecond{} + Nanosecond TimeUnit = &nanosecond{} +) + +type millisecond format.MilliSeconds + +func (u *millisecond) Duration() time.Duration { return time.Millisecond } +func (u *millisecond) TimeUnit() format.TimeUnit { + return format.TimeUnit{Millis: (*format.MilliSeconds)(u)} +} + +type microsecond format.MicroSeconds + +func (u *microsecond) Duration() time.Duration { return time.Microsecond } +func (u *microsecond) TimeUnit() format.TimeUnit { + return format.TimeUnit{Micros: (*format.MicroSeconds)(u)} +} + +type nanosecond format.NanoSeconds + +func (u *nanosecond) Duration() time.Duration { return time.Nanosecond } +func (u *nanosecond) TimeUnit() format.TimeUnit { + return format.TimeUnit{Nanos: (*format.NanoSeconds)(u)} +} + +// Time constructs a leaf node of TIME logical type. +// IsAdjustedToUTC is true by default. +// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#time +func Time(unit TimeUnit) Node { + return TimeAdjusted(unit, true) +} + +// TimeAdjusted constructs a leaf node of TIME logical type +// with the IsAdjustedToUTC property explicitly set. 
+// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#time +func TimeAdjusted(unit TimeUnit, isAdjustedToUTC bool) Node { + // Use pre-allocated instances for common cases + timeUnit := unit.TimeUnit() + if isAdjustedToUTC { + switch { + case timeUnit.Millis != nil: + return Leaf(&timeMilliAdjustedToUTC) + case timeUnit.Micros != nil: + return Leaf(&timeMicroAdjustedToUTC) + case timeUnit.Nanos != nil: + return Leaf(&timeNanoAdjustedToUTC) + } + } else { + switch { + case timeUnit.Millis != nil: + return Leaf(&timeMilliNotAdjustedToUTC) + case timeUnit.Micros != nil: + return Leaf(&timeMicroNotAdjustedToUTC) + case timeUnit.Nanos != nil: + return Leaf(&timeNanoNotAdjustedToUTC) + } + } + // Fallback for unknown unit types + return Leaf(&timeType{IsAdjustedToUTC: isAdjustedToUTC, Unit: timeUnit}) +} + +var timeMilliAdjustedToUTC = timeType{ + IsAdjustedToUTC: true, + Unit: format.TimeUnit{Millis: new(format.MilliSeconds)}, +} + +var timeMicroAdjustedToUTC = timeType{ + IsAdjustedToUTC: true, + Unit: format.TimeUnit{Micros: new(format.MicroSeconds)}, +} + +var timeNanoAdjustedToUTC = timeType{ + IsAdjustedToUTC: true, + Unit: format.TimeUnit{Nanos: new(format.NanoSeconds)}, +} + +var timeMilliNotAdjustedToUTC = timeType{ + IsAdjustedToUTC: false, + Unit: format.TimeUnit{Millis: new(format.MilliSeconds)}, +} + +var timeMicroNotAdjustedToUTC = timeType{ + IsAdjustedToUTC: false, + Unit: format.TimeUnit{Micros: new(format.MicroSeconds)}, +} + +var timeNanoNotAdjustedToUTC = timeType{ + IsAdjustedToUTC: false, + Unit: format.TimeUnit{Nanos: new(format.NanoSeconds)}, +} + +var timeMilliAdjustedToUTCLogicalType = format.LogicalType{ + Time: (*format.TimeType)(&timeMilliAdjustedToUTC), +} + +var timeMicroAdjustedToUTCLogicalType = format.LogicalType{ + Time: (*format.TimeType)(&timeMicroAdjustedToUTC), +} + +var timeNanoAdjustedToUTCLogicalType = format.LogicalType{ + Time: (*format.TimeType)(&timeNanoAdjustedToUTC), +} + +var 
timeMilliNotAdjustedToUTCLogicalType = format.LogicalType{ + Time: (*format.TimeType)(&timeMilliNotAdjustedToUTC), +} + +var timeMicroNotAdjustedToUTCLogicalType = format.LogicalType{ + Time: (*format.TimeType)(&timeMicroNotAdjustedToUTC), +} + +var timeNanoNotAdjustedToUTCLogicalType = format.LogicalType{ + Time: (*format.TimeType)(&timeNanoNotAdjustedToUTC), +} + +type timeType format.TimeType + +func (t *timeType) tz() *time.Location { + if t.IsAdjustedToUTC { + return time.UTC + } else { + return time.Local + } +} + +func (t *timeType) baseType() Type { + if t.useInt32() { + return int32Type{} + } else { + return int64Type{} + } +} + +func (t *timeType) useInt32() bool { return t.Unit.Millis != nil } + +func (t *timeType) useInt64() bool { return t.Unit.Micros != nil } + +func (t *timeType) String() string { return (*format.TimeType)(t).String() } + +func (t *timeType) Kind() Kind { return t.baseType().Kind() } + +func (t *timeType) Length() int { return t.baseType().Length() } + +func (t *timeType) EstimateSize(n int) int { return t.baseType().EstimateSize(n) } + +func (t *timeType) EstimateNumValues(n int) int { return t.baseType().EstimateNumValues(n) } + +func (t *timeType) Compare(a, b Value) int { return t.baseType().Compare(a, b) } + +func (t *timeType) ColumnOrder() *format.ColumnOrder { return t.baseType().ColumnOrder() } + +func (t *timeType) PhysicalType() *format.Type { return t.baseType().PhysicalType() } + +func (t *timeType) LogicalType() *format.LogicalType { + switch t { + case &timeMilliAdjustedToUTC: + return &timeMilliAdjustedToUTCLogicalType + case &timeMicroAdjustedToUTC: + return &timeMicroAdjustedToUTCLogicalType + case &timeNanoAdjustedToUTC: + return &timeNanoAdjustedToUTCLogicalType + case &timeMilliNotAdjustedToUTC: + return &timeMilliNotAdjustedToUTCLogicalType + case &timeMicroNotAdjustedToUTC: + return &timeMicroNotAdjustedToUTCLogicalType + case &timeNanoNotAdjustedToUTC: + return &timeNanoNotAdjustedToUTCLogicalType + default: + 
return &format.LogicalType{Time: (*format.TimeType)(t)} + } +} + +func (t *timeType) ConvertedType() *deprecated.ConvertedType { + switch { + case t.useInt32(): + return &convertedTypes[deprecated.TimeMillis] + case t.useInt64(): + return &convertedTypes[deprecated.TimeMicros] + default: + return nil + } +} + +func (t *timeType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return t.baseType().NewColumnIndexer(sizeLimit) +} + +func (t *timeType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return t.baseType().NewColumnBuffer(columnIndex, numValues) +} + +func (t *timeType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return t.baseType().NewDictionary(columnIndex, numValues, data) +} + +func (t *timeType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return t.baseType().NewPage(columnIndex, numValues, data) +} + +func (t *timeType) NewValues(values []byte, offset []uint32) encoding.Values { + return t.baseType().NewValues(values, offset) +} + +func (t *timeType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return t.baseType().Encode(dst, src, enc) +} + +func (t *timeType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return t.baseType().Decode(dst, src, enc) +} + +func (t *timeType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return t.baseType().EstimateDecodeSize(numValues, src, enc) +} + +func (t *timeType) AssignValue(dst reflect.Value, src Value) error { + // Handle time.Duration specially to convert from the stored time unit to nanoseconds + if dst.Type() == reflect.TypeFor[time.Duration]() { + v := src.int64() + var nanos int64 + switch { + case t.Unit.Millis != nil: + nanos = v * int64(time.Millisecond) + case t.Unit.Micros != nil: + nanos = v * int64(time.Microsecond) + case t.Unit.Nanos != nil: + nanos = v + default: + nanos = v + } + dst.SetInt(nanos) + return nil + } 
+ return t.baseType().AssignValue(dst, src) +} + +func (t *timeType) ConvertValue(val Value, typ Type) (Value, error) { + switch src := typ.(type) { + case *stringType: + tz := t.tz() + if t.Unit.Micros != nil { + return convertStringToTimeMicros(val, tz) + } else { + return convertStringToTimeMillis(val, tz) + } + case *timestampType: + tz := t.tz() + if t.Unit.Micros != nil { + return convertTimestampToTimeMicros(val, src.Unit, src.tz(), tz) + } else { + return convertTimestampToTimeMillis(val, src.Unit, src.tz(), tz) + } + } + return t.baseType().ConvertValue(val, typ) +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_timestamp.go b/vendor/github.com/parquet-go/parquet-go/type_timestamp.go new file mode 100644 index 00000000000..08d58633a1f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_timestamp.go @@ -0,0 +1,257 @@ +package parquet + +import ( + "reflect" + "time" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// Timestamp constructs of leaf node of TIMESTAMP logical type. +// IsAdjustedToUTC is true by default. +// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#timestamp +func Timestamp(unit TimeUnit) Node { + return TimestampAdjusted(unit, true) +} + +// TimestampAdjusted constructs a leaf node of TIMESTAMP logical type +// with the IsAdjustedToUTC property explicitly set. 
+// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#time +func TimestampAdjusted(unit TimeUnit, isAdjustedToUTC bool) Node { + // Use pre-allocated instances for common cases + timeUnit := unit.TimeUnit() + if isAdjustedToUTC { + switch { + case timeUnit.Millis != nil: + return Leaf(×tampMilliAdjustedToUTC) + case timeUnit.Micros != nil: + return Leaf(×tampMicroAdjustedToUTC) + case timeUnit.Nanos != nil: + return Leaf(×tampNanoAdjustedToUTC) + } + } else { + switch { + case timeUnit.Millis != nil: + return Leaf(×tampMilliNotAdjustedToUTC) + case timeUnit.Micros != nil: + return Leaf(×tampMicroNotAdjustedToUTC) + case timeUnit.Nanos != nil: + return Leaf(×tampNanoNotAdjustedToUTC) + } + } + // Fallback for unknown unit types + return Leaf(×tampType{IsAdjustedToUTC: isAdjustedToUTC, Unit: timeUnit}) +} + +var timestampMilliAdjustedToUTC = timestampType{ + IsAdjustedToUTC: true, + Unit: format.TimeUnit{Millis: new(format.MilliSeconds)}, +} + +var timestampMicroAdjustedToUTC = timestampType{ + IsAdjustedToUTC: true, + Unit: format.TimeUnit{Micros: new(format.MicroSeconds)}, +} + +var timestampNanoAdjustedToUTC = timestampType{ + IsAdjustedToUTC: true, + Unit: format.TimeUnit{Nanos: new(format.NanoSeconds)}, +} + +var timestampMilliNotAdjustedToUTC = timestampType{ + IsAdjustedToUTC: false, + Unit: format.TimeUnit{Millis: new(format.MilliSeconds)}, +} + +var timestampMicroNotAdjustedToUTC = timestampType{ + IsAdjustedToUTC: false, + Unit: format.TimeUnit{Micros: new(format.MicroSeconds)}, +} + +var timestampNanoNotAdjustedToUTC = timestampType{ + IsAdjustedToUTC: false, + Unit: format.TimeUnit{Nanos: new(format.NanoSeconds)}, +} + +var timestampMilliAdjustedToUTCLogicalType = format.LogicalType{ + Timestamp: (*format.TimestampType)(×tampMilliAdjustedToUTC), +} + +var timestampMicroAdjustedToUTCLogicalType = format.LogicalType{ + Timestamp: (*format.TimestampType)(×tampMicroAdjustedToUTC), +} + +var timestampNanoAdjustedToUTCLogicalType = 
format.LogicalType{ + Timestamp: (*format.TimestampType)(×tampNanoAdjustedToUTC), +} + +var timestampMilliNotAdjustedToUTCLogicalType = format.LogicalType{ + Timestamp: (*format.TimestampType)(×tampMilliNotAdjustedToUTC), +} + +var timestampMicroNotAdjustedToUTCLogicalType = format.LogicalType{ + Timestamp: (*format.TimestampType)(×tampMicroNotAdjustedToUTC), +} + +var timestampNanoNotAdjustedToUTCLogicalType = format.LogicalType{ + Timestamp: (*format.TimestampType)(×tampNanoNotAdjustedToUTC), +} + +type timestampType format.TimestampType + +func (t *timestampType) tz() *time.Location { + if t.IsAdjustedToUTC { + return time.UTC + } else { + return time.Local + } +} + +func (t *timestampType) String() string { return (*format.TimestampType)(t).String() } + +func (t *timestampType) Kind() Kind { return int64Type{}.Kind() } + +func (t *timestampType) Length() int { return int64Type{}.Length() } + +func (t *timestampType) EstimateSize(n int) int { return int64Type{}.EstimateSize(n) } + +func (t *timestampType) EstimateNumValues(n int) int { return int64Type{}.EstimateNumValues(n) } + +func (t *timestampType) Compare(a, b Value) int { return int64Type{}.Compare(a, b) } + +func (t *timestampType) ColumnOrder() *format.ColumnOrder { return int64Type{}.ColumnOrder() } + +func (t *timestampType) PhysicalType() *format.Type { return int64Type{}.PhysicalType() } + +func (t *timestampType) LogicalType() *format.LogicalType { + switch t { + case ×tampMilliAdjustedToUTC: + return ×tampMilliAdjustedToUTCLogicalType + case ×tampMicroAdjustedToUTC: + return ×tampMicroAdjustedToUTCLogicalType + case ×tampNanoAdjustedToUTC: + return ×tampNanoAdjustedToUTCLogicalType + case ×tampMilliNotAdjustedToUTC: + return ×tampMilliNotAdjustedToUTCLogicalType + case ×tampMicroNotAdjustedToUTC: + return ×tampMicroNotAdjustedToUTCLogicalType + case ×tampNanoNotAdjustedToUTC: + return ×tampNanoNotAdjustedToUTCLogicalType + default: + return &format.LogicalType{Timestamp: 
(*format.TimestampType)(t)} + } +} + +func (t *timestampType) ConvertedType() *deprecated.ConvertedType { + switch { + case t.Unit.Millis != nil: + return &convertedTypes[deprecated.TimestampMillis] + case t.Unit.Micros != nil: + return &convertedTypes[deprecated.TimestampMicros] + default: + return nil + } +} + +func (t *timestampType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return int64Type{}.NewColumnIndexer(sizeLimit) +} + +func (t *timestampType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return int64Type{}.NewDictionary(columnIndex, numValues, data) +} + +func (t *timestampType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return int64Type{}.NewColumnBuffer(columnIndex, numValues) +} + +func (t *timestampType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return int64Type{}.NewPage(columnIndex, numValues, data) +} + +func (t *timestampType) NewValues(values []byte, offsets []uint32) encoding.Values { + return int64Type{}.NewValues(values, offsets) +} + +func (t *timestampType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return int64Type{}.Encode(dst, src, enc) +} + +func (t *timestampType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return int64Type{}.Decode(dst, src, enc) +} + +func (t *timestampType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return int64Type{}.EstimateDecodeSize(numValues, src, enc) +} + +func (t *timestampType) AssignValue(dst reflect.Value, src Value) error { + switch dst.Type() { + case reflect.TypeOf(time.Time{}): + // Check if the value is NULL - if so, assign zero time.Time + if src.IsNull() { + dst.Set(reflect.ValueOf(time.Time{})) + return nil + } + + unit := Nanosecond.TimeUnit() + lt := t.LogicalType() + if lt != nil && lt.Timestamp != nil { + unit = lt.Timestamp.Unit + } + + nanos := src.int64() + switch { + case unit.Millis != 
nil: + nanos = nanos * 1e6 + case unit.Micros != nil: + nanos = nanos * 1e3 + } + + val := time.Unix(0, nanos).UTC() + dst.Set(reflect.ValueOf(val)) + return nil + case reflect.TypeOf((*time.Time)(nil)): + // Handle *time.Time (pointer to time.Time) + if src.IsNull() { + // For NULL values, set the pointer to nil + dst.Set(reflect.Zero(dst.Type())) + return nil + } + + unit := Nanosecond.TimeUnit() + lt := t.LogicalType() + if lt != nil && lt.Timestamp != nil { + unit = lt.Timestamp.Unit + } + + nanos := src.int64() + switch { + case unit.Millis != nil: + nanos = nanos * 1e6 + case unit.Micros != nil: + nanos = nanos * 1e3 + } + + val := time.Unix(0, nanos).UTC() + ptr := &val + dst.Set(reflect.ValueOf(ptr)) + return nil + default: + return int64Type{}.AssignValue(dst, src) + } +} + +func (t *timestampType) ConvertValue(val Value, typ Type) (Value, error) { + switch src := typ.(type) { + case *timestampType: + return convertTimestampToTimestamp(val, src.Unit, t.Unit) + case *dateType: + return convertDateToTimestamp(val, t.Unit, t.tz()) + } + return int64Type{}.ConvertValue(val, typ) +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_uuid.go b/vendor/github.com/parquet-go/parquet-go/type_uuid.go new file mode 100644 index 00000000000..1cf2a4f94fc --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_uuid.go @@ -0,0 +1,80 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// UUID constructs a leaf node of UUID logical type. 
+// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#uuid +func UUID() Node { return Leaf(&uuidType{}) } + +var uuidLogicaType = format.LogicalType{ + UUID: new(format.UUIDType), +} + +type uuidType format.UUIDType + +func (t *uuidType) String() string { return (*format.UUIDType)(t).String() } + +func (t *uuidType) Kind() Kind { return be128Type{}.Kind() } + +func (t *uuidType) Length() int { return be128Type{}.Length() } + +func (t *uuidType) EstimateSize(n int) int { return be128Type{}.EstimateSize(n) } + +func (t *uuidType) EstimateNumValues(n int) int { return be128Type{}.EstimateNumValues(n) } + +func (t *uuidType) Compare(a, b Value) int { return be128Type{}.Compare(a, b) } + +func (t *uuidType) ColumnOrder() *format.ColumnOrder { return &typeDefinedColumnOrder } + +func (t *uuidType) PhysicalType() *format.Type { return &physicalTypes[FixedLenByteArray] } + +func (t *uuidType) LogicalType() *format.LogicalType { return &uuidLogicaType } + +func (t *uuidType) ConvertedType() *deprecated.ConvertedType { return nil } + +func (t *uuidType) NewColumnIndexer(sizeLimit int) ColumnIndexer { + return be128Type{isUUID: true}.NewColumnIndexer(sizeLimit) +} + +func (t *uuidType) NewDictionary(columnIndex, numValues int, data encoding.Values) Dictionary { + return be128Type{isUUID: true}.NewDictionary(columnIndex, numValues, data) +} + +func (t *uuidType) NewColumnBuffer(columnIndex, numValues int) ColumnBuffer { + return be128Type{isUUID: true}.NewColumnBuffer(columnIndex, numValues) +} + +func (t *uuidType) NewPage(columnIndex, numValues int, data encoding.Values) Page { + return be128Type{isUUID: true}.NewPage(columnIndex, numValues, data) +} + +func (t *uuidType) NewValues(values []byte, offsets []uint32) encoding.Values { + return be128Type{isUUID: true}.NewValues(values, offsets) +} + +func (t *uuidType) Encode(dst []byte, src encoding.Values, enc encoding.Encoding) ([]byte, error) { + return be128Type{isUUID: true}.Encode(dst, src, enc) +} 
+ +func (t *uuidType) Decode(dst encoding.Values, src []byte, enc encoding.Encoding) (encoding.Values, error) { + return be128Type{isUUID: true}.Decode(dst, src, enc) +} + +func (t *uuidType) EstimateDecodeSize(numValues int, src []byte, enc encoding.Encoding) int { + return be128Type{isUUID: true}.EstimateDecodeSize(numValues, src, enc) +} + +func (t *uuidType) AssignValue(dst reflect.Value, src Value) error { + return be128Type{isUUID: true}.AssignValue(dst, src) +} + +func (t *uuidType) ConvertValue(val Value, typ Type) (Value, error) { + return be128Type{isUUID: true}.ConvertValue(val, typ) +} diff --git a/vendor/github.com/parquet-go/parquet-go/type_variant.go b/vendor/github.com/parquet-go/parquet-go/type_variant.go new file mode 100644 index 00000000000..fd6cc06c972 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/type_variant.go @@ -0,0 +1,97 @@ +package parquet + +import ( + "reflect" + + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/format" +) + +// Variant constructs a node of unshredded VARIANT logical type. It is a group with +// two required fields, "metadata" and "value", both byte arrays. +// +// Experimental: The specification for variants is still being developed and the type +// is not fully adopted. Support for this type is subject to change. +// +// Initial support does not attempt to process the variant data. So reading and writing +// data of this type behaves as if it were just a group with two byte array fields, as +// if the logical type annotation were absent. This may change in the future. 
+// +// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#variant +func Variant() Node { + return variantNode{Group{"metadata": Required(Leaf(ByteArrayType)), "value": Required(Leaf(ByteArrayType))}} +} + +// TODO: add ShreddedVariant(Node) function, to create a shredded variant +// where the argument defines the type/structure of the shredded value(s). + +type variantNode struct{ Group } + +func (variantNode) Type() Type { return &variantType{} } + +type variantType format.VariantType + +func (t *variantType) String() string { return (*format.VariantType)(t).String() } + +func (t *variantType) Kind() Kind { panic("cannot call Kind on parquet VARIANT type") } + +func (t *variantType) Length() int { return 0 } + +func (t *variantType) EstimateSize(int) int { return 0 } + +func (t *variantType) EstimateNumValues(int) int { return 0 } + +func (t *variantType) Compare(Value, Value) int { + panic("cannot compare values on parquet VARIANT type") +} + +func (t *variantType) ColumnOrder() *format.ColumnOrder { return nil } + +func (t *variantType) PhysicalType() *format.Type { return nil } + +func (t *variantType) LogicalType() *format.LogicalType { + return &format.LogicalType{Variant: (*format.VariantType)(t)} +} + +func (t *variantType) ConvertedType() *deprecated.ConvertedType { return nil } + +func (t *variantType) NewColumnIndexer(int) ColumnIndexer { + panic("create create column indexer from parquet VARIANT type") +} + +func (t *variantType) NewDictionary(int, int, encoding.Values) Dictionary { + panic("cannot create dictionary from parquet VARIANT type") +} + +func (t *variantType) NewColumnBuffer(int, int) ColumnBuffer { + panic("cannot create column buffer from parquet VARIANT type") +} + +func (t *variantType) NewPage(int, int, encoding.Values) Page { + panic("cannot create page from parquet VARIANT type") +} + +func (t *variantType) NewValues(values []byte, _ []uint32) encoding.Values { + panic("cannot create values from parquet VARIANT 
type") +} + +func (t *variantType) Encode(_ []byte, _ encoding.Values, _ encoding.Encoding) ([]byte, error) { + panic("cannot encode parquet VARIANT type") +} + +func (t *variantType) Decode(_ encoding.Values, _ []byte, _ encoding.Encoding) (encoding.Values, error) { + panic("cannot decode parquet VARIANT type") +} + +func (t *variantType) EstimateDecodeSize(_ int, _ []byte, _ encoding.Encoding) int { + panic("cannot estimate decode size of parquet VARIANT type") +} + +func (t *variantType) AssignValue(reflect.Value, Value) error { + panic("cannot assign value to a parquet VARIANT type") +} + +func (t *variantType) ConvertValue(Value, Type) (Value, error) { + panic("cannot convert value to a parquet VARIANT type") +} diff --git a/vendor/github.com/parquet-go/parquet-go/value.go b/vendor/github.com/parquet-go/parquet-go/value.go new file mode 100644 index 00000000000..ef04c0a1be3 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/value.go @@ -0,0 +1,1080 @@ +package parquet + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" + "unsafe" + + "github.com/google/uuid" + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/format" +) + +const ( + // 170 x sizeof(Value) = 4KB + defaultValueBufferSize = 170 + + offsetOfPtr = unsafe.Offsetof(Value{}.ptr) + offsetOfU64 = unsafe.Offsetof(Value{}.u64) + offsetOfU32 = offsetOfU64 + firstByteOffsetOf32BitsValue + offsetOfBool = offsetOfU64 + firstByteOffsetOfBooleanValue +) + +// The Value type is similar to the reflect.Value abstraction of Go values, but +// for parquet values. Value instances wrap underlying Go values mapped to one +// of the parquet physical types. +// +// Value instances are small, immutable objects, and usually passed by value +// between function calls. +// +// The zero-value of Value represents the null parquet value. 
+type Value struct { + // data + ptr *byte + u64 uint64 + // type + kind int8 // XOR(Kind) so the zero-value is + // levels + definitionLevel byte + repetitionLevel byte + columnIndex int16 // XOR so the zero-value is -1 +} + +// ValueReader is an interface implemented by types that support reading +// batches of values. +type ValueReader interface { + // Read values into the buffer passed as argument and return the number of + // values read. When all values have been read, the error will be io.EOF. + ReadValues([]Value) (int, error) +} + +// ValueReaderAt is an interface implemented by types that support reading +// values at offsets specified by the application. +type ValueReaderAt interface { + ReadValuesAt([]Value, int64) (int, error) +} + +// ValueReaderFrom is an interface implemented by value writers to read values +// from a reader. +type ValueReaderFrom interface { + ReadValuesFrom(ValueReader) (int64, error) +} + +// ValueWriter is an interface implemented by types that support reading +// batches of values. +type ValueWriter interface { + // Write values from the buffer passed as argument and returns the number + // of values written. + WriteValues([]Value) (int, error) +} + +// ValueWriterTo is an interface implemented by value readers to write values to +// a writer. +type ValueWriterTo interface { + WriteValuesTo(ValueWriter) (int64, error) +} + +// ValueReaderFunc is a function type implementing the ValueReader interface. +type ValueReaderFunc func([]Value) (int, error) + +func (f ValueReaderFunc) ReadValues(values []Value) (int, error) { return f(values) } + +// ValueWriterFunc is a function type implementing the ValueWriter interface. +type ValueWriterFunc func([]Value) (int, error) + +func (f ValueWriterFunc) WriteValues(values []Value) (int, error) { return f(values) } + +// CopyValues copies values from src to dst, returning the number of values +// that were written. 
+// +// As an optimization, the reader and writer may choose to implement +// ValueReaderFrom and ValueWriterTo to provide their own copy logic. +// +// The function returns any error it encounters reading or writing pages, except +// for io.EOF from the reader which indicates that there were no more values to +// read. +func CopyValues(dst ValueWriter, src ValueReader) (int64, error) { + return copyValues(dst, src, nil) +} + +func copyValues(dst ValueWriter, src ValueReader, buf []Value) (written int64, err error) { + if wt, ok := src.(ValueWriterTo); ok { + return wt.WriteValuesTo(dst) + } + + if rf, ok := dst.(ValueReaderFrom); ok { + return rf.ReadValuesFrom(src) + } + + if len(buf) == 0 { + buf = make([]Value, defaultValueBufferSize) + } + + defer clearValues(buf) + + for { + n, err := src.ReadValues(buf) + + if n > 0 { + wn, werr := dst.WriteValues(buf[:n]) + written += int64(wn) + if werr != nil { + return written, werr + } + } + + if err != nil { + if err == io.EOF { + err = nil + } + return written, err + } + + if n == 0 { + return written, io.ErrNoProgress + } + } +} + +// ValueOf constructs a parquet value from a Go value v. +// +// The physical type of the value is assumed from the Go type of v using the +// following conversion table: +// +// Go type | Parquet physical type +// ------- | --------------------- +// nil | NULL +// bool | BOOLEAN +// int8 | INT32 +// int16 | INT32 +// int32 | INT32 +// int64 | INT64 +// int | INT64 +// uint8 | INT32 +// uint16 | INT32 +// uint32 | INT32 +// uint64 | INT64 +// uintptr | INT64 +// float32 | FLOAT +// float64 | DOUBLE +// string | BYTE_ARRAY +// []byte | BYTE_ARRAY +// [*]byte | FIXED_LEN_BYTE_ARRAY +// +// When converting a []byte or [*]byte value, the underlying byte array is not +// copied; instead, the returned parquet value holds a reference to it. +// +// The repetition and definition levels of the returned value are both zero. 
//
// The function panics if the Go value cannot be represented in parquet.
func ValueOf(v any) Value {
	// k stays negative until a parquet physical type is chosen; a negative
	// value at the end means v has no parquet representation.
	k := Kind(-1)
	t := reflect.TypeOf(v)

	// Handle types with dedicated representations before falling back to the
	// reflect.Kind-based mapping.
	switch value := v.(type) {
	case nil:
		return Value{}
	case uuid.UUID:
		return makeValueBytes(FixedLenByteArray, value[:])
	case deprecated.Int96:
		return makeValueInt96(value)
	case time.Time:
		// Timestamps are stored as INT64; the unit is resolved in makeValue.
		k = Int64
	}

	switch t.Kind() {
	case reflect.Bool:
		k = Boolean
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		k = Int32
	case reflect.Int64, reflect.Int, reflect.Uint64, reflect.Uint, reflect.Uintptr:
		k = Int64
	case reflect.Float32:
		k = Float
	case reflect.Float64:
		k = Double
	case reflect.String:
		k = ByteArray
	case reflect.Slice:
		if t.Elem().Kind() == reflect.Uint8 {
			k = ByteArray
		}
	case reflect.Array:
		if t.Elem().Kind() == reflect.Uint8 {
			k = FixedLenByteArray
		}
	}

	if k < 0 {
		panic("cannot create parquet value from go value of type " + t.String())
	}

	return makeValue(k, nil, reflect.ValueOf(v))
}

// NullValue constructs a null value, which is the zero-value of the Value type.
func NullValue() Value { return Value{} }

// ZeroValue constructs a zero value of the given kind.
func ZeroValue(kind Kind) Value { return makeValueKind(kind) }

// BooleanValue constructs a BOOLEAN parquet value from the bool passed as
// argument.
func BooleanValue(value bool) Value { return makeValueBoolean(value) }

// Int32Value constructs an INT32 parquet value from the int32 passed as
// argument.
func Int32Value(value int32) Value { return makeValueInt32(value) }

// Int64Value constructs an INT64 parquet value from the int64 passed as
// argument.
func Int64Value(value int64) Value { return makeValueInt64(value) }

// Int96Value constructs an INT96 parquet value from the deprecated.Int96 passed
// as argument.
+func Int96Value(value deprecated.Int96) Value { return makeValueInt96(value) } + +// FloatValue constructs a FLOAT parquet value from the float32 passed as +// argument. +func FloatValue(value float32) Value { return makeValueFloat(value) } + +// DoubleValue constructs a DOUBLE parquet value from the float64 passed as +// argument. +func DoubleValue(value float64) Value { return makeValueDouble(value) } + +// ByteArrayValue constructs a BYTE_ARRAY parquet value from the byte slice +// passed as argument. +func ByteArrayValue(value []byte) Value { return makeValueBytes(ByteArray, value) } + +// FixedLenByteArrayValue constructs a BYTE_ARRAY parquet value from the byte +// slice passed as argument. +func FixedLenByteArrayValue(value []byte) Value { return makeValueBytes(FixedLenByteArray, value) } + +func makeValue(k Kind, lt *format.LogicalType, v reflect.Value) Value { + if v.Kind() == reflect.Interface { + if v.IsNil() { + return Value{} + } + if v = v.Elem(); v.Kind() == reflect.Pointer && v.IsNil() { + return Value{} + } + } + + switch v.Type() { + case reflect.TypeOf(time.Time{}): + unit := Nanosecond.TimeUnit() + if lt != nil && lt.Timestamp != nil { + unit = lt.Timestamp.Unit + } + + t := v.Interface().(time.Time) + var val int64 + switch { + case unit.Millis != nil: + val = t.UnixMilli() + case unit.Micros != nil: + val = t.UnixMicro() + default: + val = t.UnixNano() + } + return makeValueInt64(val) + } + + switch k { + case Boolean: + return makeValueBoolean(v.Bool()) + + case Int32: + switch v.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32: + return makeValueInt32(int32(v.Int())) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return makeValueInt32(int32(v.Uint())) + } + + case Int64: + switch v.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return makeValueInt64(v.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + return 
makeValueUint64(v.Uint()) + } + + case Int96: + switch v.Type() { + case reflect.TypeOf(deprecated.Int96{}): + return makeValueInt96(v.Interface().(deprecated.Int96)) + } + + case Float: + switch v.Kind() { + case reflect.Float32: + return makeValueFloat(float32(v.Float())) + } + + case Double: + switch v.Kind() { + case reflect.Float32, reflect.Float64: + return makeValueDouble(v.Float()) + } + + case ByteArray: + switch v.Kind() { + case reflect.String: + return makeValueString(k, v.String()) + case reflect.Slice: + if v.Type().Elem().Kind() == reflect.Uint8 { + return makeValueBytes(k, v.Bytes()) + } + } + + case FixedLenByteArray: + switch v.Kind() { + case reflect.String: + if lt.UUID != nil { // uuid + uuidStr := v.String() + encoded, err := uuid.MustParse(uuidStr).MarshalBinary() + if err != nil { + panic(fmt.Errorf("error marshalling uuid: %w", err)) + } + return makeValueByteArray(k, unsafe.SliceData(encoded), len(encoded)) + } + return makeValueString(k, v.String()) + case reflect.Array: + if v.Type().Elem().Kind() == reflect.Uint8 { + return makeValueFixedLenByteArray(v) + } + case reflect.Slice: + if v.Type().Elem().Kind() == reflect.Uint8 { + return makeValueBytes(k, v.Bytes()) + } + } + } + + panic("cannot create parquet value of type " + k.String() + " from go value of type " + v.Type().String()) +} + +func makeValueKind(kind Kind) Value { + return Value{kind: ^int8(kind)} +} + +func makeValueBoolean(value bool) Value { + v := Value{kind: ^int8(Boolean)} + if value { + v.u64 = 1 + } + return v +} + +func makeValueInt32(value int32) Value { + return Value{ + kind: ^int8(Int32), + u64: uint64(value), + } +} + +func makeValueInt64(value int64) Value { + return Value{ + kind: ^int8(Int64), + u64: uint64(value), + } +} + +func makeValueInt96(value deprecated.Int96) Value { + // TODO: this is highly inefficient because we need a heap allocation to + // store the value; we don't expect INT96 to be used frequently since it + // is a deprecated feature of 
parquet, and it helps keep the Value type + // compact for all the other more common cases. + bits := [12]byte{} + binary.LittleEndian.PutUint32(bits[0:4], value[0]) + binary.LittleEndian.PutUint32(bits[4:8], value[1]) + binary.LittleEndian.PutUint32(bits[8:12], value[2]) + return Value{ + kind: ^int8(Int96), + ptr: &bits[0], + u64: 12, // set the length so we can use the ByteArray method + } +} + +func makeValueUint32(value uint32) Value { + return Value{ + kind: ^int8(Int32), + u64: uint64(value), + } +} + +func makeValueUint64(value uint64) Value { + return Value{ + kind: ^int8(Int64), + u64: value, + } +} + +func makeValueFloat(value float32) Value { + return Value{ + kind: ^int8(Float), + u64: uint64(math.Float32bits(value)), + } +} + +func makeValueDouble(value float64) Value { + return Value{ + kind: ^int8(Double), + u64: math.Float64bits(value), + } +} + +func makeValueBytes(kind Kind, value []byte) Value { + return makeValueByteArray(kind, unsafe.SliceData(value), len(value)) +} + +func makeValueString(kind Kind, value string) Value { + return makeValueByteArray(kind, unsafe.StringData(value), len(value)) +} + +func makeValueFixedLenByteArray(v reflect.Value) Value { + t := v.Type() + // When the array is addressable, we take advantage of this + // condition to avoid the heap allocation otherwise needed + // to pack the reference into an interface{} value. 
+ if v.CanAddr() { + v = v.Addr() + } else { + u := reflect.New(t) + u.Elem().Set(v) + v = u + } + return makeValueByteArray(FixedLenByteArray, (*byte)(v.UnsafePointer()), t.Len()) +} + +func makeValueByteArray(kind Kind, data *byte, size int) Value { + return Value{ + kind: ^int8(kind), + ptr: data, + u64: uint64(size), + } +} + +// These methods are internal versions of methods exported by the Value type, +// they are usually inlined by the compiler and intended to be used inside the +// parquet-go package because they tend to generate better code than their +// exported counter part, which requires making a copy of the receiver. +func (v *Value) isNull() bool { return v.kind == 0 } +func (v *Value) byte() byte { return byte(v.u64) } +func (v *Value) boolean() bool { return v.u64 != 0 } +func (v *Value) int32() int32 { return int32(v.u64) } +func (v *Value) int64() int64 { return int64(v.u64) } +func (v *Value) int96() deprecated.Int96 { return makeInt96(v.byteArray()) } +func (v *Value) float() float32 { return math.Float32frombits(uint32(v.u64)) } +func (v *Value) double() float64 { return math.Float64frombits(uint64(v.u64)) } +func (v *Value) uint32() uint32 { return uint32(v.u64) } +func (v *Value) uint64() uint64 { return v.u64 } +func (v *Value) byteArray() []byte { return unsafe.Slice(v.ptr, v.u64) } +func (v *Value) string() string { return unsafe.String(v.ptr, v.u64) } +func (v *Value) be128() *[16]byte { return (*[16]byte)(unsafe.Pointer(v.ptr)) } +func (v *Value) column() int { return int(^v.columnIndex) } + +func (v Value) convertToBoolean(x bool) Value { + v.kind = ^int8(Boolean) + v.ptr = nil + v.u64 = 0 + if x { + v.u64 = 1 + } + return v +} + +func (v Value) convertToInt32(x int32) Value { + v.kind = ^int8(Int32) + v.ptr = nil + v.u64 = uint64(x) + return v +} + +func (v Value) convertToInt64(x int64) Value { + v.kind = ^int8(Int64) + v.ptr = nil + v.u64 = uint64(x) + return v +} + +func (v Value) convertToInt96(x deprecated.Int96) Value { + i96 
:= makeValueInt96(x) + v.kind = i96.kind + v.ptr = i96.ptr + v.u64 = i96.u64 + return v +} + +func (v Value) convertToFloat(x float32) Value { + v.kind = ^int8(Float) + v.ptr = nil + v.u64 = uint64(math.Float32bits(x)) + return v +} + +func (v Value) convertToDouble(x float64) Value { + v.kind = ^int8(Double) + v.ptr = nil + v.u64 = math.Float64bits(x) + return v +} + +func (v Value) convertToByteArray(x []byte) Value { + v.kind = ^int8(ByteArray) + v.ptr = unsafe.SliceData(x) + v.u64 = uint64(len(x)) + return v +} + +func (v Value) convertToFixedLenByteArray(x []byte) Value { + v.kind = ^int8(FixedLenByteArray) + v.ptr = unsafe.SliceData(x) + v.u64 = uint64(len(x)) + return v +} + +// Kind returns the kind of v, which represents its parquet physical type. +func (v Value) Kind() Kind { return ^Kind(v.kind) } + +// IsNull returns true if v is the null value. +func (v Value) IsNull() bool { return v.isNull() } + +// Byte returns v as a byte, which may truncate the underlying byte. +func (v Value) Byte() byte { return v.byte() } + +// Boolean returns v as a bool, assuming the underlying type is BOOLEAN. +func (v Value) Boolean() bool { return v.boolean() } + +// Int32 returns v as a int32, assuming the underlying type is INT32. +func (v Value) Int32() int32 { return v.int32() } + +// Int64 returns v as a int64, assuming the underlying type is INT64. +func (v Value) Int64() int64 { return v.int64() } + +// Int96 returns v as a int96, assuming the underlying type is INT96. +func (v Value) Int96() deprecated.Int96 { + var val deprecated.Int96 + if !v.isNull() { + val = v.int96() + } + return val +} + +// Float returns v as a float32, assuming the underlying type is FLOAT. +func (v Value) Float() float32 { return v.float() } + +// Double returns v as a float64, assuming the underlying type is DOUBLE. +func (v Value) Double() float64 { return v.double() } + +// Uint32 returns v as a uint32, assuming the underlying type is INT32. 
+func (v Value) Uint32() uint32 { return v.uint32() } + +// Uint64 returns v as a uint64, assuming the underlying type is INT64. +func (v Value) Uint64() uint64 { return v.uint64() } + +// ByteArray returns v as a []byte, assuming the underlying type is either +// BYTE_ARRAY or FIXED_LEN_BYTE_ARRAY. +// +// The application must treat the returned byte slice as a read-only value, +// mutating the content will result in undefined behaviors. +func (v Value) ByteArray() []byte { return v.byteArray() } + +// RepetitionLevel returns the repetition level of v. +func (v Value) RepetitionLevel() int { return int(v.repetitionLevel) } + +// DefinitionLevel returns the definition level of v. +func (v Value) DefinitionLevel() int { return int(v.definitionLevel) } + +// Column returns the column index within the row that v was created from. +// +// Returns -1 if the value does not carry a column index. +func (v Value) Column() int { return v.column() } + +// Bytes returns the binary representation of v. +// +// If v is the null value, an nil byte slice is returned. +func (v Value) Bytes() []byte { + switch v.Kind() { + case Boolean: + buf := [8]byte{} + binary.LittleEndian.PutUint32(buf[:4], v.uint32()) + return buf[0:1] + case Int32, Float: + buf := [8]byte{} + binary.LittleEndian.PutUint32(buf[:4], v.uint32()) + return buf[:4] + case Int64, Double: + buf := [8]byte{} + binary.LittleEndian.PutUint64(buf[:8], v.uint64()) + return buf[:8] + case ByteArray, FixedLenByteArray, Int96: + return v.byteArray() + default: + return nil + } +} + +// AppendBytes appends the binary representation of v to b. +// +// If v is the null value, b is returned unchanged. +func (v Value) AppendBytes(b []byte) []byte { + buf := [8]byte{} + switch v.Kind() { + case Boolean: + binary.LittleEndian.PutUint32(buf[:4], v.uint32()) + return append(b, buf[0]) + case Int32, Float: + binary.LittleEndian.PutUint32(buf[:4], v.uint32()) + return append(b, buf[:4]...) 
+ case Int64, Double: + binary.LittleEndian.PutUint64(buf[:8], v.uint64()) + return append(b, buf[:8]...) + case ByteArray, FixedLenByteArray, Int96: + return append(b, v.byteArray()...) + default: + return b + } +} + +// Format outputs a human-readable representation of v to w, using r as the +// formatting verb to describe how the value should be printed. +// +// The following formatting options are supported: +// +// %c prints the column index +// %+c prints the column index, prefixed with "C:" +// %d prints the definition level +// %+d prints the definition level, prefixed with "D:" +// %r prints the repetition level +// %+r prints the repetition level, prefixed with "R:" +// %q prints the quoted representation of v +// %+q prints the quoted representation of v, prefixed with "V:" +// %s prints the string representation of v +// %+s prints the string representation of v, prefixed with "V:" +// %v same as %s +// %+v prints a verbose representation of v +// %#v prints a Go value representation of v +// +// Format satisfies the fmt.Formatter interface. 
// Format implements fmt.Formatter; the supported verbs are documented on the
// method's doc comment above ('c', 'd', 'r', 'q', 's', 'v' with '+'/'#' flags).
func (v Value) Format(w fmt.State, r rune) {
	switch r {
	case 'c':
		// Column index.
		if w.Flag('+') {
			io.WriteString(w, "C:")
		}
		fmt.Fprint(w, v.column())

	case 'd':
		// Definition level.
		if w.Flag('+') {
			io.WriteString(w, "D:")
		}
		fmt.Fprint(w, v.definitionLevel)

	case 'r':
		// Repetition level.
		if w.Flag('+') {
			io.WriteString(w, "R:")
		}
		fmt.Fprint(w, v.repetitionLevel)

	case 'q':
		// Quoted value.
		if w.Flag('+') {
			io.WriteString(w, "V:")
		}
		switch v.Kind() {
		case ByteArray, FixedLenByteArray:
			fmt.Fprintf(w, "%q", v.byteArray())
		default:
			// Recurses into Format with the 's' verb via the %s directive.
			fmt.Fprintf(w, `"%s"`, v)
		}

	case 's':
		// String representation of the value.
		if w.Flag('+') {
			io.WriteString(w, "V:")
		}
		switch v.Kind() {
		case Boolean:
			fmt.Fprint(w, v.boolean())
		case Int32:
			fmt.Fprint(w, v.int32())
		case Int64:
			fmt.Fprint(w, v.int64())
		case Int96:
			fmt.Fprint(w, v.int96())
		case Float:
			fmt.Fprint(w, v.float())
		case Double:
			fmt.Fprint(w, v.double())
		case ByteArray, FixedLenByteArray:
			w.Write(v.byteArray())
		default:
			// NOTE(review): writing the empty string for null values looks
			// like text lost in transit (upstream writes "<null>" here) —
			// confirm against the upstream parquet-go source.
			io.WriteString(w, "")
		}

	case 'v':
		switch {
		case w.Flag('+'):
			// %+v: verbose form combining column, levels and value.
			fmt.Fprintf(w, "%+[1]c %+[1]d %+[1]r %+[1]s", v)
		case w.Flag('#'):
			// %#v: Go-syntax form.
			v.formatGoString(w)
		default:
			// %v behaves like %s.
			v.Format(w, 's')
		}
	}
}

// formatGoString writes the Go-syntax representation of v (used by the %#v
// verb), expressed in terms of the exported constructor functions.
func (v Value) formatGoString(w fmt.State) {
	io.WriteString(w, "parquet.")
	switch v.Kind() {
	case Boolean:
		fmt.Fprintf(w, "BooleanValue(%t)", v.boolean())
	case Int32:
		fmt.Fprintf(w, "Int32Value(%d)", v.int32())
	case Int64:
		fmt.Fprintf(w, "Int64Value(%d)", v.int64())
	case Int96:
		fmt.Fprintf(w, "Int96Value(%#v)", v.int96())
	case Float:
		fmt.Fprintf(w, "FloatValue(%g)", v.float())
	case Double:
		fmt.Fprintf(w, "DoubleValue(%g)", v.double())
	case ByteArray:
		fmt.Fprintf(w, "ByteArrayValue(%q)", v.byteArray())
	case FixedLenByteArray:
		fmt.Fprintf(w, "FixedLenByteArrayValue(%#v)", v.byteArray())
	default:
		// Null values have no constructor; print the zero-value literal and
		// skip the Level suffix below.
		io.WriteString(w, "Value{}")
		return
	}
	fmt.Fprintf(w, ".Level(%d,%d,%d)",
		v.RepetitionLevel(),
		v.DefinitionLevel(),
		v.Column(),
	)
}
v. +func (v Value) String() string { + switch v.Kind() { + case Boolean: + return strconv.FormatBool(v.boolean()) + case Int32: + return strconv.FormatInt(int64(v.int32()), 10) + case Int64: + return strconv.FormatInt(v.int64(), 10) + case Int96: + return v.Int96().String() + case Float: + return strconv.FormatFloat(float64(v.float()), 'g', -1, 32) + case Double: + return strconv.FormatFloat(v.double(), 'g', -1, 32) + case ByteArray, FixedLenByteArray: + return string(v.byteArray()) + default: + return "" + } +} + +// GoString returns a Go value string representation of v. +func (v Value) GoString() string { return fmt.Sprintf("%#v", v) } + +// Level returns v with the repetition level, definition level, and column index +// set to the values passed as arguments. +// +// The method panics if either argument is negative. +func (v Value) Level(repetitionLevel, definitionLevel, columnIndex int) Value { + v.repetitionLevel = makeRepetitionLevel(repetitionLevel) + v.definitionLevel = makeDefinitionLevel(definitionLevel) + v.columnIndex = ^makeColumnIndex(columnIndex) + return v +} + +// Clone returns a copy of v which does not share any pointers with it. 
+func (v Value) Clone() Value { + switch k := v.Kind(); k { + case ByteArray, FixedLenByteArray: + v.ptr = unsafe.SliceData(copyBytes(v.byteArray())) + } + return v +} + +func makeInt96(bits []byte) (i96 deprecated.Int96) { + return deprecated.Int96{ + 2: binary.LittleEndian.Uint32(bits[8:12]), + 1: binary.LittleEndian.Uint32(bits[4:8]), + 0: binary.LittleEndian.Uint32(bits[0:4]), + } +} + +func parseValue(kind Kind, data []byte) (val Value, err error) { + switch kind { + case Boolean: + if len(data) == 1 { + val = makeValueBoolean(data[0] != 0) + } + case Int32: + if len(data) == 4 { + val = makeValueInt32(int32(binary.LittleEndian.Uint32(data))) + } + case Int64: + if len(data) == 8 { + val = makeValueInt64(int64(binary.LittleEndian.Uint64(data))) + } + case Int96: + if len(data) == 12 { + val = makeValueInt96(makeInt96(data)) + } + case Float: + if len(data) == 4 { + val = makeValueFloat(float32(math.Float32frombits(binary.LittleEndian.Uint32(data)))) + } + case Double: + if len(data) == 8 { + val = makeValueDouble(float64(math.Float64frombits(binary.LittleEndian.Uint64(data)))) + } + case ByteArray, FixedLenByteArray: + val = makeValueBytes(kind, data) + } + if val.isNull() { + err = fmt.Errorf("cannot decode %s value from input of length %d", kind, len(data)) + } + return val, err +} + +func copyBytes(b []byte) []byte { + c := make([]byte, len(b)) + copy(c, b) + return c +} + +// Equal returns true if v1 and v2 are equal. +// +// Values are considered equal if they are of the same physical type and hold +// the same Go values. For BYTE_ARRAY and FIXED_LEN_BYTE_ARRAY, the content of +// the underlying byte arrays are tested for equality. +// +// Note that the repetition levels, definition levels, and column indexes are +// not compared by this function, use DeepEqual instead. 
+func Equal(v1, v2 Value) bool { + if v1.kind != v2.kind { + return false + } + switch ^Kind(v1.kind) { + case Boolean: + return v1.boolean() == v2.boolean() + case Int32: + return v1.int32() == v2.int32() + case Int64: + return v1.int64() == v2.int64() + case Int96: + return v1.int96() == v2.int96() + case Float: + return v1.float() == v2.float() + case Double: + return v1.double() == v2.double() + case ByteArray, FixedLenByteArray: + return bytes.Equal(v1.byteArray(), v2.byteArray()) + case -1: // null + return true + default: + return false + } +} + +// DeepEqual returns true if v1 and v2 are equal, including their repetition +// levels, definition levels, and column indexes. +// +// See Equal for details about how value equality is determined. +func DeepEqual(v1, v2 Value) bool { + return Equal(v1, v2) && + v1.repetitionLevel == v2.repetitionLevel && + v1.definitionLevel == v2.definitionLevel && + v1.columnIndex == v2.columnIndex +} + +var ( + _ fmt.Formatter = Value{} + _ fmt.Stringer = Value{} +) + +func clearValues(values []Value) { + for i := range values { + values[i] = Value{} + } +} + +// BooleanReader is an interface implemented by ValueReader instances which +// expose the content of a column of boolean values. +type BooleanReader interface { + // Read boolean values into the buffer passed as argument. + // + // The method returns io.EOF when all values have been read. + ReadBooleans(values []bool) (int, error) +} + +// BooleanWriter is an interface implemented by ValueWriter instances which +// support writing columns of boolean values. +type BooleanWriter interface { + // Write boolean values. + // + // The method returns the number of values written, and any error that + // occurred while writing the values. + WriteBooleans(values []bool) (int, error) +} + +// Int32Reader is an interface implemented by ValueReader instances which expose +// the content of a column of int32 values. 
+type Int32Reader interface { + // Read 32 bits integer values into the buffer passed as argument. + // + // The method returns io.EOF when all values have been read. + ReadInt32s(values []int32) (int, error) +} + +// Int32Writer is an interface implemented by ValueWriter instances which +// support writing columns of 32 bits signed integer values. +type Int32Writer interface { + // Write 32 bits signed integer values. + // + // The method returns the number of values written, and any error that + // occurred while writing the values. + WriteInt32s(values []int32) (int, error) +} + +// Int64Reader is an interface implemented by ValueReader instances which expose +// the content of a column of int64 values. +type Int64Reader interface { + // Read 64 bits integer values into the buffer passed as argument. + // + // The method returns io.EOF when all values have been read. + ReadInt64s(values []int64) (int, error) +} + +// Int64Writer is an interface implemented by ValueWriter instances which +// support writing columns of 64 bits signed integer values. +type Int64Writer interface { + // Write 64 bits signed integer values. + // + // The method returns the number of values written, and any error that + // occurred while writing the values. + WriteInt64s(values []int64) (int, error) +} + +// Int96Reader is an interface implemented by ValueReader instances which expose +// the content of a column of int96 values. +type Int96Reader interface { + // Read 96 bits integer values into the buffer passed as argument. + // + // The method returns io.EOF when all values have been read. + ReadInt96s(values []deprecated.Int96) (int, error) +} + +// Int96Writer is an interface implemented by ValueWriter instances which +// support writing columns of 96 bits signed integer values. +type Int96Writer interface { + // Write 96 bits signed integer values. + // + // The method returns the number of values written, and any error that + // occurred while writing the values. 
+ WriteInt96s(values []deprecated.Int96) (int, error) +} + +// FloatReader is an interface implemented by ValueReader instances which expose +// the content of a column of single-precision floating point values. +type FloatReader interface { + // Read single-precision floating point values into the buffer passed as + // argument. + // + // The method returns io.EOF when all values have been read. + ReadFloats(values []float32) (int, error) +} + +// FloatWriter is an interface implemented by ValueWriter instances which +// support writing columns of single-precision floating point values. +type FloatWriter interface { + // Write single-precision floating point values. + // + // The method returns the number of values written, and any error that + // occurred while writing the values. + WriteFloats(values []float32) (int, error) +} + +// DoubleReader is an interface implemented by ValueReader instances which +// expose the content of a column of double-precision float point values. +type DoubleReader interface { + // Read double-precision floating point values into the buffer passed as + // argument. + // + // The method returns io.EOF when all values have been read. + ReadDoubles(values []float64) (int, error) +} + +// DoubleWriter is an interface implemented by ValueWriter instances which +// support writing columns of double-precision floating point values. +type DoubleWriter interface { + // Write double-precision floating point values. + // + // The method returns the number of values written, and any error that + // occurred while writing the values. + WriteDoubles(values []float64) (int, error) +} + +// ByteArrayReader is an interface implemented by ValueReader instances which +// expose the content of a column of variable length byte array values. +type ByteArrayReader interface { + // Read values into the byte buffer passed as argument, returning the number + // of values written to the buffer (not the number of bytes). 
Values are + // written using the PLAIN encoding, each byte array prefixed with its + // length encoded as a 4 bytes little endian unsigned integer. + // + // The method returns io.EOF when all values have been read. + // + // If the buffer was not empty, but too small to hold at least one value, + // io.ErrShortBuffer is returned. + ReadByteArrays(values []byte) (int, error) +} + +// ByteArrayWriter is an interface implemented by ValueWriter instances which +// support writing columns of variable length byte array values. +type ByteArrayWriter interface { + // Write variable length byte array values. + // + // The values passed as input must be laid out using the PLAIN encoding, + // with each byte array prefixed with the four bytes little endian unsigned + // integer length. + // + // The method returns the number of values written to the underlying column + // (not the number of bytes), or any error that occurred while attempting to + // write the values. + WriteByteArrays(values []byte) (int, error) +} + +// FixedLenByteArrayReader is an interface implemented by ValueReader instances +// which expose the content of a column of fixed length byte array values. +type FixedLenByteArrayReader interface { + // Read values into the byte buffer passed as argument, returning the number + // of values written to the buffer (not the number of bytes). + // + // The method returns io.EOF when all values have been read. + // + // If the buffer was not empty, but too small to hold at least one value, + // io.ErrShortBuffer is returned. + ReadFixedLenByteArrays(values []byte) (int, error) +} + +// FixedLenByteArrayWriter is an interface implemented by ValueWriter instances +// which support writing columns of fixed length byte array values. +type FixedLenByteArrayWriter interface { + // Writes the fixed length byte array values. + // + // The size of the values is assumed to be the same as the expected size of + // items in the column. 
The method errors if the length of the input values + // is not a multiple of the expected item size. + WriteFixedLenByteArrays(values []byte) (int, error) +} diff --git a/vendor/github.com/parquet-go/parquet-go/value_amd64.go b/vendor/github.com/parquet-go/parquet-go/value_amd64.go new file mode 100644 index 00000000000..fbd6432fd9b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/value_amd64.go @@ -0,0 +1,18 @@ +//go:build !purego + +package parquet + +import "golang.org/x/sys/cpu" + +//go:noescape +func memsetValuesAVX2(values []Value, model Value, _ uint64) + +func memsetValues(values []Value, model Value) { + if cpu.X86.HasAVX2 { + memsetValuesAVX2(values, model, 0) + } else { + for i := range values { + values[i] = model + } + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/value_amd64.s b/vendor/github.com/parquet-go/parquet-go/value_amd64.s new file mode 100644 index 00000000000..255117412f9 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/value_amd64.s @@ -0,0 +1,59 @@ +//go:build !purego + +#include "textflag.h" + +#define sizeOfValue 24 + +// This function is an optimized implementation of the memsetValues function +// which assigns the parquet.Value passed as second argument to all elements of +// the first slice argument. +// +// The optimizations relies on the fact that we can pack 4 parquet.Value values +// into 3 YMM registers (24 x 4 = 32 x 3 = 96). 
+// +// func memsetValuesAVX2(values []Value, model Value, _ uint64) +TEXT ·memsetValuesAVX2(SB), NOSPLIT, $0-56 // 48 + padding to load model in YMM + MOVQ values_base+0(FP), AX + MOVQ values_len+8(FP), BX + + MOVQ model_ptr+24(FP), R10 + MOVQ model_u64+32(FP), R11 + MOVQ model+40(FP), R12 // go vet complains about this line but it's OK + + XORQ SI, SI // byte index + MOVQ BX, DI // byte count + IMULQ $sizeOfValue, DI + + CMPQ BX, $4 + JB test + + MOVQ BX, R8 + SHRQ $2, R8 + SHLQ $2, R8 + IMULQ $sizeOfValue, R8 + + VMOVDQU model+24(FP), Y0 + VMOVDQU Y0, Y1 + VMOVDQU Y0, Y2 + + VPERMQ $0b00100100, Y0, Y0 + VPERMQ $0b01001001, Y1, Y1 + VPERMQ $0b10010010, Y2, Y2 +loop4: + VMOVDQU Y0, 0(AX)(SI*1) + VMOVDQU Y1, 32(AX)(SI*1) + VMOVDQU Y2, 64(AX)(SI*1) + ADDQ $4*sizeOfValue, SI + CMPQ SI, R8 + JNE loop4 + VZEROUPPER + JMP test +loop: + MOVQ R10, 0(AX)(SI*1) + MOVQ R11, 8(AX)(SI*1) + MOVQ R12, 16(AX)(SI*1) + ADDQ $sizeOfValue, SI +test: + CMPQ SI, DI + JNE loop + RET diff --git a/vendor/github.com/parquet-go/parquet-go/value_be.go b/vendor/github.com/parquet-go/parquet-go/value_be.go new file mode 100644 index 00000000000..5e7c038df8b --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/value_be.go @@ -0,0 +1,19 @@ +//go:build s390x + +package parquet + +// On a big endian system, a boolean/byte value, which is in little endian byte +// format, is byte aligned to the 7th byte in a u64 (8 bytes) variable. +// Hence the data will be available at 7th byte when interpreted as a little +// endian byte format. So, in order to access a boolean/byte value out of u64 +// variable, we need to add an offset of "7". +// +// In the same way, an int32/uint32/float value, which is in little endian byte +// format, is byte aligned to the 4th byte in a u64 (8 bytes) variable. +// Hence the data will be available at 4th byte when interpreted as a little +// endian byte format. 
So, in order to access an int32/uint32/float value out of +// u64 variable, we need to add an offset of "4". +const ( + firstByteOffsetOfBooleanValue = 7 + firstByteOffsetOf32BitsValue = 4 +) diff --git a/vendor/github.com/parquet-go/parquet-go/value_le.go b/vendor/github.com/parquet-go/parquet-go/value_le.go new file mode 100644 index 00000000000..f11170ef731 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/value_le.go @@ -0,0 +1,8 @@ +//go:build !s390x + +package parquet + +const ( + firstByteOffsetOfBooleanValue = 0 + firstByteOffsetOf32BitsValue = 0 +) diff --git a/vendor/github.com/parquet-go/parquet-go/values_purego.go b/vendor/github.com/parquet-go/parquet-go/values_purego.go new file mode 100644 index 00000000000..8151134649f --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/values_purego.go @@ -0,0 +1,9 @@ +//go:build purego || !amd64 + +package parquet + +func memsetValues(values []Value, model Value) { + for i := range values { + values[i] = model + } +} diff --git a/vendor/github.com/parquet-go/parquet-go/writer.go b/vendor/github.com/parquet-go/parquet-go/writer.go new file mode 100644 index 00000000000..8413a6f5cec --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/writer.go @@ -0,0 +1,2157 @@ +package parquet + +import ( + "bufio" + "bytes" + "cmp" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "math" + "math/bits" + "os" + "reflect" + "slices" + "strings" + + "github.com/parquet-go/parquet-go/compress" + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/encoding/plain" + "github.com/parquet-go/parquet-go/encoding/thrift" + "github.com/parquet-go/parquet-go/format" + "github.com/parquet-go/parquet-go/internal/memory" + "github.com/parquet-go/parquet-go/sparse" +) + +const ( + // The uncompressed page size is stored as int32 and must not be larger than the + // maximum int32 value (see format.PageHeader). 
+ maxUncompressedPageSize = math.MaxInt32 +) + +// GenericWriter is similar to a Writer but uses a type parameter to define the +// Go type representing the schema of rows being written. +// +// Using this type over Writer has multiple advantages: +// +// - By leveraging type information, the Go compiler can provide greater +// guarantees that the code is correct. For example, the parquet.Writer.Write +// method accepts an argument of type interface{}, which delays type checking +// until runtime. The parquet.GenericWriter[T].Write method ensures at +// compile time that the values it receives will be of type T, reducing the +// risk of introducing errors. +// +// - Since type information is known at compile time, the implementation of +// parquet.GenericWriter[T] can make safe assumptions, removing the need for +// runtime validation of how the parameters are passed to its methods. +// Optimizations relying on type information are more effective, some of the +// writer's state can be precomputed at initialization, which was not possible +// with parquet.Writer. +// +// - The parquet.GenericWriter[T].Write method uses a data-oriented design, +// accepting an slice of T instead of a single value, creating more +// opportunities to amortize the runtime cost of abstractions. +// This optimization is not available for parquet.Writer because its Write +// method's argument would be of type []interface{}, which would require +// conversions back and forth from concrete types to empty interfaces (since +// a []T cannot be interpreted as []interface{} in Go), would make the API +// more difficult to use and waste compute resources in the type conversions, +// defeating the purpose of the optimization in the first place. +// +// Note that this type is only available when compiling with Go 1.18 or later. +type GenericWriter[T any] struct { + // At this time GenericWriter is expressed in terms of Writer to reuse the + // underlying logic. 
In the future, and if we accepted to break backward + // compatibility on the Write method, we could modify Writer to be an alias + // to GenericWriter with: + // + // type Writer = GenericWriter[any] + // + base Writer + // This function writes rows of type T to the writer, it gets generated by + // the NewGenericWriter function based on the type T and the underlying + // schema of the parquet file. + write writeFunc[T] + // This field is used to leverage the optimized writeRowsFunc algorithms. + columns []ColumnBuffer +} + +// NewGenericWriter is like NewWriter but returns a GenericWriter[T] suited to +// write rows of Go type T. +// +// The type parameter T should be a map, struct, or any. Any other types will +// cause a panic at runtime. Type checking is a lot more effective when the +// generic parameter is a struct type, using map and interface types is somewhat +// similar to using a Writer. +// +// If the option list may explicitly declare a schema, it must be compatible +// with the schema generated from T. +// +// Sorting columns may be set on the writer to configure the generated row +// groups metadata. However, rows are always written in the order they were +// seen, no reordering is performed, the writer expects the application to +// ensure proper correlation between the order of rows and the list of sorting +// columns. See SortingWriter[T] for a writer which handles reordering rows +// based on the configured sorting columns. +func NewGenericWriter[T any](output io.Writer, options ...WriterOption) *GenericWriter[T] { + config, err := NewWriterConfig(options...) 
+ if err != nil { + panic(err) + } + + t := typeOf[T]() + + var genWriteErr error + if t != nil { + if columnName, ok := validateColumns(dereference(t)); !ok { + genWriteErr = fmt.Errorf("caonnot write %v: it has columns with the same parquet column name %q", t, columnName) + } + } + + schemaFromConf := config.Schema + schemaFromType := (*Schema)(nil) + + if t != nil && dereference(t).Kind() == reflect.Struct { + schemaFromType = schemaOf(dereference(t), config.SchemaConfig.StructTags...) + } + + config.Schema = cmp.Or(schemaFromConf, schemaFromType) + if config.Schema == nil { + panic("generic writer must be instantiated with schema or concrete type.") + } + if len(config.Schema.Columns()) == 0 { + genWriteErr = fmt.Errorf("cannot write %v: it has no columns (maybe it has no exported fields)", t) + } + + var writeFn writeFunc[T] + switch { + case genWriteErr != nil: + writeFn = func(*GenericWriter[T], []T) (int, error) { return 0, genWriteErr } + case schemaFromType == config.Schema || (schemaFromType != nil && EqualNodes(config.Schema, schemaFromType)): + // The schema matches the type T, we can use the optimized + // writeRowsFunc algorithms mapping Go values directly to + // parquet columns, using the sparse package. + writeRows := writeRowsFuncOf(t, config.Schema, nil, config.SchemaConfig.StructTags) + writeFn = makeWriteFunc[T](t, writeRows) + default: + // The schema does not match the type T, we have to + // deconstruct each value of type T into a Row first. + // This is less efficient but still type-safe. 
+ _, writeValue := writeValueFuncOf(0, config.Schema) + writeFn = makeWriteFunc[T](t, func(columns []ColumnBuffer, levels columnLevels, rows sparse.Array) { + if rows.Len() == 0 { + writeValue(columns, levels, reflect.Value{}) + return + } + for i := range rows.Len() { + v := reflect.ValueOf((*T)(rows.Index(i))).Elem() + writeValue(columns, levels, v) + } + }) + } + + return &GenericWriter[T]{ + base: Writer{ + output: output, + config: config, + schema: config.Schema, + writer: newWriter(output, config), + }, + write: writeFn, + } +} + +func validateColumns(t reflect.Type) (string, bool) { + if t.Kind() != reflect.Struct { + return "", true + } + + columns := make(map[string]struct{}, t.NumField()) + + for i := range t.NumField() { + f := t.Field(i) + if !f.IsExported() { + continue + } + fieldTag, columnName := f.Tag.Get("parquet"), f.Name + if fieldTag != "" { + if commaIdx := strings.IndexByte(fieldTag, ','); commaIdx >= 0 { + fieldTag = fieldTag[:commaIdx] + } + if fieldTag == "-" { + continue + } + if fieldTag != "" { + columnName = fieldTag + } + } + if _, exists := columns[columnName]; exists { + return columnName, false + } else { + columns[columnName] = struct{}{} + } + } + + return "", true +} + +type writeFunc[T any] func(*GenericWriter[T], []T) (int, error) + +func makeWriteFunc[T any](t reflect.Type, writeRows writeRowsFunc) writeFunc[T] { + return func(w *GenericWriter[T], rows []T) (n int, err error) { + if w.columns == nil { + w.columns = make([]ColumnBuffer, len(w.base.writer.currentRowGroup.columns)) + for i, c := range w.base.writer.currentRowGroup.columns { + // These fields are usually lazily initialized when writing rows, + // we need them to exist now tho. 
+ c.columnBuffer = c.newColumnBuffer() + w.columns[i] = c.columnBuffer + } + } + writeRows(w.columns, columnLevels{}, makeArrayFromSlice(rows)) + return len(rows), nil + } +} + +func (w *GenericWriter[T]) Close() error { + if err := w.base.Close(); err != nil { + return err + } + // Nil out the columns slice to allow the column buffers to be garbage + // collected and to ensure that any subsequent use of this writer after + // Close will result in a clear panic rather than operating on closed + // resources. + w.columns = nil + return nil +} + +func (w *GenericWriter[T]) Flush() error { + return w.base.Flush() +} + +func (w *GenericWriter[T]) Reset(output io.Writer) { + w.base.Reset(output) +} + +func (w *GenericWriter[T]) Write(rows []T) (written int, err error) { + var n int + currentRowGroup := w.base.writer.currentRowGroup + for len(rows) > 0 { + n, err = currentRowGroup.writeRows(len(rows), func(i, j int) (int, error) { + n, err := w.write(w, rows[i:j:j]) + if err != nil { + return n, err + } + + for _, c := range currentRowGroup.columns { + if c.columnBuffer != nil && c.columnBuffer.Size() >= int64(c.bufferSize) { + if err := c.Flush(); err != nil { + return n, err + } + } + } + + return n, nil + }) + rows = rows[n:] + written += n + if err != ErrTooManyRowGroups { + break + } + if err = w.base.writer.flush(); err != nil { + break + } + } + return +} + +func (w *GenericWriter[T]) WriteRows(rows []Row) (int, error) { + return w.base.WriteRows(rows) +} + +func (w *GenericWriter[T]) WriteRowGroup(rowGroup RowGroup) (int64, error) { + return w.base.WriteRowGroup(rowGroup) +} + +// SetKeyValueMetadata sets a key/value pair in the Parquet file metadata. +// +// Keys are assumed to be unique, if the same key is repeated multiple times the +// last value is retained. 
While the parquet format does not require unique keys, +// this design decision was made to optimize for the most common use case where +// applications leverage this extension mechanism to associate single values to +// keys. This may create incompatibilities with other parquet libraries, or may +// cause some key/value pairs to be lost when open parquet files written with +// repeated keys. We can revisit this decision if it ever becomes a blocker. +func (w *GenericWriter[T]) SetKeyValueMetadata(key, value string) { + w.base.SetKeyValueMetadata(key, value) +} + +func (w *GenericWriter[T]) ReadRowsFrom(rows RowReader) (int64, error) { + return w.base.ReadRowsFrom(rows) +} + +func (w *GenericWriter[T]) Schema() *Schema { + return w.base.Schema() +} + +func (w *GenericWriter[T]) ColumnWriters() []*ColumnWriter { + return w.base.ColumnWriters() +} + +// File returns a FileView of the written parquet file. +// Only available after Close is called. +func (w *GenericWriter[T]) File() FileView { + return w.base.File() +} + +// ConcurrentRowGroupWriter is a row group writer that can be used to write row groups +// in parallel. Multiple row groups can be created concurrently and written to independently, +// but they must be committed serially to maintain the order of row groups in the file. +// +// See BeginRowGroup for more information on how this can be used. +// +// While multiple row groups can be created concurrently, a single row group must be written +// sequentially. +type ConcurrentRowGroupWriter interface { + RowWriterWithSchema + + // Flush flushes any buffered data in the row group's column writers. + // This could be called before Commit to ensure all data pages are flushed. + Flush() error + + // ColumnWriters returns the column writers for this row group, allowing + // direct access to write values to individual columns. 
+ ColumnWriters() []*ColumnWriter + + // Commit commits the row group to the parent writer, returning the number + // of rows written and an error if any. This method must be called serially + // (not concurrently) to maintain row group order in the file. + // + // If the parent writer has any pending rows buffered, they will be flushed + // before this row group is written. + // + // After Commit returns successfully, the row group will be empty and can + // be reused. + Commit() (int64, error) +} + +// BeginRowGroup returns a new ConcurrentRowGroupWriter that can be written to in parallel with +// other row groups. However these need to be committed back to the writer serially using the +// Commit method on the row group. +// +// Example usage could look something like: +// +// writer := parquet.NewGenericWriter[any](...) +// rgs := make([]parquet.ConcurrentRowGroupWriter, 5) +// var wg sync.WaitGroup +// for i := range rgs { +// rg := writer.BeginRowGroup() +// rgs[i] = rg +// wg.Add(1) +// go func() { +// defer wg.Done() +// writeChunkRows(i, rg) +// }() +// } +// wg.Wait() +// for _, rg := range rgs { +// if _, err := rg.Commit(); err != nil { +// return err +// } +// } +// return writer.Close() +func (w *GenericWriter[T]) BeginRowGroup() ConcurrentRowGroupWriter { + return newWriterRowGroup(w.base.writer, w.base.config) +} + +var ( + _ RowWriterWithSchema = (*GenericWriter[any])(nil) + _ RowReaderFrom = (*GenericWriter[any])(nil) + _ RowGroupWriter = (*GenericWriter[any])(nil) + + _ RowWriterWithSchema = (*GenericWriter[struct{}])(nil) + _ RowReaderFrom = (*GenericWriter[struct{}])(nil) + _ RowGroupWriter = (*GenericWriter[struct{}])(nil) + + _ RowWriterWithSchema = (*GenericWriter[map[struct{}]struct{}])(nil) + _ RowReaderFrom = (*GenericWriter[map[struct{}]struct{}])(nil) + _ RowGroupWriter = (*GenericWriter[map[struct{}]struct{}])(nil) + + _ ConcurrentRowGroupWriter = (*writerRowGroup)(nil) +) + +// Deprecated: A Writer uses a parquet schema and sequence 
of Go values to +// produce a parquet file to an io.Writer. +// +// Use NewGenericWriter instead. To maintain dynamic behavior (schema unknown at compile time), +// use "any" as the type parameter: +// +// This gives you the same behavior as the old Writer +// w := parquet.NewGenericWriter[any](output, schema) +// +// This example showcases a typical use of parquet writers: +// +// writer := parquet.NewWriter(output) +// +// for _, row := range rows { +// if err := writer.Write(row); err != nil { +// ... +// } +// } +// +// if err := writer.Close(); err != nil { +// ... +// } +// +// The Writer type optimizes for minimal memory usage, each page is written as +// soon as it has been filled so only a single page per column needs to be held +// in memory and as a result, there are no opportunities to sort rows within an +// entire row group. Programs that need to produce parquet files with sorted +// row groups should use the Buffer type to buffer and sort the rows prior to +// writing them to a Writer. +// +// For programs building with Go 1.18 or later, the GenericWriter[T] type +// supersedes this one. +type Writer struct { + output io.Writer + config *WriterConfig + schema *Schema + writer *writer + rowbuf []Row +} + +// NewWriter constructs a parquet writer writing a file to the given io.Writer. +// +// The function panics if the writer configuration is invalid. Programs that +// cannot guarantee the validity of the options passed to NewWriter should +// construct the writer configuration independently prior to calling this +// function: +// +// config, err := parquet.NewWriterConfig(options...) +// if err != nil { +// // handle the configuration error +// ... +// } else { +// // this call to create a writer is guaranteed not to panic +// writer := parquet.NewWriter(output, config) +// ... +// } +func NewWriter(output io.Writer, options ...WriterOption) *Writer { + config, err := NewWriterConfig(options...) 
+ if err != nil { + panic(err) + } + w := &Writer{ + output: output, + config: config, + } + if config.Schema != nil { + w.configure(config.Schema) + } + return w +} + +func (w *Writer) configure(schema *Schema) { + if schema != nil { + w.config.Schema = schema + w.schema = schema + w.writer = newWriter(w.output, w.config) + } +} + +// Close must be called after all values were produced to the writer in order to +// flush all buffers and write the parquet footer. +func (w *Writer) Close() error { + for _, c := range w.ColumnWriters() { + if err := c.Close(); err != nil { + return err + } + } + if w.writer != nil { + return w.writer.close() + } + return nil +} + +// Flush flushes all buffers into a row group to the underlying io.Writer. +// +// Flush is called automatically on Close, it is only useful to call explicitly +// if the application needs to limit the size of row groups or wants to produce +// multiple row groups per file. +// +// If the writer attempts to create more than MaxRowGroups row groups the method +// returns ErrTooManyRowGroups. +func (w *Writer) Flush() error { + if w.writer != nil { + return w.writer.flush() + } + return nil +} + +// Reset clears the state of the writer without flushing any of the buffers, +// and setting the output to the io.Writer passed as argument, allowing the +// writer to be reused to produce another parquet file. +// +// Reset may be called at any time, including after a writer was closed. +func (w *Writer) Reset(output io.Writer) { + if w.output = output; w.writer != nil { + w.writer.reset(w.output) + } +} + +// Write is called to write another row to the parquet file. +// +// The method uses the parquet schema configured on w to traverse the Go value +// and decompose it into a set of columns and values. If no schema were passed +// to NewWriter, it is deducted from the Go type of the row, which then have to +// be a struct or pointer to struct. 
+func (w *Writer) Write(row any) error { + if w.schema == nil { + w.configure(SchemaOf(row)) + } + if cap(w.rowbuf) == 0 { + w.rowbuf = make([]Row, 1) + } else { + w.rowbuf = w.rowbuf[:1] + } + defer clearRows(w.rowbuf) + w.rowbuf[0] = w.schema.Deconstruct(w.rowbuf[0][:0], row) + _, err := w.WriteRows(w.rowbuf) + return err +} + +// WriteRows is called to write rows to the parquet file. +// +// The Writer must have been given a schema when NewWriter was called, otherwise +// the structure of the parquet file cannot be determined from the row only. +// +// The row is expected to contain values for each column of the writer's schema, +// in the order produced by the parquet.(*Schema).Deconstruct method. +func (w *Writer) WriteRows(rows []Row) (int, error) { + return w.writer.WriteRows(rows) +} + +// WriteRowGroup writes a row group to the parquet file. +// +// Buffered rows will be flushed prior to writing rows from the group, unless +// the row group was empty in which case nothing is written to the file. +// +// The content of the row group is flushed to the writer; after the method +// returns successfully, the row group will be empty and in ready to be reused. +func (w *Writer) WriteRowGroup(rowGroup RowGroup) (int64, error) { + rowGroupSchema := rowGroup.Schema() + switch { + case rowGroupSchema == nil: + return 0, ErrRowGroupSchemaMissing + case w.schema == nil: + w.configure(rowGroupSchema) + case !EqualNodes(w.schema, rowGroupSchema): + return 0, ErrRowGroupSchemaMismatch + } + if err := w.writer.flush(); err != nil { + return 0, err + } + w.writer.currentRowGroup.configureBloomFilters(rowGroup.ColumnChunks()) + rows := rowGroup.Rows() + defer rows.Close() + n, err := CopyRows(w.writer, rows) + if err != nil { + return n, err + } + return w.writer.writeRowGroup(w.writer.currentRowGroup, rowGroup.Schema(), rowGroup.SortingColumns()) +} + +// ReadRowsFrom reads rows from the reader passed as arguments and writes them +// to w. 
+// +// This is similar to calling WriteRow repeatedly, but will be more efficient +// if optimizations are supported by the reader. +func (w *Writer) ReadRowsFrom(rows RowReader) (written int64, err error) { + if w.schema == nil { + if r, ok := rows.(RowReaderWithSchema); ok { + w.configure(r.Schema()) + } + } + if cap(w.rowbuf) < defaultRowBufferSize { + w.rowbuf = make([]Row, defaultRowBufferSize) + } else { + w.rowbuf = w.rowbuf[:cap(w.rowbuf)] + } + return copyRows(w.writer, rows, w.rowbuf) +} + +// Schema returns the schema of rows written by w. +// +// The returned value will be nil if no schema has yet been configured on w. +func (w *Writer) Schema() *Schema { return w.schema } + +// SetKeyValueMetadata sets a key/value pair in the Parquet file metadata. +// +// Keys are assumed to be unique, if the same key is repeated multiple times the +// last value is retained. While the parquet format does not require unique keys, +// this design decision was made to optimize for the most common use case where +// applications leverage this extension mechanism to associate single values to +// keys. This may create incompatibilities with other parquet libraries, or may +// cause some key/value pairs to be lost when open parquet files written with +// repeated keys. We can revisit this decision if it ever becomes a blocker. +func (w *Writer) SetKeyValueMetadata(key, value string) { + for i, kv := range w.writer.metadata { + if kv.Key == key { + kv.Value = value + w.writer.metadata[i] = kv + return + } + } + w.writer.metadata = append(w.writer.metadata, format.KeyValue{ + Key: key, + Value: value, + }) +} + +// ColumnWriters returns writers for each column. This allows applications to +// write values directly to each column instead of having to first assemble +// values into rows to use WriteRows. 
+func (w *Writer) ColumnWriters() []*ColumnWriter { return w.writer.currentRowGroup.columns } + +// BeginRowGroup returns a new ConcurrentRowGroupWriter that can be written to in parallel with +// other row groups. However these need to be committed back to the writer serially using the +// Commit method on the row group. +func (w *Writer) BeginRowGroup() ConcurrentRowGroupWriter { + return newWriterRowGroup(w.writer, w.config) +} + +type writerFileView struct { + writer *writer + schema *Schema +} + +// File returns a FileView of the written parquet file. +// Only available after Close is called. +func (w *Writer) File() FileView { + if w.writer == nil || w.schema == nil { + return nil + } + return &writerFileView{ + w.writer, + w.schema, + } +} + +func (w *writerFileView) Metadata() *format.FileMetaData { + return &w.writer.fileMetaData +} + +func (w *writerFileView) Schema() *Schema { + return w.schema +} + +func (w *writerFileView) NumRows() int64 { + return w.writer.fileMetaData.NumRows +} + +func (w *writerFileView) Lookup(key string) (string, bool) { + return lookupKeyValueMetadata(w.writer.fileMetaData.KeyValueMetadata, key) +} + +func (w *writerFileView) Size() int64 { + return w.writer.writer.offset +} + +func (w *writerFileView) ColumnIndexes() []format.ColumnIndex { + return w.writer.currentRowGroup.columnIndex +} + +func (w *writerFileView) OffsetIndexes() []format.OffsetIndex { + return w.writer.currentRowGroup.offsetIndex +} + +func (w *writerFileView) Root() *Column { + root, _ := openColumns(nil, &w.writer.fileMetaData, w.writer.currentRowGroup.columnIndex, w.writer.currentRowGroup.offsetIndex) + return root +} + +func (w *writerFileView) RowGroups() []RowGroup { + columns := makeLeafColumns(w.Root()) + file := &File{metadata: w.writer.fileMetaData, schema: w.schema} + fileRowGroups := makeFileRowGroups(file, columns) + return makeRowGroups(fileRowGroups) +} + +type writerRowGroup struct { + writer *writer + config *WriterConfig + values [][]Value 
+ numRows int64 + maxRows int64 + columns []*ColumnWriter + columnChunk []format.ColumnChunk + columnIndex []format.ColumnIndex + offsetIndex []format.OffsetIndex +} + +func newWriterRowGroup(w *writer, config *WriterConfig) *writerRowGroup { + rg := &writerRowGroup{ + writer: w, + config: config, + maxRows: config.MaxRowsPerRowGroup, + } + + dataPageType := format.DataPage + if config.DataPageVersion == 2 { + dataPageType = format.DataPageV2 + } + + defaultCompression := config.Compression + if defaultCompression == nil { + defaultCompression = &Uncompressed + } + + forEachLeafColumnOf(config.Schema, func(leaf leafColumn) { + encoding := encodingOf(leaf.node, config.Encodings) + dictionary := Dictionary(nil) + columnType := leaf.node.Type() + columnIndex := int(leaf.columnIndex) + compression := leaf.node.Compression() + + if compression == nil { + compression = defaultCompression + } + + if isDictionaryEncoding(encoding) { + dictBuffer := columnType.NewValues(make([]byte, 0, defaultDictBufferSize), nil) + dictionary = columnType.NewDictionary(columnIndex, 0, dictBuffer) + columnType = dictionary.Type() + } + + c := &ColumnWriter{ + pool: config.ColumnPageBuffers, + columnPath: leaf.path, + columnType: columnType, + originalType: columnType, + columnIndex: columnType.NewColumnIndexer(config.ColumnIndexSizeLimit(leaf.path)), + columnFilter: searchBloomFilterColumn(config.BloomFilters, leaf.path), + compression: compression, + dictionary: dictionary, + maxRepetitionLevel: leaf.maxRepetitionLevel, + maxDefinitionLevel: leaf.maxDefinitionLevel, + bufferIndex: int32(leaf.columnIndex), + bufferSize: int32(float64(config.PageBufferSize) * 0.98), + writePageStats: config.DataPageStatistics, + writePageBounds: !slices.ContainsFunc(config.SkipPageBounds, func(skip []string) bool { + return columnPath(skip).equal(leaf.path) + }), + writeDeprecatedStatistics: config.DeprecatedDataPageStatistics, + encodings: make([]format.Encoding, 0, 3), + // Data pages in version 2 can omit 
compression when dictionary + // encoding is employed; only the dictionary page needs to be + // compressed, the data pages are encoded with the hybrid + // RLE/Bit-Pack encoding which doesn't benefit from an extra + // compression layer. + isCompressed: isCompressed(compression) && (dataPageType != format.DataPageV2 || dictionary == nil), + dictionaryMaxBytes: config.DictionaryMaxBytes, + } + + if dictionary != nil { + c.header.dict.Type = format.DictionaryPage + c.header.dict.DictionaryPageHeader = new(format.DictionaryPageHeader) + } + + c.header.page.Type = dataPageType + switch dataPageType { + case format.DataPage: + c.header.page.DataPageHeader = new(format.DataPageHeader) + case format.DataPageV2: + c.header.page.DataPageHeaderV2 = new(format.DataPageHeaderV2) + } + + c.header.encoder.Reset(c.header.protocol.NewWriter(&c.header.buffer)) + + if c.maxRepetitionLevel > 0 { + c.repetitionLevelHistogram = make([]int64, int(c.maxRepetitionLevel)+1) + } + if c.maxDefinitionLevel > 0 { + c.definitionLevelHistogram = make([]int64, int(c.maxDefinitionLevel)+1) + c.encodings = addEncoding(c.encodings, format.RLE) + } + + if isDictionaryEncoding(encoding) { + c.encodings = addEncoding(c.encodings, format.Plain) + } + + c.encoding = encoding + c.originalEncoding = encoding + c.encodings = addEncoding(c.encodings, c.encoding.Encoding()) + sortPageEncodings(c.encodings) + + rg.columns = append(rg.columns, c) + }) + + // Pre-allocate the backing array so that in most cases where the rows + // contain a single value we will hit collocated memory areas when writing + // rows to the writer. This won't benefit repeated columns much but in that + // case we would just waste a bit of memory which we can afford. 
+ values := make([]Value, len(rg.columns)) + rg.values = make([][]Value, len(rg.columns)) + for i := range values { + rg.values[i] = values[i : i : i+1] + } + + rg.columnChunk = make([]format.ColumnChunk, len(rg.columns)) + rg.columnIndex = make([]format.ColumnIndex, len(rg.columns)) + rg.offsetIndex = make([]format.OffsetIndex, len(rg.columns)) + + for i, c := range rg.columns { + rg.columnChunk[i] = format.ColumnChunk{ + MetaData: format.ColumnMetaData{ + Type: format.Type(c.columnType.Kind()), + Encoding: c.encodings, + PathInSchema: c.columnPath, + Codec: c.compression.CompressionCodec(), + KeyValueMetadata: nil, // TODO + }, + } + } + + for i, c := range rg.columns { + c.columnChunk = &rg.columnChunk[i] + c.offsetIndex = &rg.offsetIndex[i] + } + + return rg +} + +func (rg *writerRowGroup) reset() { + rg.numRows = 0 + for _, c := range rg.columns { + c.reset() + } +} + +func (rg *writerRowGroup) configureBloomFilters(columnChunks []ColumnChunk) { + for i, c := range rg.columns { + if c.columnFilter != nil { + c.resizeBloomFilter(columnChunks[i].NumValues()) + } + } +} + +func (rg *writerRowGroup) Schema() *Schema { + return rg.config.Schema +} + +func (rg *writerRowGroup) ColumnWriters() []*ColumnWriter { + return rg.columns +} + +func (rg *writerRowGroup) Flush() error { + for _, c := range rg.columns { + if err := c.Flush(); err != nil { + return err + } + } + return nil +} + +func (rg *writerRowGroup) Commit() (int64, error) { + if err := rg.writer.flush(); err != nil { + return 0, err + } + return rg.writer.writeRowGroup(rg, nil, nil) +} + +func (rg *writerRowGroup) WriteRows(rows []Row) (int, error) { + return rg.writeRows(len(rows), func(start, end int) (int, error) { + defer func() { + for i, values := range rg.values { + clearValues(values) + rg.values[i] = values[:0] + } + }() + + // TODO: if an error occurs in this method the writer may be left in an + // partially functional state. 
Applications are not expected to continue + // using the writer after getting an error, but maybe we could ensure that + // we are preventing further use as well? + for _, row := range rows[start:end] { + for columnIndex, columnValues := range row.Range { + rg.values[columnIndex] = append(rg.values[columnIndex], columnValues...) + } + } + + for i, values := range rg.values { + if len(values) > 0 { + if _, err := rg.columns[i].WriteRowValues(values); err != nil { + return 0, err + } + } + } + + return end - start, nil + }) +} + +func (rg *writerRowGroup) writeRows(numRows int, write func(i, j int) (int, error)) (int, error) { + written := 0 + + for written < numRows { + remain := rg.maxRows - rg.numRows + length := numRows - written + + if remain <= 0 { + return written, ErrTooManyRowGroups + } + + if remain < int64(length) { + length = int(remain) + } + + // Since the writer cannot flush pages across row boundaries, calls to + // WriteRows with very large slices can result in greatly exceeding the + // target page size. To set a limit to the impact of these large writes + // we chunk the input in slices of 64 rows. 
+ const maxRowsPerWrite = 64 + if length > maxRowsPerWrite { + length = maxRowsPerWrite + } + + n, err := write(written, written+length) + written += n + rg.numRows += int64(n) + if err != nil { + return written, err + } + } + + return written, nil +} + +type writer struct { + buffer *bufio.Writer + writer offsetTrackingWriter + currentRowGroup *writerRowGroup + + createdBy string + metadata []format.KeyValue + + columnOrders []format.ColumnOrder + schemaElements []format.SchemaElement + rowGroups []format.RowGroup + columnIndexes [][]format.ColumnIndex + offsetIndexes [][]format.OffsetIndex + sortingColumns []format.SortingColumn + + fileMetaData format.FileMetaData + footer [8]byte +} + +func newWriter(output io.Writer, config *WriterConfig) *writer { + w := new(writer) + if config.WriteBufferSize <= 0 { + w.writer.Reset(output) + } else { + w.buffer = bufio.NewWriterSize(output, config.WriteBufferSize) + w.writer.Reset(w.buffer) + } + w.createdBy = config.CreatedBy + w.metadata = make([]format.KeyValue, 0, len(config.KeyValueMetadata)) + for k, v := range config.KeyValueMetadata { + w.metadata = append(w.metadata, format.KeyValue{Key: k, Value: v}) + } + sortKeyValueMetadata(w.metadata) + w.sortingColumns = make([]format.SortingColumn, len(config.Sorting.SortingColumns)) + + config.Schema.forEachNode(func(name string, node Node) { + nodeType := node.Type() + + repetitionType := (*format.FieldRepetitionType)(nil) + if node != config.Schema { // the root has no repetition type + repetitionType = fieldRepetitionTypePtrOf(node) + } + // For backward compatibility with older readers, the parquet specification + // recommends to set the scale and precision on schema elements when the + // column is of logical type decimal. 
+ logicalType := nodeType.LogicalType() + scale, precision := (*int32)(nil), (*int32)(nil) + if logicalType != nil && logicalType.Decimal != nil { + scale = &logicalType.Decimal.Scale + precision = &logicalType.Decimal.Precision + } + + typeLength := (*int32)(nil) + if n := int32(nodeType.Length()); n > 0 { + typeLength = &n + } + + var numChildren *int32 + if !node.Leaf() { + n := int32(len(node.Fields())) + numChildren = &n + } + + w.schemaElements = append(w.schemaElements, format.SchemaElement{ + Type: nodeType.PhysicalType(), + TypeLength: typeLength, + RepetitionType: repetitionType, + Name: name, + NumChildren: numChildren, + ConvertedType: nodeType.ConvertedType(), + Scale: scale, + Precision: precision, + FieldID: int32(node.ID()), + LogicalType: logicalType, + }) + }) + + w.currentRowGroup = newWriterRowGroup(w, config) + + if len(config.Sorting.SortingColumns) > 0 { + forEachLeafColumnOf(config.Schema, func(leaf leafColumn) { + if sortingIndex := searchSortingColumn(config.Sorting.SortingColumns, leaf.path); sortingIndex < len(w.sortingColumns) { + w.sortingColumns[sortingIndex] = format.SortingColumn{ + ColumnIdx: int32(leaf.columnIndex), + Descending: config.Sorting.SortingColumns[sortingIndex].Descending(), + NullsFirst: config.Sorting.SortingColumns[sortingIndex].NullsFirst(), + } + } + }) + } + + w.columnOrders = make([]format.ColumnOrder, len(w.currentRowGroup.columns)) + for i, c := range w.currentRowGroup.columns { + w.columnOrders[i] = *c.columnType.ColumnOrder() + } + + copy(w.footer[4:], "PAR1") + return w +} + +func (w *writer) reset(writer io.Writer) { + if w.buffer == nil { + w.writer.Reset(writer) + } else { + w.buffer.Reset(writer) + w.writer.Reset(w.buffer) + } + w.currentRowGroup.reset() + for i := range w.rowGroups { + w.rowGroups[i] = format.RowGroup{} + } + for i := range w.columnIndexes { + w.columnIndexes[i] = nil + } + for i := range w.offsetIndexes { + w.offsetIndexes[i] = nil + } + w.rowGroups = w.rowGroups[:0] + w.columnIndexes 
= w.columnIndexes[:0] + w.offsetIndexes = w.offsetIndexes[:0] + w.fileMetaData = format.FileMetaData{} +} + +func (w *writer) close() error { + if err := w.writeFileHeader(); err != nil { + return err + } + if err := w.flush(); err != nil { + return err + } + if err := w.writeFileFooter(); err != nil { + return err + } + if w.buffer != nil { + return w.buffer.Flush() + } + return nil +} + +func (w *writer) flush() error { + _, err := w.writeRowGroup(w.currentRowGroup, nil, nil) + return err +} + +func (w *writer) writeFileHeader() error { + if w.writer.writer == nil { + return io.ErrClosedPipe + } + if w.writer.offset == 0 { + _, err := w.writer.WriteString("PAR1") + return err + } + return nil +} + +func (w *writer) writeFileFooter() error { + // The page index is composed of two sections: column and offset indexes. + // They are written after the row groups, right before the footer (which + // is written by the parent Writer.Close call). + // + // This section both writes the page index and generates the values of + // ColumnIndexOffset, ColumnIndexLength, OffsetIndexOffset, and + // OffsetIndexLength in the corresponding columns of the file metadata. + // + // Note: the page index is always written, even if we created data pages v1 + // because the parquet format is backward compatible in this case. Older + // readers will simply ignore this section since they do not know how to + // decode its content, nor have loaded any metadata to reference it. 
+ protocol := new(thrift.CompactProtocol) + encoder := thrift.NewEncoder(protocol.NewWriter(&w.writer)) + + for i, columnIndexes := range w.columnIndexes { + rowGroup := &w.rowGroups[i] + for j := range columnIndexes { + column := &rowGroup.Columns[j] + column.ColumnIndexOffset = w.writer.offset + if err := encoder.Encode(&columnIndexes[j]); err != nil { + return err + } + column.ColumnIndexLength = int32(w.writer.offset - column.ColumnIndexOffset) + } + } + + for i, offsetIndexes := range w.offsetIndexes { + rowGroup := &w.rowGroups[i] + for j := range offsetIndexes { + column := &rowGroup.Columns[j] + column.OffsetIndexOffset = w.writer.offset + if err := encoder.Encode(&offsetIndexes[j]); err != nil { + return err + } + column.OffsetIndexLength = int32(w.writer.offset - column.OffsetIndexOffset) + } + } + + numRows := int64(0) + for rowGroupIndex := range w.rowGroups { + numRows += w.rowGroups[rowGroupIndex].NumRows + } + + // We implemented the parquet specification version 2+, which is represented + // by the version number 2 in the file metadata. 
+ // + // For reference, see: + // https://github.com/apache/arrow/blob/70b9ef5/go/parquet/metadata/file.go#L122-L127 + const parquetFileFormatVersion = 2 + + w.fileMetaData = format.FileMetaData{ + Version: parquetFileFormatVersion, + Schema: w.schemaElements, + NumRows: numRows, + RowGroups: w.rowGroups, + KeyValueMetadata: w.metadata, + CreatedBy: w.createdBy, + ColumnOrders: w.columnOrders, + } + + length := w.writer.offset + if err := encoder.Encode(&w.fileMetaData); err != nil { + return err + } + + length = w.writer.offset - length + binary.LittleEndian.PutUint32(w.footer[:4], uint32(length)) + + _, err := w.writer.Write(w.footer[:]) + return err +} + +func (w *writer) writeRowGroup(rg *writerRowGroup, rowGroupSchema *Schema, rowGroupSortingColumns []SortingColumn) (int64, error) { + if len(rg.columns) == 0 { + return 0, nil + } + numRows := rg.columns[0].totalRowCount() + if numRows == 0 { + return 0, nil + } + + if len(w.rowGroups) == MaxRowGroups { + return 0, ErrTooManyRowGroups + } + + defer func() { + rg.reset() + }() + + for _, c := range rg.columns { + if err := c.Flush(); err != nil { + return 0, err + } + if err := c.flushFilterPages(); err != nil { + return 0, err + } + } + + if err := w.writeFileHeader(); err != nil { + return 0, err + } + fileOffset := w.writer.offset + + for i, c := range rg.columns { + columnIndex := c.columnIndex.ColumnIndex() + columnIndex.RepetitionLevelHistogram = slices.Clone(c.pageRepetitionLevelHistograms) + columnIndex.DefinitionLevelHistogram = slices.Clone(c.pageDefinitionLevelHistograms) + rg.columnIndex[i] = columnIndex + + c.columnChunk.MetaData.SizeStatistics = format.SizeStatistics{ + UnencodedByteArrayDataBytes: c.totalUnencodedByteArrayBytes, + RepetitionLevelHistogram: slices.Clone(c.repetitionLevelHistogram), + DefinitionLevelHistogram: slices.Clone(c.definitionLevelHistogram), + } + + if c.dictionary != nil { + c.columnChunk.MetaData.DictionaryPageOffset = w.writer.offset + if err := 
c.writeDictionaryPage(&w.writer, c.dictionary); err != nil { + return 0, fmt.Errorf("writing dictionary page of row group colum %d: %w", i, err) + } + } + + // Skip columns with nil pageBuffer (e.g., empty struct groups with no leaf columns) + if c.pageBuffer == nil { + continue + } + + dataPageOffset := w.writer.offset + c.columnChunk.MetaData.DataPageOffset = dataPageOffset + for j := range c.offsetIndex.PageLocations { + c.offsetIndex.PageLocations[j].Offset += dataPageOffset + } + + if offset, err := c.pageBuffer.Seek(0, io.SeekStart); err != nil { + return 0, err + } else if offset != 0 { + return 0, fmt.Errorf("resetting parquet page buffer to the start expected offset zero but got %d", offset) + } + if _, err := io.Copy(&w.writer, c.pageBuffer); err != nil { + return 0, fmt.Errorf("writing buffered pages of row group column %d: %w", i, err) + } + } + + for _, c := range rg.columns { + if len(c.filter) > 0 { + bloomFilterOffset := w.writer.offset + c.columnChunk.MetaData.BloomFilterOffset = bloomFilterOffset + if err := c.writeBloomFilter(&w.writer); err != nil { + return 0, err + } + bloomFilterLength := w.writer.offset - bloomFilterOffset + c.columnChunk.MetaData.BloomFilterLength = int32(bloomFilterLength) + } + } + + totalByteSize := int64(0) + totalCompressedSize := int64(0) + + for i := range rg.columnChunk { + c := &rg.columnChunk[i].MetaData + sortPageEncodingStats(c.EncodingStats) + totalByteSize += int64(c.TotalUncompressedSize) + totalCompressedSize += int64(c.TotalCompressedSize) + } + + sortingColumns := w.sortingColumns + if len(sortingColumns) == 0 && len(rowGroupSortingColumns) > 0 { + sortingColumns = make([]format.SortingColumn, 0, len(rowGroupSortingColumns)) + forEachLeafColumnOf(rowGroupSchema, func(leaf leafColumn) { + if sortingIndex := searchSortingColumn(rowGroupSortingColumns, leaf.path); sortingIndex < len(sortingColumns) { + sortingColumns[sortingIndex] = format.SortingColumn{ + ColumnIdx: int32(leaf.columnIndex), + Descending: 
rowGroupSortingColumns[sortingIndex].Descending(), + NullsFirst: rowGroupSortingColumns[sortingIndex].NullsFirst(), + } + } + }) + } + + columns := slices.Clone(rg.columnChunk) + columnIndex := slices.Clone(rg.columnIndex) + offsetIndex := slices.Clone(rg.offsetIndex) + + for i := range columns { + c := &columns[i] + c.MetaData.EncodingStats = slices.Clone(rg.columnChunk[i].MetaData.EncodingStats) + } + + for i := range offsetIndex { + c := &offsetIndex[i] + c.PageLocations = slices.Clone(rg.offsetIndex[i].PageLocations) + } + + w.rowGroups = append(w.rowGroups, format.RowGroup{ + Columns: columns, + TotalByteSize: totalByteSize, + NumRows: numRows, + SortingColumns: sortingColumns, + FileOffset: fileOffset, + TotalCompressedSize: totalCompressedSize, + Ordinal: int16(len(w.rowGroups)), + }) + + w.columnIndexes = append(w.columnIndexes, columnIndex) + w.offsetIndexes = append(w.offsetIndexes, offsetIndex) + return numRows, nil +} + +func (w *writer) WriteRows(rows []Row) (written int, err error) { + var n int + for len(rows) > 0 { + n, err = w.currentRowGroup.WriteRows(rows) + rows = rows[n:] + written += n + if err != ErrTooManyRowGroups { + break + } + if err = w.flush(); err != nil { + break + } + } + return +} + +// The WriteValues method is intended to work in pair with WritePage to allow +// programs to target writing values to specific columns of of the writer. +func (w *writer) WriteValues(values []Value) (numValues int, err error) { + return w.currentRowGroup.columns[values[0].Column()].writeValues(values) +} + +var ( + dataPageBuffers memory.Pool[writerBuffers] + dictionaryPageBuffers memory.Pool[writerBuffers] +) + +// One writerBuffers is used by each writer instance, the memory buffers here +// are shared by all columns of the writer because serialization is not done +// concurrently, which helps keep memory utilization low, both in the total +// footprint and GC cost. 
+// +// The type also exposes helper methods to facilitate the generation of parquet +// pages. A scratch space is used when serialization requires combining multiple +// buffers or compressing the page data, with double-buffering technique being +// employed by swapping the scratch and page buffers to minimize memory copies. +type writerBuffers struct { + repetitions []byte // buffer used to encode repetition levels + definitions []byte // buffer used to encode definition levels + page []byte // page buffer holding the page data + scratch []byte // scratch space used for compression +} + +func newWriterBuffers() *writerBuffers { return new(writerBuffers) } + +func (wb *writerBuffers) crc32() (checksum uint32) { + checksum = crc32.Update(checksum, crc32.IEEETable, wb.repetitions) + checksum = crc32.Update(checksum, crc32.IEEETable, wb.definitions) + checksum = crc32.Update(checksum, crc32.IEEETable, wb.page) + return checksum +} + +func (wb *writerBuffers) size() int { + return len(wb.repetitions) + len(wb.definitions) + len(wb.page) +} + +func (wb *writerBuffers) reset() { + wb.repetitions = wb.repetitions[:0] + wb.definitions = wb.definitions[:0] + wb.page = wb.page[:0] +} + +func encodeLevels(dst, src []byte, maxLevel byte) ([]byte, error) { + bitWidth := bits.Len8(maxLevel) + return levelEncodingsRLE[bitWidth-1].EncodeLevels(dst, src) +} + +func (wb *writerBuffers) encodeRepetitionLevels(page Page, maxRepetitionLevel byte) (err error) { + wb.repetitions, err = encodeLevels(wb.repetitions, page.RepetitionLevels(), maxRepetitionLevel) + return +} + +func (wb *writerBuffers) encodeDefinitionLevels(page Page, maxDefinitionLevel byte) (err error) { + wb.definitions, err = encodeLevels(wb.definitions, page.DefinitionLevels(), maxDefinitionLevel) + return +} + +func (wb *writerBuffers) prependLevelsToDataPageV1(maxRepetitionLevel, maxDefinitionLevel byte) { + hasRepetitionLevels := maxRepetitionLevel > 0 + hasDefinitionLevels := maxDefinitionLevel > 0 + + if 
hasRepetitionLevels || hasDefinitionLevels { + wb.scratch = wb.scratch[:0] + // In data pages v1, the repetition and definition levels are prefixed + // with the 4 bytes length of the sections. While the parquet-format + // documentation indicates that the length prefix is part of the hybrid + // RLE/Bit-Pack encoding, this is the only condition where it is used + // so we treat it as a special case rather than implementing it in the + // encoding. + // + // Reference https://github.com/apache/parquet-format/blob/master/Encodings.md#run-length-encoding--bit-packing-hybrid-rle--3 + if hasRepetitionLevels { + wb.scratch = plain.AppendInt32(wb.scratch, int32(len(wb.repetitions))) + wb.scratch = append(wb.scratch, wb.repetitions...) + wb.repetitions = wb.repetitions[:0] + } + if hasDefinitionLevels { + wb.scratch = plain.AppendInt32(wb.scratch, int32(len(wb.definitions))) + wb.scratch = append(wb.scratch, wb.definitions...) + wb.definitions = wb.definitions[:0] + } + wb.scratch = append(wb.scratch, wb.page...) + wb.swapPageAndScratchBuffers() + } +} + +func (wb *writerBuffers) encode(page Page, enc encoding.Encoding) (err error) { + pageType := page.Type() + pageData := page.Data() + wb.page, err = pageType.Encode(wb.page[:0], pageData, enc) + return err +} + +func (wb *writerBuffers) compress(codec compress.Codec) (err error) { + wb.scratch, err = codec.Encode(wb.scratch[:0], wb.page) + wb.swapPageAndScratchBuffers() + return err +} + +func (wb *writerBuffers) swapPageAndScratchBuffers() { + wb.page, wb.scratch = wb.scratch, wb.page[:0] +} + +// ColumnWriter writes values for a single column to underlying medium. 
+type ColumnWriter struct { + pool BufferPool + pageBuffer io.ReadWriteSeeker + numPages int + + columnPath columnPath + columnType Type + originalType Type // Original type before any encoding changes + columnIndex ColumnIndexer + columnBuffer ColumnBuffer + plainColumnBuffer ColumnBuffer // Retained plain buffer for fallback after lazy creation + originalColumnBuffer ColumnBuffer // Original buffer to restore after row group flush + columnFilter BloomFilterColumn + encoding encoding.Encoding + originalEncoding encoding.Encoding // Original encoding before any changes + compression compress.Codec + dictionary Dictionary + + maxRepetitionLevel byte + maxDefinitionLevel byte + + header struct { + buffer bytes.Buffer + protocol thrift.CompactProtocol + encoder thrift.Encoder + dict format.PageHeader + page format.PageHeader + } + + filter []byte + numRows int64 + bufferIndex int32 + bufferSize int32 + writePageStats bool + writePageBounds bool + writeDeprecatedStatistics bool + isCompressed bool + encodings []format.Encoding + + columnChunk *format.ColumnChunk + offsetIndex *format.OffsetIndex + hasSwitchedToPlain bool // Tracks if dictionary encoding was switched to PLAIN + dictionaryMaxBytes int64 // Per-column dictionary size limit + + totalUnencodedByteArrayBytes int64 + repetitionLevelHistogram []int64 + definitionLevelHistogram []int64 + pageRepetitionLevelHistograms []int64 + pageDefinitionLevelHistograms []int64 +} + +func (c *ColumnWriter) reset() { + if c.hasSwitchedToPlain { + c.columnType = c.originalType + c.encoding = c.originalEncoding + c.hasSwitchedToPlain = false + } + if c.originalColumnBuffer != nil { + c.columnBuffer = c.originalColumnBuffer + } + if c.columnBuffer != nil { + c.columnBuffer.Reset() + } + if c.columnIndex != nil { + c.columnIndex.Reset() + } + if c.dictionary != nil { + c.dictionary.Reset() + } + if c.pageBuffer != nil { + c.pool.PutBuffer(c.pageBuffer) + c.pageBuffer = nil + } + c.numPages = 0 + // Bloom filters may change in 
size between row groups, but we retain the + // buffer to avoid reallocating large memory blocks. + c.filter = c.filter[:0] + c.numRows = 0 + // Reset the fields of column chunks that change between row groups, + // but keep the ones that remain unchanged. + c.columnChunk.MetaData.NumValues = 0 + c.columnChunk.MetaData.TotalUncompressedSize = 0 + c.columnChunk.MetaData.TotalCompressedSize = 0 + c.columnChunk.MetaData.DataPageOffset = 0 + c.columnChunk.MetaData.DictionaryPageOffset = 0 + c.columnChunk.MetaData.Statistics = format.Statistics{} + c.columnChunk.MetaData.EncodingStats = c.columnChunk.MetaData.EncodingStats[:0] + c.columnChunk.MetaData.BloomFilterOffset = 0 + c.offsetIndex.PageLocations = c.offsetIndex.PageLocations[:0] + + c.totalUnencodedByteArrayBytes = 0 + clear(c.repetitionLevelHistogram) + clear(c.definitionLevelHistogram) + c.pageRepetitionLevelHistograms = c.pageRepetitionLevelHistograms[:0] + c.pageDefinitionLevelHistograms = c.pageDefinitionLevelHistograms[:0] +} + +func (c *ColumnWriter) totalRowCount() int64 { + n := c.numRows + if c.columnBuffer != nil { + n += int64(c.columnBuffer.Len()) + } + return n +} + +// Flush writes any buffered data to the underlying [io.Writer]. 
+func (c *ColumnWriter) Flush() (err error) { + if c.columnBuffer == nil { + return nil + } + if c.columnBuffer.Len() > 0 { + // Check dictionary size limit BEFORE writing the page + // to decide if we should switch to PLAIN for future pages + var fallbackToPlain bool + if c.dictionary != nil && !c.hasSwitchedToPlain && c.dictionaryMaxBytes > 0 { + if currentDictSize := c.dictionary.Size(); currentDictSize > c.dictionaryMaxBytes { + fallbackToPlain = true + } + } + + // Write the current buffered page (still with current encoding) + defer c.columnBuffer.Reset() + _, err = c.writeDataPage(c.columnBuffer.Page()) + if err != nil { + return err + } + + // After writing the page, convert to PLAIN for future pages if needed + // This avoids wasteful buffer allocation if this was the last page + if fallbackToPlain { + if err := c.fallbackDictionaryToPlain(); err != nil { + return fmt.Errorf("converting dictionary to plain: %w", err) + } + } + } + return err +} + +func (c *ColumnWriter) flushFilterPages() (err error) { + if c.columnFilter == nil { + return nil + } + + // If there is a dictionary, it contains all the values that we need to + // write to the filter. + if dict := c.dictionary; dict != nil { + // Need to always attempt to resize the filter, as the writer might + // be reused after resetting which would have reset the length of + // the filter to 0. + c.resizeBloomFilter(int64(dict.Len())) + return c.writePageToFilter(dict.Page()) + } + + // When the filter was already allocated, pages have been written to it as + // they were seen by the column writer. + if len(c.filter) > 0 { + return nil + } + + // Skip columns with nil pageBuffer (e.g., empty struct groups with no leaf columns) + if c.pageBuffer == nil { + return nil + } + + // When the filter was not allocated, the writer did not know how many + // values were going to be seen and therefore could not properly size the + // filter ahead of time. 
In this case, we read back all the pages that we + // have encoded and copy their values back to the filter. + // + // A prior implementation of the column writer used to create in-memory + // copies of the pages to avoid this decoding step; however, this unbounded + // allocation caused memory exhaustion in production applications. CPU being + // a somewhat more stretchable resource, we prefer spending time on this + // decoding step than having to trigger incident response when production + // systems are getting OOM-Killed. + c.resizeBloomFilter(c.columnChunk.MetaData.NumValues) + + column := &Column{ + // Set all the fields required by the decodeDataPage* methods. + typ: c.columnType, + encoding: c.encoding, + compression: c.compression, + maxRepetitionLevel: c.maxRepetitionLevel, + maxDefinitionLevel: c.maxDefinitionLevel, + index: int16(c.bufferIndex), + } + + var pageReader io.Reader = c.pageBuffer + if offset, err := c.pageBuffer.Seek(0, io.SeekStart); err != nil { + return err + } else if offset != 0 { + return fmt.Errorf("resetting parquet page buffer to the start expected offset zero but got %d", offset) + } + + if _, ok := pageReader.(*os.File); ok { + rbuf, pool := getBufioReader(pageReader, 1024) + defer func() { + putBufioReader(rbuf, pool) + }() + pageReader = rbuf + } + + pbuf := (*buffer[byte])(nil) + defer func() { + if pbuf != nil { + pbuf.unref() + } + }() + + decoder := thrift.NewDecoder(c.header.protocol.NewReader(pageReader)) + + for range c.numPages { + header := new(format.PageHeader) + if err := decoder.Decode(header); err != nil { + return err + } + + if pbuf != nil { + pbuf.unref() + } + pbuf = buffers.get(int(header.CompressedPageSize)) + if _, err := io.ReadFull(pageReader, pbuf.data.Slice()); err != nil { + return err + } + + var page Page + + switch header.Type { + case format.DataPage: + page, err = column.decodeDataPageV1(DataPageHeaderV1{header.DataPageHeader}, pbuf, nil, header.UncompressedPageSize) + case format.DataPageV2: + 
page, err = column.decodeDataPageV2(DataPageHeaderV2{header.DataPageHeaderV2}, pbuf, nil, header.UncompressedPageSize) + } + if page != nil { + err = c.writePageToFilter(page) + Release(page) + } + if err != nil { + return err + } + } + + return nil +} + +func (c *ColumnWriter) resizeBloomFilter(numValues int64) { + filterSize := c.columnFilter.Size(numValues) + if cap(c.filter) < filterSize { + c.filter = make([]byte, filterSize) + } else { + c.filter = c.filter[:filterSize] + for i := range c.filter { + c.filter[i] = 0 + } + } +} + +func (c *ColumnWriter) newColumnBuffer() ColumnBuffer { + columnIndex := int(c.bufferIndex) + switch { + case c.maxRepetitionLevel > 0: + column := c.columnType.NewColumnBuffer(columnIndex, 0) + return newRepeatedColumnBuffer(column, c.maxRepetitionLevel, c.maxDefinitionLevel, nullsGoLast) + case c.maxDefinitionLevel > 0: + column := c.columnType.NewColumnBuffer(columnIndex, 0) + return newOptionalColumnBuffer(column, c.maxDefinitionLevel, nullsGoLast) + default: + numValues := c.columnType.EstimateNumValues(int(c.bufferSize)) + return c.columnType.NewColumnBuffer(columnIndex, numValues) + } +} + +// WriteRowValues writes entire rows to the column. On success, this returns the +// number of rows written (not the number of values). +// +// Unlike ValueWriter, where arbitrary values may be written regardless of row +// boundaries, this method requires whole rows. This is because the written +// values may be automatically flushed to a data page, based on the writer's +// configured page buffer size, and a single row is not permitted to span two +// pages. +func (c *ColumnWriter) WriteRowValues(rows []Value) (int, error) { + var startingRows int64 + if c.columnBuffer == nil { + // Lazily create the row group column so we don't need to allocate it if + // rows are not written individually to the column. 
+ c.columnBuffer = c.newColumnBuffer() + c.originalColumnBuffer = c.columnBuffer + } else { + startingRows = int64(c.columnBuffer.Len()) + } + if _, err := c.columnBuffer.WriteValues(rows); err != nil { + return 0, err + } + numRows := int(int64(c.columnBuffer.Len()) - startingRows) + if c.columnBuffer.Size() >= int64(c.bufferSize) { + return numRows, c.Flush() + } + return numRows, nil +} + +// Close closes the column writer and releases all dependent resources. +// New values should not be written after the ColumnWriter is closed. +func (c *ColumnWriter) Close() (err error) { + if c.columnBuffer == nil { + return nil + } + if err := c.Flush(); err != nil { + return err + } + c.columnBuffer.Reset() + c.columnBuffer = nil + return nil +} + +func (c *ColumnWriter) writeValues(values []Value) (numValues int, err error) { + if c.columnBuffer == nil { + c.columnBuffer = c.newColumnBuffer() + // Save the original dictionary-encoding buffer to restore after row group flush + if c.originalColumnBuffer == nil { + c.originalColumnBuffer = c.columnBuffer + } + } + return c.columnBuffer.WriteValues(values) +} + +func (c *ColumnWriter) writeBloomFilter(w io.Writer) error { + e := thrift.NewEncoder(c.header.protocol.NewWriter(w)) + h := bloomFilterHeader(c.columnFilter) + h.NumBytes = int32(len(c.filter)) + if err := e.Encode(&h); err != nil { + return err + } + _, err := w.Write(c.filter) + return err +} + +func (c *ColumnWriter) writeDataPage(page Page) (int64, error) { + numValues := page.NumValues() + if numValues == 0 { + return 0, nil + } + + buf := dataPageBuffers.Get(newWriterBuffers, (*writerBuffers).reset) + defer dataPageBuffers.Put(buf) + + if c.maxRepetitionLevel > 0 { + buf.encodeRepetitionLevels(page, c.maxRepetitionLevel) + } + if c.maxDefinitionLevel > 0 { + buf.encodeDefinitionLevels(page, c.maxDefinitionLevel) + } + + if err := buf.encode(page, c.encoding); err != nil { + return 0, fmt.Errorf("encoding parquet data page: %w", err) + } + + if 
c.header.page.Type == format.DataPage { + buf.prependLevelsToDataPageV1(c.maxRepetitionLevel, c.maxDefinitionLevel) + } + + uncompressedPageSize := buf.size() + if uncompressedPageSize > maxUncompressedPageSize { + return 0, fmt.Errorf("page size limit exceeded: %d>%d", uncompressedPageSize, maxUncompressedPageSize) + } + if c.isCompressed { + if err := buf.compress(c.compression); err != nil { + return 0, fmt.Errorf("compressing parquet data page: %w", err) + } + } + + if page.Dictionary() == nil && len(c.filter) > 0 { + // When the writer knows the number of values in advance (e.g. when + // writing a full row group), the filter encoding is set and the page + // can be directly applied to the filter, which minimizes memory usage + // since there is no need to buffer the values in order to determine + // the size of the filter. + if err := c.writePageToFilter(page); err != nil { + return 0, err + } + } + + statistics := format.Statistics{} + if c.writePageStats { + statistics = c.makePageStatistics(page) + } + + c.header.page.UncompressedPageSize = int32(uncompressedPageSize) + c.header.page.CompressedPageSize = int32(buf.size()) + c.header.page.CRC = int32(buf.crc32()) + + numRows := page.NumRows() + numNulls := page.NumNulls() + switch { + case c.header.page.DataPageHeader != nil: + *c.header.page.DataPageHeader = format.DataPageHeader{ + NumValues: int32(numValues), + Encoding: c.encoding.Encoding(), + DefinitionLevelEncoding: format.RLE, + RepetitionLevelEncoding: format.RLE, + Statistics: statistics, + } + case c.header.page.DataPageHeaderV2 != nil: + *c.header.page.DataPageHeaderV2 = format.DataPageHeaderV2{ + NumValues: int32(numValues), + NumNulls: int32(numNulls), + NumRows: int32(numRows), + Encoding: c.encoding.Encoding(), + DefinitionLevelsByteLength: int32(len(buf.definitions)), + RepetitionLevelsByteLength: int32(len(buf.repetitions)), + IsCompressed: &c.isCompressed, + Statistics: statistics, + } + } + + c.header.buffer.Reset() + 
c.header.buffer.Grow(1024) + + if err := c.header.encoder.Encode(&c.header.page); err != nil { + return 0, err + } + + size := int64(c.header.buffer.Len()) + + int64(len(buf.repetitions)) + + int64(len(buf.definitions)) + + int64(len(buf.page)) + + err := c.writePageTo(size, func(output io.Writer) (written int64, err error) { + for _, data := range [...][]byte{ + c.header.buffer.Bytes(), + buf.repetitions, + buf.definitions, + buf.page, + } { + wn, err := output.Write(data) + written += int64(wn) + if err != nil { + return written, err + } + } + return written, nil + }) + if err != nil { + return 0, err + } + + c.recordPageStats(int32(c.header.buffer.Len()), &c.header.page, page) + return numValues, nil +} + +func (c *ColumnWriter) writeDictionaryPage(output io.Writer, dict Dictionary) (err error) { + buf := dictionaryPageBuffers.Get(newWriterBuffers, (*writerBuffers).reset) + defer dictionaryPageBuffers.Put(buf) + + if err := buf.encode(dict.Page(), &Plain); err != nil { + return fmt.Errorf("writing parquet dictionary page: %w", err) + } + + uncompressedPageSize := buf.size() + if uncompressedPageSize > maxUncompressedPageSize { + return fmt.Errorf("page size limit exceeded: %d>%d", uncompressedPageSize, maxUncompressedPageSize) + } + if isCompressed(c.compression) { + if err := buf.compress(c.compression); err != nil { + return fmt.Errorf("copmressing parquet dictionary page: %w", err) + } + } + + c.header.dict.UncompressedPageSize = int32(uncompressedPageSize) + c.header.dict.CompressedPageSize = int32(buf.size()) + c.header.dict.CRC = int32(buf.crc32()) + + *c.header.dict.DictionaryPageHeader = format.DictionaryPageHeader{ + NumValues: int32(dict.Len()), + Encoding: format.Plain, + IsSorted: false, + } + + c.header.buffer.Reset() + c.header.buffer.Grow(1024) + + if err := c.header.encoder.Encode(&c.header.dict); err != nil { + return err + } + if _, err := output.Write(c.header.buffer.Bytes()); err != nil { + return err + } + if _, err := 
output.Write(buf.page); err != nil { + return err + } + c.recordPageStats(int32(c.header.buffer.Len()), &c.header.dict, nil) + return nil +} + +func (c *ColumnWriter) writePageToFilter(page Page) (err error) { + pageType := page.Type() + pageData := page.Data() + c.filter, err = pageType.Encode(c.filter, pageData, c.columnFilter.Encoding()) + return err +} + +func (c *ColumnWriter) writePageTo(size int64, writeTo func(io.Writer) (int64, error)) (err error) { + if c.pageBuffer == nil { + c.pageBuffer = c.pool.GetBuffer() + defer func() { + if err != nil { + c.pool.PutBuffer(c.pageBuffer) + c.pageBuffer = nil + } + }() + if _, err = c.pageBuffer.Seek(0, io.SeekStart); err != nil { + return err + } + } + written, err := writeTo(c.pageBuffer) + if err != nil { + return err + } + if written != size { + return fmt.Errorf("writing parquet column page expected %dB but got %dB: %w", size, written, io.ErrShortWrite) + } + c.numPages++ + return nil +} + +// fallbackDictionaryToPlain switches future pages from dictionary to PLAIN encoding. +// This is called when a column's dictionary size limit is exceeded. +func (c *ColumnWriter) fallbackDictionaryToPlain() error { + // Switch to PLAIN encoding for future writes + // Get the underlying type without the indexed wrapper + if indexedType, ok := c.columnType.(*indexedType); ok { + c.columnType = indexedType.Type + } + if c.plainColumnBuffer == nil { + c.plainColumnBuffer = c.columnType.NewColumnBuffer(int(c.bufferIndex), int(c.bufferSize)) + } + c.columnBuffer = c.plainColumnBuffer + c.encoding = &plain.Encoding{} + c.encodings = addEncoding(c.encodings, format.Plain) + // DON'T clear the dictionary reference! + // We need to keep it so: + // 1. The dictionary page can be written (required for existing dict-encoded pages) + // 2. 
Existing pages that were written with dictionary indexes can be read + // + // The hasSwitchedToPlain flag prevents new values from being added to the dictionary + // + // Note: We are NOT re-encoding existing pages. This means: + // - Pages written before the limit was hit will remain dictionary-encoded + // - Pages written after will be PLAIN-encoded + // - The dictionary page will still be written at row group flush + // - Mixed encodings in the same column chunk are valid per Parquet spec + c.hasSwitchedToPlain = true + return nil +} + +func (c *ColumnWriter) makePageStatistics(page Page) format.Statistics { + numNulls := page.NumNulls() + minValue, maxValue, _ := page.Bounds() + minValueBytes := minValue.Bytes() + maxValueBytes := maxValue.Bytes() + return format.Statistics{ + Min: minValueBytes, // deprecated + Max: maxValueBytes, // deprecated + NullCount: numNulls, + MinValue: minValueBytes, + MaxValue: maxValueBytes, + } +} + +func (c *ColumnWriter) recordPageStats(headerSize int32, header *format.PageHeader, page Page) { + uncompressedSize := headerSize + header.UncompressedPageSize + compressedSize := headerSize + header.CompressedPageSize + + if page != nil { + numNulls := page.NumNulls() + numValues := page.NumValues() + + var minValue, maxValue Value + var pageHasBounds bool + if c.writePageBounds { + minValue, maxValue, pageHasBounds = page.Bounds() + } + + c.columnIndex.IndexPage(numValues, numNulls, minValue, maxValue) + c.columnChunk.MetaData.NumValues += numValues + c.columnChunk.MetaData.Statistics.NullCount += numNulls + + if pageHasBounds { + var existingMaxValue, existingMinValue Value + + if c.columnChunk.MetaData.Statistics.MaxValue != nil && c.columnChunk.MetaData.Statistics.MinValue != nil { + existingMaxValue = c.columnType.Kind().Value(c.columnChunk.MetaData.Statistics.MaxValue) + existingMinValue = c.columnType.Kind().Value(c.columnChunk.MetaData.Statistics.MinValue) + } + + if existingMaxValue.isNull() || 
c.columnType.Compare(maxValue, existingMaxValue) > 0 { + buf := c.columnChunk.MetaData.Statistics.MaxValue[:0] + // if maxValue is empty string, c.columnChunk.MetaData.Statistics.MaxValue should be []bytes{}, but nil + if buf == nil && maxValue.Kind() == ByteArray && len(maxValue.ByteArray()) == 0 { + buf = make([]byte, 0) + } + c.columnChunk.MetaData.Statistics.MaxValue = maxValue.AppendBytes(buf) + if c.writeDeprecatedStatistics { + c.columnChunk.MetaData.Statistics.Max = c.columnChunk.MetaData.Statistics.MaxValue + } + } + + if existingMinValue.isNull() || c.columnType.Compare(minValue, existingMinValue) < 0 { + buf := c.columnChunk.MetaData.Statistics.MinValue[:0] + // same as above + if buf == nil && minValue.Kind() == ByteArray && len(minValue.ByteArray()) == 0 { + buf = make([]byte, 0) + } + c.columnChunk.MetaData.Statistics.MinValue = minValue.AppendBytes(buf) + if c.writeDeprecatedStatistics { + c.columnChunk.MetaData.Statistics.Min = c.columnChunk.MetaData.Statistics.MinValue + } + } + } + + c.offsetIndex.PageLocations = append(c.offsetIndex.PageLocations, format.PageLocation{ + Offset: c.columnChunk.MetaData.TotalCompressedSize, + CompressedPageSize: compressedSize, + FirstRowIndex: c.numRows, + }) + + c.numRows += page.NumRows() + c.totalUnencodedByteArrayBytes += computeUnencodedByteArraySize(page) + + repetitionLevels := page.RepetitionLevels() + definitionLevels := page.DefinitionLevels() + + if c.maxRepetitionLevel > 0 { + accumulateLevelHistogram(c.repetitionLevelHistogram, repetitionLevels) + c.pageRepetitionLevelHistograms = appendPageLevelHistogram( + c.pageRepetitionLevelHistograms, repetitionLevels, c.maxRepetitionLevel, + ) + } + + if c.maxDefinitionLevel > 0 { + accumulateLevelHistogram(c.definitionLevelHistogram, definitionLevels) + c.pageDefinitionLevelHistograms = appendPageLevelHistogram( + c.pageDefinitionLevelHistograms, definitionLevels, c.maxDefinitionLevel, + ) + } + } + + pageType := header.Type + encoding := format.Encoding(-1) + 
switch pageType { + case format.DataPageV2: + encoding = header.DataPageHeaderV2.Encoding + case format.DataPage: + encoding = header.DataPageHeader.Encoding + case format.DictionaryPage: + encoding = header.DictionaryPageHeader.Encoding + } + + c.columnChunk.MetaData.TotalUncompressedSize += int64(uncompressedSize) + c.columnChunk.MetaData.TotalCompressedSize += int64(compressedSize) + c.columnChunk.MetaData.EncodingStats = addPageEncodingStats(c.columnChunk.MetaData.EncodingStats, format.PageEncodingStats{ + PageType: pageType, + Encoding: encoding, + Count: 1, + }) +} + +func addEncoding(encodings []format.Encoding, add format.Encoding) []format.Encoding { + if slices.Contains(encodings, add) { + return encodings + } + return append(encodings, add) +} + +func addPageEncodingStats(stats []format.PageEncodingStats, pages ...format.PageEncodingStats) []format.PageEncodingStats { +addPages: + for _, add := range pages { + for i, st := range stats { + if st.PageType == add.PageType && st.Encoding == add.Encoding { + stats[i].Count += add.Count + continue addPages + } + } + stats = append(stats, add) + } + return stats +} + +func sortPageEncodings(encodings []format.Encoding) { + slices.Sort(encodings) +} + +func sortPageEncodingStats(stats []format.PageEncodingStats) { + slices.SortFunc(stats, func(s1, s2 format.PageEncodingStats) int { + if k := cmp.Compare(s1.PageType, s2.PageType); k != 0 { + return k + } + return cmp.Compare(s1.Encoding, s2.Encoding) + }) +} + +type offsetTrackingWriter struct { + writer io.Writer + offset int64 +} + +func (w *offsetTrackingWriter) Reset(writer io.Writer) { + w.writer = writer + w.offset = 0 +} + +func (w *offsetTrackingWriter) Write(b []byte) (int, error) { + n, err := w.writer.Write(b) + w.offset += int64(n) + return n, err +} + +func (w *offsetTrackingWriter) WriteString(s string) (int, error) { + n, err := io.WriteString(w.writer, s) + w.offset += int64(n) + return n, err +} + +func (w *offsetTrackingWriter) ReadFrom(r 
io.Reader) (int64, error) { + // io.Copy will make use of io.ReaderFrom if w.writer implements it. + n, err := io.Copy(w.writer, r) + w.offset += n + return n, err +} + +var ( + _ RowWriterWithSchema = (*Writer)(nil) + _ RowReaderFrom = (*Writer)(nil) + _ RowGroupWriter = (*Writer)(nil) + + _ RowWriter = (*writer)(nil) + _ ValueWriter = (*writer)(nil) + + _ io.ReaderFrom = (*offsetTrackingWriter)(nil) + _ io.StringWriter = (*offsetTrackingWriter)(nil) +) diff --git a/vendor/github.com/parquet-go/parquet-go/writer_statistics.go b/vendor/github.com/parquet-go/parquet-go/writer_statistics.go new file mode 100644 index 00000000000..dd5aa07e456 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/writer_statistics.go @@ -0,0 +1,42 @@ +package parquet + +import ( + "slices" + + "github.com/parquet-go/parquet-go/encoding" +) + +// computeUnencodedByteArraySize calculates the unencoded size of byte array data +// in a page, excluding 4-byte length prefixes. +func computeUnencodedByteArraySize(page Page) int64 { + values := page.Data() + if values.Kind() != encoding.ByteArray { + return 0 + } + return values.Size() +} + +// accumulateLevelHistogram adds level counts from levels to histogram. +func accumulateLevelHistogram(histogram []int64, levels []byte) { + for _, level := range levels { + histogram[level]++ + } +} + +// appendPageLevelHistogram creates a per-page histogram and appends it to histograms. +// Returns the updated histogram slice with (maxLevel + 1) new elements appended. 
+func appendPageLevelHistogram(histograms []int64, levels []byte, maxLevel byte) []int64 { + histSize := int(maxLevel) + 1 + startIndex := len(histograms) + histograms = slices.Grow(histograms, histSize)[:startIndex+histSize] + + for i := range histSize { + histograms[startIndex+i] = 0 + } + + for _, level := range levels { + histograms[startIndex+int(level)]++ + } + + return histograms +} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/base/schema.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/base/schema.go index aac0204c243..86f54039e9d 100644 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/base/schema.go +++ b/vendor/github.com/pb33f/libopenapi/datamodel/low/base/schema.go @@ -158,8 +158,6 @@ type Schema struct { RootNode *yaml.Node index *index.SpecIndex context context.Context - hashed uint64 // quick hash of the schema, used for quick equality checking - hashLock sync.Mutex // lock to prevent concurrent hashing of the same schema *low.Reference low.NodeMap } diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/base/schema_proxy.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/base/schema_proxy.go index ae037e67bab..3a25d546225 100644 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/base/schema_proxy.go +++ b/vendor/github.com/pb33f/libopenapi/datamodel/low/base/schema_proxy.go @@ -10,6 +10,7 @@ import ( "hash/maphash" "log/slog" "sync" + "sync/atomic" "github.com/pb33f/libopenapi/datamodel" "github.com/pb33f/libopenapi/datamodel/low" @@ -50,21 +51,32 @@ import ( // Schemas are where things can get messy, mainly because the Schema standard changes between versions, and // it's not actually JSONSchema until 3.1, so lots of times a bad schema will break parsing. Errors are only found // when a schema is needed, so the rest of the document is parsed and ready to use. 
+// +// [ There is a good amount of async code in here, many different ways to slam into the same schema being built/read/ ] +// [ hashed or cached at the same time. So just a warning, if you're thinking of working on this - async safety ] +// [ should be your main concern, cheers - quobix. ] type SchemaProxy struct { low.Reference kn *yaml.Node vn *yaml.Node idx *index.SpecIndex - rendered *Schema - buildError error + schemaOnce sync.Once // guards lazy Schema() build + rendered atomic.Pointer[Schema] // atomic for safe reads from any goroutine + buildError error // protected by schemaOnce (write-once) ctx context.Context - cachedHash *uint64 // Cache computed hash to avoid recalculation + hashMu sync.Mutex // protects cachedHash + hashGen + cachedHash *uint64 // protected by hashMu + hashGen uint64 // generation counter for invalidation TransformedRef *yaml.Node // Original node that contained the ref before transformation *low.NodeMap } // Build will prepare the SchemaProxy for rendering, it does not build the Schema, only sets up internal state. // Key maybe nil if absent. +// +// Lifecycle: Build() must be called exactly once per SchemaProxy, before Schema() is called. +// Calling Build() after Schema() has already been invoked will update internal state (kn, vn, idx, ctx) +// but will NOT re-trigger schema building due to sync.Once semantics. func (sp *SchemaProxy) Build(ctx context.Context, key, value *yaml.Node, idx *index.SpecIndex) error { sp.kn = key sp.idx = idx @@ -141,48 +153,55 @@ func applySchemaIdScope(ctx context.Context, node *yaml.Node, idx *index.SpecInd // If anything goes wrong during the build, then nothing is returned and the error that occurred can // be retrieved by using GetBuildError() func (sp *SchemaProxy) Schema() *Schema { - if sp.rendered != nil { - return sp.rendered - } + sp.schemaOnce.Do(func() { + cfg := sp.getSpecConfig() - // If this proxy represents an unresolved external ref, return nil without error. 
- if sp.IsReference() && sp.idx != nil && sp.idx.GetConfig() != nil && - sp.idx.GetConfig().SkipExternalRefResolution && utils.IsExternalRef(sp.GetReference()) { - return nil - } + // if this proxy represents an unresolved external ref, return nil without error + if sp.IsReference() && cfg != nil && + cfg.SkipExternalRefResolution && utils.IsExternalRef(sp.GetReference()) { + return + } - // handle property merging for references with sibling properties - buildNode := sp.vn - if sp.idx != nil && sp.idx.GetConfig() != nil { - if docConfig := sp.getDocumentConfig(); docConfig != nil && docConfig.MergeReferencedProperties { - if mergedNode := sp.attemptPropertyMerging(buildNode, docConfig); mergedNode != nil { - buildNode = mergedNode + // handle property merging for references with sibling properties + buildNode := sp.vn + if cfg != nil { + if docConfig := sp.getDocumentConfig(); docConfig != nil && docConfig.MergeReferencedProperties { + if mergedNode := sp.attemptPropertyMerging(buildNode, docConfig); mergedNode != nil { + buildNode = mergedNode + } } } - } - schema := new(Schema) - utils.CheckForMergeNodes(buildNode) - err := schema.Build(sp.ctx, buildNode, sp.idx) - if err != nil { - sp.buildError = err - return nil - } - schema.ParentProxy = sp // https://github.com/pb33f/libopenapi/issues/29 - sp.rendered = schema - - // for all the nodes added, copy them over to the schema - if sp.NodeMap != nil { - sp.NodeMap.Nodes.Range(func(key, value any) bool { - schema.AddNode(key.(int), value.(*yaml.Node)) - return true - }) - } - return schema + schema := new(Schema) + utils.CheckForMergeNodes(buildNode) + err := schema.Build(sp.ctx, buildNode, sp.idx) + if err != nil { + sp.buildError = err + return + } + schema.ParentProxy = sp // https://github.com/pb33f/libopenapi/issues/29 + + // Store rendered FIRST — must happen before NodeMap copy. 
+ // If AddNode() runs during the Range window, it sees rendered != nil + // and writes directly to the schema instead of NodeMap (where it would be missed). + sp.rendered.Store(schema) + + // Copy accumulated nodes to the built schema + if sp.NodeMap != nil { + sp.NodeMap.Nodes.Range(func(key, value any) bool { + schema.AddNode(key.(int), value.(*yaml.Node)) + return true + }) + } + }) + return sp.rendered.Load() } // GetBuildError returns the build error that was set when Schema() was called. If Schema() has not been run, or // there were no errors during build, then nil will be returned. +// +// Thread safety: GetBuildError() is safe to call concurrently only after Schema() has been called at least once +// on this proxy (from any goroutine). All standard code paths (Hash(), high-level Schema()) call Schema() first. func (sp *SchemaProxy) GetBuildError() error { return sp.buildError } @@ -218,85 +237,102 @@ func (sp *SchemaProxy) GetValueNode() *yaml.Node { // Hash will return a consistent Hash of the SchemaProxy object (it will resolve it) func (sp *SchemaProxy) Hash() uint64 { + sp.hashMu.Lock() if sp.cachedHash != nil { - return *sp.cachedHash + h := *sp.cachedHash + sp.hashMu.Unlock() + return h + } + gen := sp.hashGen + sp.hashMu.Unlock() + + hash := sp.computeHash() + + // store only if not invalidated during computation + sp.hashMu.Lock() + if sp.hashGen == gen { + sp.cachedHash = &hash } + sp.hashMu.Unlock() + return hash +} - var hash uint64 +// computeHash contains the actual hash computation logic, called outside the hash lock. 
+func (sp *SchemaProxy) computeHash() uint64 { + // for unresolved references, hash the ref string without resolving the target schema + sch := sp.rendered.Load() - if sp.rendered != nil { + if sch != nil { if !sp.IsReference() { - hash = sp.rendered.Hash() - } else { - // For references, hash the reference value - hash = low.WithHasher(func(h *maphash.Hash) uint64 { - h.WriteString(sp.GetReference()) - return h.Sum64() - }) + return sch.Hash() } - } else { - if !sp.IsReference() { - // Only resolve this proxy if it's not a ref. - sch := sp.Schema() - sp.rendered = sch - hashError := fmt.Errorf("circular reference detected: %s", sp.GetReference()) - if sch != nil { - if sp.idx != nil && sp.idx.GetConfig() != nil && sp.idx.GetConfig().UseSchemaQuickHash { - if !CheckSchemaProxyForCircularRefs(sp) { - hash = sch.Hash() - } - } else { - hash = sch.Hash() - } - } else { - var logger *slog.Logger - if sp.idx != nil && sp.idx.GetLogger() != nil { - logger = sp.idx.GetLogger() - } - if logger != nil { - bErr := errors.Join(sp.GetBuildError(), hashError) - if bErr != nil { - logger.Warn("SchemaProxy.Hash() unable to complete hash: ", "error", bErr.Error()) - } - } - hash = 0 + return sp.hashReference() + } + + if !sp.IsReference() { + sch = sp.Schema() + if sch != nil { + useQuickHash := sp.getSpecConfig() != nil && sp.getSpecConfig().UseSchemaQuickHash + if !useQuickHash || !CheckSchemaProxyForCircularRefs(sp) { + return sch.Hash() } } else { - // Handle UseSchemaQuickHash case for references - if sp.idx != nil && sp.idx.GetConfig() != nil && sp.idx.GetConfig().UseSchemaQuickHash { - if sp.idx != nil && !CheckSchemaProxyForCircularRefs(sp) { - if sp.rendered == nil { - sp.rendered = sp.Schema() - } - hash = sp.rendered.QuickHash() // quick hash uses a cache to keep things fast. 
- } else { - hash = low.WithHasher(func(h *maphash.Hash) uint64 { - h.WriteString(sp.GetReference()) - return h.Sum64() - }) + // build failed — log warning + var logger *slog.Logger + if sp.idx != nil && sp.idx.GetLogger() != nil { + logger = sp.idx.GetLogger() + } + if logger != nil { + hashError := fmt.Errorf("circular reference detected: %s", sp.GetReference()) + bErr := errors.Join(sp.GetBuildError(), hashError) + if bErr != nil { + logger.Warn("SchemaProxy.Hash() unable to complete hash: ", "error", bErr.Error()) } - } else { - // Hash reference value only, do not resolve! - hash = low.WithHasher(func(h *maphash.Hash) uint64 { - h.WriteString(sp.GetReference()) - return h.Sum64() - }) } } + return 0 } - // Cache the computed hash for future calls - sp.cachedHash = &hash - return hash + // unresolved reference + cfg := sp.getSpecConfig() + if cfg != nil && cfg.UseSchemaQuickHash { + if !CheckSchemaProxyForCircularRefs(sp) { + sch = sp.Schema() + if sch != nil { + return sch.QuickHash() + } + } else { + return sp.hashReference() + } + } + return sp.hashReference() +} + +// hashReference hashes the $ref string value without resolving the target. +func (sp *SchemaProxy) hashReference() uint64 { + return low.WithHasher(func(h *maphash.Hash) uint64 { + h.WriteString(sp.GetReference()) + return h.Sum64() + }) +} + +// getSpecConfig returns the SpecIndexConfig if available, or nil. +func (sp *SchemaProxy) getSpecConfig() *index.SpecIndexConfig { + if sp.idx != nil && sp.idx.GetConfig() != nil { + return sp.idx.GetConfig() + } + return nil } // AddNode stores nodes in the underlying schema if rendered, otherwise holds in the proxy until build. 
func (sp *SchemaProxy) AddNode(key int, node *yaml.Node) { - // Clear cached hash since content is being modified + sp.hashMu.Lock() sp.cachedHash = nil + sp.hashGen++ + sp.hashMu.Unlock() - if sp.rendered != nil { - sp.rendered.AddNode(key, node) + if sch := sp.rendered.Load(); sch != nil { + sch.AddNode(key, node) } else { sp.Nodes.Store(key, node) } diff --git a/vendor/github.com/pb33f/libopenapi/index/extract_refs.go b/vendor/github.com/pb33f/libopenapi/index/extract_refs.go index 1f83c238f23..e2b7efeae89 100644 --- a/vendor/github.com/pb33f/libopenapi/index/extract_refs.go +++ b/vendor/github.com/pb33f/libopenapi/index/extract_refs.go @@ -813,6 +813,17 @@ func (index *SpecIndex) ExtractRefs(ctx context.Context, node, parent *yaml.Node // This function uses singleflight to deduplicate concurrent lookups for the same reference, // channel-based collection to avoid mutex contention during resolution, and sorts results // by input position for deterministic ordering. + +// isExternalReference checks whether a Reference originated from an external $ref. +// ref.Definition may have been transformed (e.g., HTTP URL with fragment becomes "#/fragment"), +// so we also check the original raw ref value. 
+func isExternalReference(ref *Reference) bool { + if ref == nil { + return false + } + return utils.IsExternalRef(ref.Definition) || utils.IsExternalRef(ref.RawRef) +} + func (index *SpecIndex) ExtractComponentsFromRefs(ctx context.Context, refs []*Reference) []*Reference { if len(refs) == 0 { return nil @@ -841,7 +852,7 @@ func (index *SpecIndex) ExtractComponentsFromRefs(ctx context.Context, refs []*R index.refLock.Unlock() } else { // If SkipExternalRefResolution is enabled, don't record errors for external refs - if index.config != nil && index.config.SkipExternalRefResolution && utils.IsExternalRef(ref.Definition) { + if index.config != nil && index.config.SkipExternalRefResolution && isExternalReference(ref) { continue } // Record error for definitive failure @@ -964,7 +975,7 @@ func (index *SpecIndex) ExtractComponentsFromRefs(ctx context.Context, refs []*R index.refLock.Unlock() } else { // If SkipExternalRefResolution is enabled, don't record errors for external refs - if index.config != nil && index.config.SkipExternalRefResolution && utils.IsExternalRef(ref.Definition) { + if index.config != nil && index.config.SkipExternalRefResolution && isExternalReference(ref) { continue } // Definitive failure - record error diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 0ebebc26d52..ba823fecfb1 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -1092,9 +1092,6 @@ type TSDBRetentionConfig struct { // Maximum number of bytes that can be stored for blocks. Size units.Base2Bytes `yaml:"size,omitempty"` - - // Maximum percentage of disk used for TSDB storage. - Percentage uint `yaml:"percentage,omitempty"` } // TSDBConfig configures runtime reloadable configuration options. @@ -1675,6 +1672,10 @@ type OTLPConfig struct { // in label names when AllowUTF8 is false. 
When false, multiple consecutive underscores are // collapsed to a single underscore during label name sanitization. LabelNamePreserveMultipleUnderscores bool `yaml:"label_name_preserve_multiple_underscores,omitempty"` + // DisableTargetInfo disables generation of the target_info metric. This is + // useful when native resource attribute persistence is enabled and the + // info() PromQL function is used instead of target_info join queries. + DisableTargetInfo bool `yaml:"disable_target_info,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index 620d185177f..d457d8ab258 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -2110,337 +2110,3 @@ func (h *FloatHistogram) HasOverflow() bool { } return false } - -// TrimBuckets trims native histogram buckets. -func (h *FloatHistogram) TrimBuckets(rhs float64, isUpperTrim bool) *FloatHistogram { - var ( - trimmedHist = h.Copy() - - updatedCount, updatedSum float64 - trimmedBuckets bool - isCustomBucket = trimmedHist.UsesCustomBuckets() - hasPositive, hasNegative bool - ) - - if isUpperTrim { - // Calculate the fraction to keep for buckets that contain the trim value. - // For TRIM_UPPER, we keep observations below the trim point (rhs). - // Example: histogram / float. - for i, iter := 0, trimmedHist.PositiveBucketIterator(); iter.Next(); i++ { - bucket := iter.At() - if bucket.Count == 0 { - continue - } - hasPositive = true - - switch { - case bucket.Lower >= rhs: - // Bucket is entirely below the trim point - keep all. 
- updatedCount += bucket.Count - bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, true, isCustomBucket) - updatedSum += bucketMidpoint * bucket.Count - - case bucket.Upper > rhs: - // Bucket contains the trim point - interpolate. - keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, true, isCustomBucket) - - updatedCount += keepCount - updatedSum += bucketMidpoint * keepCount - if trimmedHist.PositiveBuckets[i] != keepCount { - trimmedHist.PositiveBuckets[i] = keepCount - trimmedBuckets = true - } - - default: - trimmedHist.PositiveBuckets[i] = 0 - trimmedBuckets = true - } - } - - for i, iter := 0, trimmedHist.NegativeBucketIterator(); iter.Next(); i++ { - bucket := iter.At() - if bucket.Count == 0 { - continue - } - hasNegative = true - - switch { - case bucket.Lower >= rhs: - // Bucket is entirely below the trim point - keep all. - updatedCount += bucket.Count - bucketMidpoint := computeMidpoint(bucket.Lower, bucket.Upper, false, isCustomBucket) - updatedSum += bucketMidpoint * bucket.Count - - case bucket.Upper > rhs: - // Bucket contains the trim point - interpolate. - keepCount, bucketMidpoint := computeBucketTrim(bucket, rhs, isUpperTrim, false, isCustomBucket) - - updatedCount += keepCount - updatedSum += bucketMidpoint * keepCount - if trimmedHist.NegativeBuckets[i] != keepCount { - trimmedHist.NegativeBuckets[i] = keepCount - trimmedBuckets = true - } - - default: - trimmedHist.NegativeBuckets[i] = 0 - trimmedBuckets = true - } - } - } - - // Handle the zero count bucket. - if trimmedHist.ZeroCount > 0 { - keepCount, bucketMidpoint := computeZeroBucketTrim(trimmedHist.ZeroBucket(), rhs, hasNegative, hasPositive, isUpperTrim) - - if trimmedHist.ZeroCount != keepCount { - trimmedHist.ZeroCount = keepCount - trimmedBuckets = true - } - updatedSum += bucketMidpoint * keepCount - updatedCount += keepCount - } - - if trimmedBuckets { - // Only update the totals in case some bucket(s) were fully (or partially) trimmed. 
- trimmedHist.Count = updatedCount - trimmedHist.Sum = updatedSum - - trimmedHist.Compact(0) - } - - return trimmedHist -} - -func handleInfinityBuckets(isUpperTrim bool, b Bucket[float64], rhs float64) (underCount, bucketMidpoint float64) { - zeroIfInf := func(x float64) float64 { - if math.IsInf(x, 0) { - return 0 - } - return x - } - - // Case 1: Bucket with lower bound -Inf. - if math.IsInf(b.Lower, -1) { - // TRIM_UPPER (= b.Upper { - // As the rhs is greater than the upper bound, we keep the entire current bucket. - return b.Count, 0 - } - if rhs > 0 && b.Upper > 0 && !math.IsInf(b.Upper, 1) { - // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). - // This is only possible with NHCB, so we can always use linear interpolation. - return b.Count * rhs / b.Upper, rhs / 2 - } - if b.Upper <= 0 { - return b.Count, rhs - } - // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. - return 0, zeroIfInf(b.Upper) - } - // TRIM_LOWER (>/) - remove values less than rhs - if rhs <= b.Lower { - // Impossible to happen because the lower bound is -Inf. Returning the entire current bucket. - return b.Count, 0 - } - if rhs >= 0 && b.Upper > rhs && !math.IsInf(b.Upper, 1) { - // If upper is finite and positive, we treat lower as 0 (despite it de facto being -Inf). - // This is only possible with NHCB, so we can always use linear interpolation. - return b.Count * (1 - rhs/b.Upper), (rhs + b.Upper) / 2 - } - // Otherwise, we are targeting a valid trim, but as we don't know the exact distribution of values that belongs to an infinite bucket, we need to remove the entire bucket. - return 0, zeroIfInf(b.Upper) - } - - // Case 2: Bucket with upper bound +Inf. 
- if math.IsInf(b.Upper, 1) { - if isUpperTrim { - // TRIM_UPPER (= lower and the bucket extends to +Inf, some values in this bucket could be > rhs, so we conservatively remove the entire bucket; - // when rhs < lower, all values in this bucket are >= lower > rhs, so all values should be removed. - return 0, zeroIfInf(b.Lower) - } - // TRIM_LOWER (>/) - remove values less than rhs. - if rhs >= b.Lower { - return b.Count, rhs - } - // lower < rhs: we are inside the infinity bucket, but as we don't know the exact distribution of values, we conservatively remove the entire bucket. - return 0, zeroIfInf(b.Lower) - } - - panic(fmt.Errorf("one of the bounds must be infinite for handleInfinityBuckets, got %v", b)) -} - -// computeSplit calculates the portion of the bucket's count <= rhs (trim point). -func computeSplit(b Bucket[float64], rhs float64, isPositive, isLinear bool) float64 { - if rhs <= b.Lower { - return 0 - } - if rhs >= b.Upper { - return b.Count - } - - var fraction float64 - switch { - case isLinear: - fraction = (rhs - b.Lower) / (b.Upper - b.Lower) - default: - // Exponential interpolation. 
- logLower := math.Log2(math.Abs(b.Lower)) - logUpper := math.Log2(math.Abs(b.Upper)) - logV := math.Log2(math.Abs(rhs)) - - if isPositive { - fraction = (logV - logLower) / (logUpper - logLower) - } else { - fraction = 1 - ((logV - logUpper) / (logLower - logUpper)) - } - } - - return b.Count * fraction -} - -func computeZeroBucketTrim(zeroBucket Bucket[float64], rhs float64, hasNegative, hasPositive, isUpperTrim bool) (float64, float64) { - var ( - lower = zeroBucket.Lower - upper = zeroBucket.Upper - ) - if hasNegative && !hasPositive { - upper = 0 - } - if hasPositive && !hasNegative { - lower = 0 - } - - var fraction, midpoint float64 - - if isUpperTrim { - if rhs <= lower { - return 0, 0 - } - if rhs >= upper { - return zeroBucket.Count, (lower + upper) / 2 - } - - fraction = (rhs - lower) / (upper - lower) - midpoint = (lower + rhs) / 2 - } else { // lower trim - if rhs <= lower { - return zeroBucket.Count, (lower + upper) / 2 - } - if rhs >= upper { - return 0, 0 - } - - fraction = (upper - rhs) / (upper - lower) - midpoint = (rhs + upper) / 2 - } - - return zeroBucket.Count * fraction, midpoint -} - -func computeBucketTrim(b Bucket[float64], rhs float64, isUpperTrim, isPositive, isCustomBucket bool) (float64, float64) { - if math.IsInf(b.Lower, -1) || math.IsInf(b.Upper, 1) { - return handleInfinityBuckets(isUpperTrim, b, rhs) - } - - underCount := computeSplit(b, rhs, isPositive, isCustomBucket) - - if isUpperTrim { - return underCount, computeMidpoint(b.Lower, rhs, isPositive, isCustomBucket) - } - - return b.Count - underCount, computeMidpoint(rhs, b.Upper, isPositive, isCustomBucket) -} - -func computeMidpoint(survivingIntervalLowerBound, survivingIntervalUpperBound float64, isPositive, isLinear bool) float64 { - if math.IsInf(survivingIntervalLowerBound, 0) { - if math.IsInf(survivingIntervalUpperBound, 0) { - return 0 - } - if survivingIntervalUpperBound > 0 { - return survivingIntervalUpperBound / 2 - } - return survivingIntervalUpperBound - } else 
if math.IsInf(survivingIntervalUpperBound, 0) { - return survivingIntervalLowerBound - } - - if isLinear { - return (survivingIntervalLowerBound + survivingIntervalUpperBound) / 2 - } - - geoMean := math.Sqrt(math.Abs(survivingIntervalLowerBound * survivingIntervalUpperBound)) - - if isPositive { - return geoMean - } - return -geoMean -} diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 9b46210cf05..9b44b9e26b0 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -91,6 +91,7 @@ type engineMetrics struct { queryResultSort prometheus.Observer queryResultSortHistogram prometheus.Observer querySamples prometheus.Counter + infoFunctionCalls *prometheus.CounterVec } type ( @@ -295,6 +296,18 @@ type QueryTracker interface { Delete(insertIndex int) } +// InfoResourceStrategy controls how info() resolves resource attributes. +type InfoResourceStrategy string + +const ( + // InfoResourceStrategyTargetInfo uses only target_info metric-join (default). + InfoResourceStrategyTargetInfo InfoResourceStrategy = "target-info" + // InfoResourceStrategyResourceAttributes uses only stored resource attributes. + InfoResourceStrategyResourceAttributes InfoResourceStrategy = "resource-attributes" + // InfoResourceStrategyHybrid combines resource attributes with target_info fallback. + InfoResourceStrategyHybrid InfoResourceStrategy = "hybrid" +) + // EngineOpts contains configuration options used when creating a new Engine. type EngineOpts struct { Logger *slog.Logger @@ -336,10 +349,36 @@ type EngineOpts struct { // FeatureRegistry is the registry for tracking enabled/disabled features. FeatureRegistry features.Collector + // EnableNativeMetadata enables the native metadata path in info(), using + // resource attributes stored in TSDB instead of target_info metric joins. 
+ EnableNativeMetadata bool + + // InfoResourceStrategy controls how the info() function resolves resource + // attributes when native metadata is enabled: + // - "target-info": use only target_info metric-join (default) + // - "resource-attributes": use only stored resource attributes + // - "hybrid": combine resource attributes with target_info metric-join fallback + // When EnableNativeMetadata is false, the strategy is forced to "target-info". + InfoResourceStrategy InfoResourceStrategy + + // LabelNamerConfig holds configuration for translating OTel attribute names to Prometheus label names. + // This is used by the info() function to reverse-translate Prometheus label matchers back to OTel names. + LabelNamerConfig *LabelNamerConfig + // Parser is the PromQL parser instance used for parsing expressions. Parser parser.Parser } +// LabelNamerConfig holds configuration for translating OTel attribute names to Prometheus label names. +type LabelNamerConfig struct { + // UTF8Allowed indicates whether UTF-8 characters are allowed in label names. + UTF8Allowed bool + // UnderscoreLabelSanitization enables prepending 'key' to labels starting with '_'. + UnderscoreLabelSanitization bool + // PreserveMultipleUnderscores enables preserving multiple consecutive underscores. + PreserveMultipleUnderscores bool +} + // Engine handles the lifetime of queries from beginning to end. // It is connected to a querier. type Engine struct { @@ -358,6 +397,9 @@ type Engine struct { enableDelayedNameRemoval bool enableTypeAndUnitLabels bool parser parser.Parser + enableNativeMetadata bool + infoResourceStrategy InfoResourceStrategy + labelNamerConfig *LabelNamerConfig } // NewEngine returns a new engine. 
@@ -420,6 +462,12 @@ func NewEngine(opts EngineOpts) *Engine { Name: "query_samples_total", Help: "The total number of samples loaded by all queries.", }), + infoFunctionCalls: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "info_function_calls_total", + Help: "Total number of info() function calls by resolution mode.", + }, []string{"mode"}), queryQueueTime: queryResultSummary.WithLabelValues("queue_time"), queryQueueTimeHistogram: queryResultHistogram.WithLabelValues("queue_time"), queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"), @@ -454,6 +502,7 @@ func NewEngine(opts EngineOpts) *Engine { metrics.queryLogEnabled, metrics.queryLogFailures, metrics.querySamples, + metrics.infoFunctionCalls, queryResultSummary, queryResultHistogram, ) @@ -473,7 +522,7 @@ func NewEngine(opts EngineOpts) *Engine { } } - return &Engine{ + ng := &Engine{ timeout: opts.Timeout, logger: opts.Logger, metrics: metrics, @@ -487,7 +536,28 @@ func NewEngine(opts EngineOpts) *Engine { enableDelayedNameRemoval: opts.EnableDelayedNameRemoval, enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels, parser: opts.Parser, + enableNativeMetadata: opts.EnableNativeMetadata, + infoResourceStrategy: opts.InfoResourceStrategy, + labelNamerConfig: opts.LabelNamerConfig, } + + // Gate: when native metadata is disabled, force strategy to target-info. + // When no strategy was set (programmatic callers / tests), default to + // target-info for consistency with the CLI default. + if !ng.enableNativeMetadata || ng.infoResourceStrategy == "" { + ng.infoResourceStrategy = InfoResourceStrategyTargetInfo + } + + // Validate strategy value. + switch ng.infoResourceStrategy { + case InfoResourceStrategyTargetInfo, InfoResourceStrategyResourceAttributes, InfoResourceStrategyHybrid: + // Valid. 
+ default: + ng.logger.Warn("unknown info resource strategy, defaulting to target-info", "strategy", string(ng.infoResourceStrategy)) + ng.infoResourceStrategy = InfoResourceStrategyTargetInfo + } + + return ng } // Close closes ng. @@ -797,7 +867,10 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, enableDelayedNameRemoval: ng.enableDelayedNameRemoval, enableTypeAndUnitLabels: ng.enableTypeAndUnitLabels, + infoResourceStrategy: ng.infoResourceStrategy, querier: querier, + labelNamerConfig: ng.labelNamerConfig, + metrics: ng.metrics, } query.sampleStats.InitStepTracking(start, start, 1) @@ -857,7 +930,10 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, enableDelayedNameRemoval: ng.enableDelayedNameRemoval, enableTypeAndUnitLabels: ng.enableTypeAndUnitLabels, + infoResourceStrategy: ng.infoResourceStrategy, querier: querier, + labelNamerConfig: ng.labelNamerConfig, + metrics: ng.metrics, } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) @@ -1144,7 +1220,10 @@ type evaluator struct { noStepSubqueryIntervalFn func(rangeMillis int64) int64 enableDelayedNameRemoval bool enableTypeAndUnitLabels bool + infoResourceStrategy InfoResourceStrategy querier storage.Querier + labelNamerConfig *LabelNamerConfig + metrics *engineMetrics } // errorf causes a panic with the input formatted into an error. 
@@ -2323,7 +2402,10 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, enableDelayedNameRemoval: ev.enableDelayedNameRemoval, enableTypeAndUnitLabels: ev.enableTypeAndUnitLabels, + infoResourceStrategy: ev.infoResourceStrategy, querier: ev.querier, + labelNamerConfig: ev.labelNamerConfig, + metrics: ev.metrics, } if e.Step != 0 { @@ -2364,7 +2446,10 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, enableDelayedNameRemoval: ev.enableDelayedNameRemoval, enableTypeAndUnitLabels: ev.enableTypeAndUnitLabels, + infoResourceStrategy: ev.infoResourceStrategy, querier: ev.querier, + labelNamerConfig: ev.labelNamerConfig, + metrics: ev.metrics, } res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples @@ -3216,8 +3301,6 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram return lhs, nil, lhs <= rhs, nil, nil case parser.ATAN2: return math.Atan2(lhs, rhs), nil, true, nil, nil - case parser.TRIM_LOWER, parser.TRIM_UPPER: - return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("float", parser.ItemTypeStr[op], "float", pos) } } case hlhs == nil && hrhs != nil: @@ -3225,7 +3308,7 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram switch op { case parser.MUL: return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil, nil - case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.TRIM_LOWER, parser.TRIM_UPPER, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("float", parser.ItemTypeStr[op], "histogram", pos) } } @@ -3236,10 +3319,6 @@ func 
vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil, nil case parser.DIV: return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil, nil - case parser.TRIM_UPPER: - return 0, hlhs.TrimBuckets(rhs, true), true, nil, nil - case parser.TRIM_LOWER: - return 0, hlhs.TrimBuckets(rhs, false), true, nil, nil case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", parser.ItemTypeStr[op], "float", pos) } @@ -3280,7 +3359,7 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram case parser.NEQ: // This operation expects that both histograms are compacted. return 0, hlhs, !hlhs.Equals(hrhs), nil, nil - case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2, parser.TRIM_LOWER, parser.TRIM_UPPER: + case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: return 0, nil, false, nil, annotations.NewIncompatibleTypesInBinOpInfo("histogram", parser.ItemTypeStr[op], "histogram", pos) } } diff --git a/vendor/github.com/prometheus/prometheus/promql/info.go b/vendor/github.com/prometheus/prometheus/promql/info.go index 97a79cd0f15..6cd4742bc70 100644 --- a/vendor/github.com/prometheus/prometheus/promql/info.go +++ b/vendor/github.com/prometheus/prometheus/promql/info.go @@ -17,11 +17,13 @@ import ( "context" "errors" "fmt" + "maps" "slices" "strings" "github.com/grafana/regexp" "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" @@ -33,12 +35,472 @@ const targetInfo = "target_info" // identifyingLabels are the labels we consider as identifying for info metrics. 
// Currently hard coded, so we don't need knowledge of individual info metrics. +// Used by the target_info fallback path. var identifyingLabels = []string{"instance", "job"} +// infoMode describes which enrichment paths info() should use. +type infoMode int + +const ( + // infoModeNativeOnly uses only native metadata (resource attributes from TSDB). + // Used when __name__ is absent or exactly "target_info". + infoModeNativeOnly infoMode = iota + // infoModeMetricJoinOnly uses only metric-join (querying actual info metrics). + // Used when native metadata is disabled or __name__ can't match "target_info". + infoModeMetricJoinOnly + // infoModeHybrid combines native metadata for the target_info portion and + // metric-join for other info metrics. Used when __name__ can match both + // "target_info" and other names (e.g., __name__=~"target_info|build_info"). + infoModeHybrid +) + +func (m infoMode) String() string { + switch m { + case infoModeNativeOnly: + return "native" + case infoModeMetricJoinOnly: + return "metric-join" + case infoModeHybrid: + return "hybrid" + default: + return "unknown" + } +} + // evalInfo implements the info PromQL function. +// It routes between native metadata, metric-join, or a hybrid of both +// depending on the infoResourceStrategy and the __name__ matcher. func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { + mode := ev.classifyInfoMode(args) + if ev.metrics != nil { + ev.metrics.infoFunctionCalls.WithLabelValues(mode.String()).Inc() + } + switch mode { + case infoModeNativeOnly: + return ev.evalInfoNativeMetadata(ctx, args) + case infoModeHybrid: + return ev.evalInfoHybrid(ctx, args) + default: + return ev.evalInfoTargetInfo(ctx, args) + } +} + +// classifyInfoMode determines which enrichment path(s) info() should use. 
+func (ev *evaluator) classifyInfoMode(args parser.Expressions) infoMode { + if ev.infoResourceStrategy == InfoResourceStrategyTargetInfo { + return infoModeMetricJoinOnly + } + if len(args) <= 1 { + return infoModeNativeOnly + } + + hasNameMatcher := false + matchesTargetInfo := false + onlyTargetInfo := true + + for _, m := range args[1].(*parser.VectorSelector).LabelMatchers { + if m.Name != model.MetricNameLabel { + continue + } + hasNameMatcher = true + if m.Matches(targetInfo) { + matchesTargetInfo = true + } + if m.Type != labels.MatchEqual || m.Value != targetInfo { + onlyTargetInfo = false + } + } + + if !hasNameMatcher { + return infoModeNativeOnly + } + if !matchesTargetInfo { + return infoModeMetricJoinOnly + } + if onlyTargetInfo { + return infoModeNativeOnly + } + return infoModeHybrid +} + +// --------------------------------------------------------------------------- +// Native metadata path (ResourceQuerier-based) +// --------------------------------------------------------------------------- + +// evalInfoNativeMetadata enriches series with resource attributes stored in the TSDB. +// When strategy is "hybrid", it also performs target_info metric-join +// for backwards compatibility with data ingested before native metadata was enabled. +func (ev *evaluator) evalInfoNativeMetadata(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { val, annots := ev.eval(ctx, args[0]) mat := val.(Matrix) + mat = ev.applyNativeMetadata(ctx, mat, args) + + if ev.infoResourceStrategy == InfoResourceStrategyHybrid { + // Also join with target_info metrics for backwards compatibility. + // Native metadata labels already on the series take precedence + // (errorOnBaseConflict=false, so conflicts are silently resolved). + res, ws := ev.applyMetricJoin(ctx, mat, args, false, false) + annots.Merge(ws) + return res, annots + } + + return mat, annots +} + +// applyNativeMetadata enriches a matrix with resource attributes from the TSDB. 
+// The __name__ matcher in args is ignored; only non-__name__ matchers are used +// to filter which resource attributes to include. +func (ev *evaluator) applyNativeMetadata(ctx context.Context, mat Matrix, args parser.Expressions) Matrix { + rq, ok := ev.querier.(storage.ResourceQuerier) + if !ok { + return mat + } + + mappings := ev.buildAttrNameMappings(rq) + + dataLabelMatchers := map[string][]*labels.Matcher{} + if len(args) > 1 { + labelSelector := args[1].(*parser.VectorSelector) + for _, m := range labelSelector.LabelMatchers { + attrName := m.Name + if mappings != nil { + if original, ok := mappings.toOTel[m.Name]; ok { + attrName = original + } + } + dataLabelMatchers[attrName] = append(dataLabelMatchers[attrName], m) + } + } + delete(dataLabelMatchers, model.MetricNameLabel) + + return ev.enrichWithResourceAttrs(ctx, mat, rq, dataLabelMatchers, mappings) +} + +// attrNameMappings contains bidirectional mappings between OTel and Prometheus attribute names. +type attrNameMappings struct { + // toOTel maps Prometheus label names to original OTel attribute names (for filtering) + toOTel map[string]string + // toPrometheus maps OTel attribute names to Prometheus label names (for output) + toPrometheus map[string]string +} + +// buildAttrNameMappings builds bidirectional mappings between OTel and Prometheus attribute names. +// This uses the same LabelNamer configuration as the API to ensure consistent name translation. 
+func (ev *evaluator) buildAttrNameMappings(rq storage.ResourceQuerier) *attrNameMappings { + if ev.labelNamerConfig == nil { + // No LabelNamer config, can't build mappings + return nil + } + + labelNamer := &otlptranslator.LabelNamer{ + UTF8Allowed: ev.labelNamerConfig.UTF8Allowed, + UnderscoreLabelSanitization: ev.labelNamerConfig.UnderscoreLabelSanitization, + PreserveMultipleUnderscores: ev.labelNamerConfig.PreserveMultipleUnderscores, + } + + mappings := &attrNameMappings{ + toOTel: make(map[string]string), + toPrometheus: make(map[string]string), + } + + // Iterate all unique attribute names and build both mappings + err := rq.IterUniqueAttributeNames(func(originalName string) { + // Translate the original OTel name to Prometheus format + translatedName, err := labelNamer.Build(originalName) + if err != nil { + // Skip attributes that can't be translated + return + } + mappings.toOTel[translatedName] = originalName + mappings.toPrometheus[originalName] = translatedName + }) + if err != nil { + // On error, return nil to fall back to no translation + return nil + } + + return mappings +} + +// enrichWithResourceAttrs enriches each series in mat with resource attributes. +func (ev *evaluator) enrichWithResourceAttrs(ctx context.Context, mat Matrix, rq storage.ResourceQuerier, dataLabelMatchers map[string][]*labels.Matcher, mappings *attrNameMappings) Matrix { + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + originalNumSamples := ev.currentSamples + + // Keep a copy of the original point slices so they can be returned to the pool. 
+ origMatrix := make(Matrix, len(mat)) + copy(origMatrix, mat) + + type seriesAndTimestamp struct { + Series + ts int64 + } + seriess := make(map[uint64]seriesAndTimestamp, len(mat)) + tempNumSamples := ev.currentSamples + + baseVector := make(Vector, 0, len(mat)) + enh := &EvalNodeHelper{ + Out: make(Vector, 0, len(mat)), + } + + for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + if err := contextDone(ctx, "expression evaluation"); err != nil { + ev.error(err) + } + + // Reset number of samples in memory after each timestamp. + ev.currentSamples = tempNumSamples + // Gather input vectors for this timestamp. + baseVector, _ = ev.gatherVector(ts, mat, baseVector, nil, nil) + + enh.Ts = ts + result := ev.enrichVectorWithResourceAttrs(baseVector, rq, ts, enh, dataLabelMatchers, mappings) + enh.Out = result[:0] // Reuse result vector. + + vecNumSamples := result.TotalSamples() + ev.currentSamples += vecNumSamples + tempNumSamples += vecNumSamples + ev.samplesStats.UpdatePeak(ev.currentSamples) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + + // Add samples in result vector to output series. + for _, sample := range result { + h := labels.StableHash(sample.Metric) + ss, exists := seriess[h] + if exists { + if ss.ts == ts { + ev.errorf("vector cannot contain metrics with the same labelset") + } + ss.ts = ts + } else { + ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} + } + addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps) + seriess[h] = ss + } + } + + // Reuse the original point slices. + for _, s := range origMatrix { + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) + } + + // Assemble the output matrix. 
+ numSamples := 0 + output := make(Matrix, 0, len(seriess)) + for _, ss := range seriess { + numSamples += len(ss.Floats) + totalHPointSize(ss.Histograms) + output = append(output, ss.Series) + } + ev.currentSamples = originalNumSamples + numSamples + ev.samplesStats.UpdatePeak(ev.currentSamples) + return output +} + +// enrichVectorWithResourceAttrs enriches each sample in the vector with resource attributes. +func (*evaluator) enrichVectorWithResourceAttrs(base Vector, rq storage.ResourceQuerier, timestamp int64, enh *EvalNodeHelper, dataLabelMatchers map[string][]*labels.Matcher, mappings *attrNameMappings) Vector { + if len(base) == 0 { + return nil + } + + // Reusable allAttrs map — cleared and repopulated per series instead of + // allocating a new map per series per timestamp. + allAttrs := make(map[string]string, 16) + + for _, bs := range base { + // Use StableHash because resource attributes are keyed by StableHash (not Hash) + hash := labels.StableHash(bs.Metric) + + // Look up resource attributes for this series at this timestamp + rv, found := rq.GetResourceAt(hash, timestamp) + if !found || rv == nil { + // No resource attributes found. + // Check if filters reference labels that don't exist on base metric. + // If a filter requires non-empty value for a non-existent label, skip this series. + if hasUnmatchedFilter(bs.Metric, dataLabelMatchers) { + continue + } + // Otherwise return the original sample unchanged + enh.Out = append(enh.Out, Sample{ + Metric: bs.Metric, + F: bs.F, + H: bs.H, + }) + continue + } + + // Combine all resource attributes for matching. + // Reuse the allAttrs map via clear() to avoid allocation. 
+ clear(allAttrs) + maps.Copy(allAttrs, rv.Identifying) + maps.Copy(allAttrs, rv.Descriptive) + + // If filters are specified, check that ALL matchers are satisfied + if len(dataLabelMatchers) > 0 { + if !allMatchersSatisfied(allAttrs, dataLabelMatchers) { + // At least one matcher didn't match, skip this series entirely + continue + } + } + + // Build the set of labels from the base metric + baseLabels := bs.Metric.Map() + enh.resetBuilder(bs.Metric) + + // Add resource attributes (both identifying and descriptive) + // Skip attributes that clash with existing labels + addAttrsToBuilder(allAttrs, baseLabels, dataLabelMatchers, mappings, enh) + + enh.Out = append(enh.Out, Sample{ + Metric: enh.lb.Labels(), + F: bs.F, + H: bs.H, + }) + } + + return enh.Out +} + +// allMatchersSatisfied checks if all matchers in dataLabelMatchers are satisfied by the attributes. +func allMatchersSatisfied(attrs map[string]string, dataLabelMatchers map[string][]*labels.Matcher) bool { + for attrName, matchers := range dataLabelMatchers { + value, exists := attrs[attrName] + if !exists { + // Attribute doesn't exist - check if matchers accept empty string + for _, m := range matchers { + if !m.Matches("") { + return false + } + } + continue + } + // Check if the value matches all matchers for this attribute + for _, m := range matchers { + if !m.Matches(value) { + return false + } + } + } + return true +} + +// hasUnmatchedFilter returns true if any filter references a label that doesn't exist +// on the base metric and requires a non-empty value. +// This is used when no resource attributes are found to decide if the series should be skipped. 
+func hasUnmatchedFilter(metric labels.Labels, dataLabelMatchers map[string][]*labels.Matcher) bool { + metricMap := metric.Map() + for attrName, matchers := range dataLabelMatchers { + // Check if this attribute exists on the base metric (either by original or translated name) + if _, exists := metricMap[attrName]; exists { + // Label exists on base metric, filter is satisfied + continue + } + // Label doesn't exist on base metric, check if matchers require non-empty value + for _, m := range matchers { + if !m.Matches("") { + // This matcher requires non-empty value for a non-existent label + return true + } + } + } + return false +} + +// addAttrsToBuilder adds attributes from attrs to the label builder, +// filtering by dataLabelMatchers and skipping attributes that clash with baseLabels. +// If mappings is provided, attribute names are translated to Prometheus-compatible names. +// Note: This function assumes allMatchersSatisfied() has already verified the matchers. +func addAttrsToBuilder( + attrs map[string]string, + baseLabels map[string]string, + dataLabelMatchers map[string][]*labels.Matcher, + mappings *attrNameMappings, + enh *EvalNodeHelper, +) { + for name, value := range attrs { + // Determine the output label name (translated if mappings available) + outputName := name + if mappings != nil { + if translated, ok := mappings.toPrometheus[name]; ok { + outputName = translated + } + } + + // Skip if this attribute already exists as a label on the base metric + // Check both the original and translated names + if _, exists := baseLabels[name]; exists { + continue + } + if _, exists := baseLabels[outputName]; exists { + continue + } + + // If dataLabelMatchers is specified (non-empty), only add attributes that are in the filter + if len(dataLabelMatchers) > 0 { + if _, hasMatchers := dataLabelMatchers[name]; !hasMatchers { + // This attribute name is not in the filter, skip it + continue + } + } + + // Add the attribute to the label builder using the 
translated name + enh.lb.Set(outputName, value) + } +} + +// --------------------------------------------------------------------------- +// Hybrid path (native metadata + metric-join) +// --------------------------------------------------------------------------- + +// evalInfoHybrid combines native metadata and metric-join enrichment. +// Native metadata handles the target_info portion (resource attributes from TSDB), +// while metric-join handles other info metrics (e.g. build_info). +// Used when __name__ can match both "target_info" and other info metric names. +// +// When strategy is "hybrid", target_info is also included in +// metric-join (not excluded), and conflicts are silently resolved with native +// metadata taking precedence. This supports backwards compatibility. +func (ev *evaluator) evalInfoHybrid(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { + val, annots := ev.eval(ctx, args[0]) + mat := val.(Matrix) + + // Step 1: Enrich with native metadata (resource attributes for the target_info portion). + // Only pass args[0] (no label selector) so that non-__name__ data label matchers + // don't incorrectly filter/drop series based on resource attributes. + // Those matchers are meant for the metric-join step. + mat = ev.applyNativeMetadata(ctx, mat, args[:1]) + + // Step 2: Enrich with metric-join. 
+ excludeTargetInfo := ev.infoResourceStrategy == InfoResourceStrategyResourceAttributes + errorOnBaseConflict := ev.infoResourceStrategy == InfoResourceStrategyResourceAttributes + res, ws := ev.applyMetricJoin(ctx, mat, args, excludeTargetInfo, errorOnBaseConflict) + annots.Merge(ws) + return res, annots +} + +// --------------------------------------------------------------------------- +// Metric-join path (used when native metadata is disabled, or for non-target_info +// info metrics in hybrid mode) +// --------------------------------------------------------------------------- + +// evalInfoTargetInfo implements the info PromQL function using metric joins. +func (ev *evaluator) evalInfoTargetInfo(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { + val, annots := ev.eval(ctx, args[0]) + mat := val.(Matrix) + res, ws := ev.applyMetricJoin(ctx, mat, args, false, false) + annots.Merge(ws) + return res, annots +} + +// applyMetricJoin enriches a matrix by joining with info metric series. +// When excludeTargetInfo is true, target_info is excluded from the storage +// query (used in hybrid mode where native metadata handles target_info). +// When errorOnBaseConflict is true, an error is returned if a base label +// (e.g. from native metadata enrichment) conflicts with an info metric label. +func (ev *evaluator) applyMetricJoin(ctx context.Context, mat Matrix, args parser.Expressions, excludeTargetInfo, errorOnBaseConflict bool) (Matrix, annotations.Annotations) { // Map from data label name to matchers. dataLabelMatchers := map[string][]*labels.Matcher{} var infoNameMatchers []*labels.Matcher @@ -55,7 +517,7 @@ func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (par infoNameMatchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, targetInfo)} } - // Don't try to enrich info series. + // Don't try to enrich info series with themselves. 
ignoreSeries := map[uint64]struct{}{} for _, s := range mat { name := s.Metric.Get(model.MetricNameLabel) @@ -65,13 +527,14 @@ func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (par } selectHints := ev.infoSelectHints(args[0]) - infoSeries, ws, err := ev.fetchInfoSeries(ctx, mat, ignoreSeries, dataLabelMatchers, selectHints) + infoSeries, ws, err := ev.fetchInfoSeries(ctx, mat, ignoreSeries, dataLabelMatchers, selectHints, excludeTargetInfo) if err != nil { ev.error(err) } - annots.Merge(ws) - res, ws := ev.combineWithInfoSeries(ctx, mat, infoSeries, ignoreSeries, dataLabelMatchers) + var annots annotations.Annotations + annots.Merge(ws) + res, ws := ev.combineWithInfoSeries(ctx, mat, infoSeries, ignoreSeries, dataLabelMatchers, errorOnBaseConflict) annots.Merge(ws) return res, annots } @@ -126,8 +589,9 @@ func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints { // fetchInfoSeries fetches info series given matching identifying labels in mat. // Series in ignoreSeries are not fetched. +// When excludeTargetInfo is true, target_info is excluded from the storage query. // dataLabelMatchers may be mutated. -func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeries map[uint64]struct{}, dataLabelMatchers map[string][]*labels.Matcher, selectHints storage.SelectHints) (Matrix, annotations.Annotations, error) { +func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeries map[uint64]struct{}, dataLabelMatchers map[string][]*labels.Matcher, selectHints storage.SelectHints, excludeTargetInfo bool) (Matrix, annotations.Annotations, error) { removeNameFromDataLabelMatchers := func() { for name, ms := range dataLabelMatchers { ms = slices.DeleteFunc(ms, func(m *labels.Matcher) bool { @@ -204,6 +668,11 @@ func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeri // Default to using the target_info metric. 
infoLabelMatchers = append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, targetInfo)}, infoLabelMatchers...) } + if excludeTargetInfo { + // In hybrid mode, native metadata handles target_info. + // Exclude it from the metric-join storage query. + infoLabelMatchers = append(infoLabelMatchers, labels.MustNewMatcher(labels.MatchNotEqual, model.MetricNameLabel, targetInfo)) + } infoIt := ev.querier.Select(ctx, false, &selectHints, infoLabelMatchers...) infoSeries, ws, err := expandSeriesSet(ctx, infoIt) @@ -216,7 +685,10 @@ func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeri } // combineWithInfoSeries combines mat with select data labels from infoMat. -func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Matrix, ignoreSeries map[uint64]struct{}, dataLabelMatchers map[string][]*labels.Matcher) (Matrix, annotations.Annotations) { +// When errorOnBaseConflict is true, an error is returned if a label already +// on the base metric conflicts with an info metric label (used in hybrid mode +// to detect conflicts between native metadata and metric-join labels). 
+func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Matrix, ignoreSeries map[uint64]struct{}, dataLabelMatchers map[string][]*labels.Matcher, errorOnBaseConflict bool) (Matrix, annotations.Annotations) { buf := make([]byte, 0, 1024) lb := labels.NewScratchBuilder(0) sigFunction := func(name string) func(labels.Labels) string { @@ -283,7 +755,6 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat infoSigs[s.Metric.Hash()] = sigfs[name](s.Metric) } - var warnings annotations.Annotations for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) @@ -296,7 +767,7 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat infoVector, _ = ev.gatherVector(ts, infoMat, infoVector, nil, nil) enh.Ts = ts - result, err := ev.combineWithInfoVector(baseVector, infoVector, ignoreSeries, baseSigs, infoSigs, enh, dataLabelMatchers) + result, err := ev.combineWithInfoVector(baseVector, infoVector, ignoreSeries, baseSigs, infoSigs, enh, dataLabelMatchers, errorOnBaseConflict) if err != nil { ev.error(err) } @@ -314,7 +785,7 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat // Add samples in result vector to output series. for _, sample := range result { - h := sample.Metric.Hash() + h := labels.StableHash(sample.Metric) ss, exists := seriess[h] if exists { if ss.ts == ts { // If we've seen this output series before at this timestamp, it's a duplicate. @@ -345,12 +816,14 @@ func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Mat } ev.currentSamples = originalNumSamples + numSamples ev.samplesStats.UpdatePeak(ev.currentSamples) - return output, warnings + return output, nil } // combineWithInfoVector combines base and info Vectors. // Base series in ignoreSeries are not combined. 
-func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[uint64]struct{}, baseSigs map[uint64]map[string]string, infoSigs map[uint64]string, enh *EvalNodeHelper, dataLabelMatchers map[string][]*labels.Matcher) (Vector, error) { +// When errorOnBaseConflict is true, an error is returned if a label already +// on the base metric has a different value than the corresponding info metric label. +func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[uint64]struct{}, baseSigs map[uint64]map[string]string, infoSigs map[uint64]string, enh *EvalNodeHelper, dataLabelMatchers map[string][]*labels.Matcher, errorOnBaseConflict bool) (Vector, error) { if len(base) == 0 { return nil, nil // Short-circuit: nothing is going to match. } @@ -406,15 +879,12 @@ func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[u enh.resetBuilder(labels.Labels{}) // For every info metric name, try to find an info series with the same signature. - seenInfoMetrics := map[string]struct{}{} - for infoName, sig := range baseSigs[hash] { + matched := false + for _, sig := range baseSigs[hash] { is, exists := enh.rightStrSigs[sig] if !exists { continue } - if _, exists := seenInfoMetrics[infoName]; exists { - continue - } err := is.Metric.Validate(func(l labels.Label) error { if l.Name == model.MetricNameLabel { @@ -428,7 +898,10 @@ func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[u if v := enh.lb.Get(l.Name); v != "" && v != l.Value { return fmt.Errorf("conflicting label: %s", l.Name) } - if _, exists := baseLabels[l.Name]; exists { + if v, exists := baseLabels[l.Name]; exists { + if errorOnBaseConflict && v != l.Value { + return fmt.Errorf("conflicting label %s: enriched metric value %q, info metric value %q", l.Name, v, l.Value) + } // Skip labels already on the base metric. 
return nil } @@ -439,11 +912,11 @@ func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[u if err != nil { return nil, err } - seenInfoMetrics[infoName] = struct{}{} + matched = true } infoLbls := enh.lb.Labels() - if len(seenInfoMetrics) == 0 { + if !matched { // No info series matched this base series. If there's at least one data // label matcher not matching the empty string, we have to ignore this // series as there are no matching info series. diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index 4f821a5795e..1196002b763 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -98,8 +98,6 @@ EQLC EQL_REGEX GTE GTR -TRIM_UPPER -TRIM_LOWER LAND LOR LSS @@ -202,7 +200,7 @@ START_METRIC_SELECTOR // Operators are listed with increasing precedence. 
%left LOR %left LAND LUNLESS -%left EQLC GTE GTR LSS LTE NEQ TRIM_UPPER TRIM_LOWER +%left EQLC GTE GTR LSS LTE NEQ %left ADD SUB %left MUL DIV MOD ATAN2 %right POW @@ -293,8 +291,6 @@ binary_expr : expr ADD bin_modifier expr { $$ = yylex.(*parser).newBinar | expr EQLC bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr GTE bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr GTR bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } - | expr TRIM_UPPER bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } - | expr TRIM_LOWER bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr LAND bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr LOR bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } | expr LSS bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index ba20e191457..3a69f555163 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -78,71 +78,69 @@ const EQLC = 57385 const EQL_REGEX = 57386 const GTE = 57387 const GTR = 57388 -const TRIM_UPPER = 57389 -const TRIM_LOWER = 57390 -const LAND = 57391 -const LOR = 57392 -const LSS = 57393 -const LTE = 57394 -const LUNLESS = 57395 -const MOD = 57396 -const MUL = 57397 -const NEQ = 57398 -const NEQ_REGEX = 57399 -const POW = 57400 -const SUB = 57401 -const AT = 57402 -const ATAN2 = 57403 -const operatorsEnd = 57404 -const aggregatorsStart = 57405 -const AVG = 57406 -const BOTTOMK = 57407 -const COUNT = 57408 -const COUNT_VALUES = 57409 -const GROUP = 57410 -const MAX = 57411 -const MIN = 57412 -const 
QUANTILE = 57413 -const STDDEV = 57414 -const STDVAR = 57415 -const SUM = 57416 -const TOPK = 57417 -const LIMITK = 57418 -const LIMIT_RATIO = 57419 -const aggregatorsEnd = 57420 -const keywordsStart = 57421 -const BOOL = 57422 -const BY = 57423 -const GROUP_LEFT = 57424 -const GROUP_RIGHT = 57425 -const FILL = 57426 -const FILL_LEFT = 57427 -const FILL_RIGHT = 57428 -const IGNORING = 57429 -const OFFSET = 57430 -const SMOOTHED = 57431 -const ANCHORED = 57432 -const ON = 57433 -const WITHOUT = 57434 -const keywordsEnd = 57435 -const preprocessorStart = 57436 -const START = 57437 -const END = 57438 -const STEP = 57439 -const RANGE = 57440 -const preprocessorEnd = 57441 -const counterResetHintsStart = 57442 -const UNKNOWN_COUNTER_RESET = 57443 -const COUNTER_RESET = 57444 -const NOT_COUNTER_RESET = 57445 -const GAUGE_TYPE = 57446 -const counterResetHintsEnd = 57447 -const startSymbolsStart = 57448 -const START_METRIC = 57449 -const START_SERIES_DESCRIPTION = 57450 -const START_EXPRESSION = 57451 -const START_METRIC_SELECTOR = 57452 -const startSymbolsEnd = 57453 +const LAND = 57389 +const LOR = 57390 +const LSS = 57391 +const LTE = 57392 +const LUNLESS = 57393 +const MOD = 57394 +const MUL = 57395 +const NEQ = 57396 +const NEQ_REGEX = 57397 +const POW = 57398 +const SUB = 57399 +const AT = 57400 +const ATAN2 = 57401 +const operatorsEnd = 57402 +const aggregatorsStart = 57403 +const AVG = 57404 +const BOTTOMK = 57405 +const COUNT = 57406 +const COUNT_VALUES = 57407 +const GROUP = 57408 +const MAX = 57409 +const MIN = 57410 +const QUANTILE = 57411 +const STDDEV = 57412 +const STDVAR = 57413 +const SUM = 57414 +const TOPK = 57415 +const LIMITK = 57416 +const LIMIT_RATIO = 57417 +const aggregatorsEnd = 57418 +const keywordsStart = 57419 +const BOOL = 57420 +const BY = 57421 +const GROUP_LEFT = 57422 +const GROUP_RIGHT = 57423 +const FILL = 57424 +const FILL_LEFT = 57425 +const FILL_RIGHT = 57426 +const IGNORING = 57427 +const OFFSET = 57428 +const SMOOTHED = 57429 +const 
ANCHORED = 57430 +const ON = 57431 +const WITHOUT = 57432 +const keywordsEnd = 57433 +const preprocessorStart = 57434 +const START = 57435 +const END = 57436 +const STEP = 57437 +const RANGE = 57438 +const preprocessorEnd = 57439 +const counterResetHintsStart = 57440 +const UNKNOWN_COUNTER_RESET = 57441 +const COUNTER_RESET = 57442 +const NOT_COUNTER_RESET = 57443 +const GAUGE_TYPE = 57444 +const counterResetHintsEnd = 57445 +const startSymbolsStart = 57446 +const START_METRIC = 57447 +const START_SERIES_DESCRIPTION = 57448 +const START_EXPRESSION = 57449 +const START_METRIC_SELECTOR = 57450 +const startSymbolsEnd = 57451 var yyToknames = [...]string{ "$end", @@ -191,8 +189,6 @@ var yyToknames = [...]string{ "EQL_REGEX", "GTE", "GTR", - "TRIM_UPPER", - "TRIM_LOWER", "LAND", "LOR", "LSS", @@ -269,372 +265,369 @@ var yyExca = [...]int16{ 1, -1, -2, 0, -1, 44, - 1, 163, - 10, 163, - 24, 163, + 1, 161, + 10, 161, + 24, 161, -2, 0, -1, 75, + 2, 204, + 15, 204, + 79, 204, + 90, 204, + -2, 115, + -1, 76, + 2, 205, + 15, 205, + 79, 205, + 90, 205, + -2, 116, + -1, 77, 2, 206, 15, 206, - 81, 206, - 92, 206, - -2, 117, - -1, 76, + 79, 206, + 90, 206, + -2, 118, + -1, 78, 2, 207, 15, 207, - 81, 207, - 92, 207, - -2, 118, - -1, 77, + 79, 207, + 90, 207, + -2, 119, + -1, 79, 2, 208, 15, 208, - 81, 208, - 92, 208, - -2, 120, - -1, 78, + 79, 208, + 90, 208, + -2, 123, + -1, 80, 2, 209, 15, 209, - 81, 209, - 92, 209, - -2, 121, - -1, 79, + 79, 209, + 90, 209, + -2, 128, + -1, 81, 2, 210, 15, 210, - 81, 210, - 92, 210, - -2, 125, - -1, 80, + 79, 210, + 90, 210, + -2, 130, + -1, 82, 2, 211, 15, 211, - 81, 211, - 92, 211, - -2, 130, - -1, 81, + 79, 211, + 90, 211, + -2, 132, + -1, 83, 2, 212, 15, 212, - 81, 212, - 92, 212, - -2, 132, - -1, 82, + 79, 212, + 90, 212, + -2, 133, + -1, 84, 2, 213, 15, 213, - 81, 213, - 92, 213, + 79, 213, + 90, 213, -2, 134, - -1, 83, + -1, 85, 2, 214, 15, 214, - 81, 214, - 92, 214, + 79, 214, + 90, 214, -2, 135, - -1, 84, + -1, 86, 2, 215, 15, 215, - 
81, 215, - 92, 215, + 79, 215, + 90, 215, -2, 136, - -1, 85, + -1, 87, 2, 216, 15, 216, - 81, 216, - 92, 216, - -2, 137, - -1, 86, + 79, 216, + 90, 216, + -2, 140, + -1, 88, 2, 217, 15, 217, - 81, 217, - 92, 217, - -2, 138, - -1, 87, - 2, 218, - 15, 218, - 81, 218, - 92, 218, - -2, 142, - -1, 88, - 2, 219, - 15, 219, - 81, 219, - 92, 219, - -2, 143, - -1, 142, - 41, 290, - 42, 290, - 54, 290, - 55, 290, - 59, 290, + 79, 217, + 90, 217, + -2, 141, + -1, 140, + 41, 288, + 42, 288, + 52, 288, + 53, 288, + 57, 288, -2, 22, - -1, 262, - 9, 275, - 12, 275, - 13, 275, - 18, 275, - 19, 275, - 25, 275, - 41, 275, - 49, 275, - 50, 275, - 53, 275, - 59, 275, - 64, 275, - 65, 275, - 66, 275, - 67, 275, - 68, 275, - 69, 275, - 70, 275, - 71, 275, - 72, 275, - 73, 275, - 74, 275, - 75, 275, - 76, 275, - 77, 275, - 81, 275, - 84, 275, - 85, 275, - 86, 275, - 88, 275, - 89, 275, - 90, 275, - 92, 275, - 95, 275, - 96, 275, - 97, 275, - 98, 275, + -1, 258, + 9, 273, + 12, 273, + 13, 273, + 18, 273, + 19, 273, + 25, 273, + 41, 273, + 47, 273, + 48, 273, + 51, 273, + 57, 273, + 62, 273, + 63, 273, + 64, 273, + 65, 273, + 66, 273, + 67, 273, + 68, 273, + 69, 273, + 70, 273, + 71, 273, + 72, 273, + 73, 273, + 74, 273, + 75, 273, + 79, 273, + 82, 273, + 83, 273, + 84, 273, + 86, 273, + 87, 273, + 88, 273, + 90, 273, + 93, 273, + 94, 273, + 95, 273, + 96, 273, -2, 0, - -1, 263, - 9, 275, - 12, 275, - 13, 275, - 18, 275, - 19, 275, - 25, 275, - 41, 275, - 49, 275, - 50, 275, - 53, 275, - 59, 275, - 64, 275, - 65, 275, - 66, 275, - 67, 275, - 68, 275, - 69, 275, - 70, 275, - 71, 275, - 72, 275, - 73, 275, - 74, 275, - 75, 275, - 76, 275, - 77, 275, - 81, 275, - 84, 275, - 85, 275, - 86, 275, - 88, 275, - 89, 275, - 90, 275, - 92, 275, - 95, 275, - 96, 275, - 97, 275, - 98, 275, + -1, 259, + 9, 273, + 12, 273, + 13, 273, + 18, 273, + 19, 273, + 25, 273, + 41, 273, + 47, 273, + 48, 273, + 51, 273, + 57, 273, + 62, 273, + 63, 273, + 64, 273, + 65, 273, + 66, 273, + 67, 273, + 68, 273, + 69, 
273, + 70, 273, + 71, 273, + 72, 273, + 73, 273, + 74, 273, + 75, 273, + 79, 273, + 82, 273, + 83, 273, + 84, 273, + 86, 273, + 87, 273, + 88, 273, + 90, 273, + 93, 273, + 94, 273, + 95, 273, + 96, 273, -2, 0, } const yyPrivate = 57344 -const yyLast = 1241 +const yyLast = 1224 var yyAct = [...]int16{ - 61, 369, 194, 435, 357, 442, 437, 299, 251, 205, - 98, 51, 149, 197, 73, 96, 235, 260, 375, 376, - 134, 261, 68, 135, 133, 132, 449, 450, 451, 452, - 418, 419, 198, 137, 136, 257, 258, 259, 262, 263, - 165, 70, 126, 129, 131, 125, 432, 431, 397, 125, - 348, 159, 236, 227, 237, 130, 128, 395, 395, 131, - 127, 404, 464, 453, 421, 202, 457, 140, 420, 142, - 6, 385, 103, 105, 106, 201, 107, 108, 109, 110, - 111, 112, 113, 114, 115, 116, 117, 118, 138, 119, - 120, 124, 104, 119, 239, 124, 46, 203, 436, 135, - 126, 129, 151, 383, 382, 230, 238, 240, 229, 255, - 136, 143, 370, 130, 128, 204, 380, 131, 127, 121, - 123, 122, 190, 121, 123, 122, 207, 212, 213, 214, - 215, 216, 217, 185, 379, 65, 189, 208, 208, 208, - 208, 208, 208, 208, 186, 64, 378, 231, 209, 209, - 209, 209, 209, 209, 209, 220, 223, 218, 208, 219, - 347, 345, 145, 139, 65, 141, 351, 89, 241, 209, - 243, 144, 426, 253, 64, 2, 3, 4, 5, 125, - 349, 352, 291, 346, 344, 90, 249, 286, 247, 237, - 202, 425, 254, 252, 290, 203, 443, 255, 281, 353, - 201, 284, 289, 288, 287, 208, 103, 105, 137, 209, - 286, 246, 245, 204, 160, 129, 209, 292, 293, 116, - 117, 208, 203, 119, 120, 124, 104, 130, 128, 239, - 146, 131, 209, 152, 226, 244, 222, 202, 91, 42, - 204, 238, 240, 150, 151, 196, 343, 201, 7, 221, - 415, 342, 202, 121, 123, 122, 100, 256, 414, 413, - 371, 372, 201, 264, 265, 266, 267, 268, 269, 270, - 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, - 373, 374, 377, 350, 203, 340, 381, 412, 99, 391, - 384, 469, 158, 10, 390, 43, 207, 208, 97, 411, - 339, 208, 204, 93, 202, 386, 242, 208, 209, 389, - 468, 100, 209, 467, 201, 410, 471, 409, 209, 166, - 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, - 177, 178, 
179, 180, 181, 182, 203, 408, 199, 200, - 470, 407, 393, 406, 405, 59, 296, 466, 403, 102, - 295, 92, 1, 208, 204, 394, 396, 183, 398, 126, - 129, 399, 400, 402, 209, 294, 126, 129, 152, 234, - 401, 8, 130, 128, 233, 44, 131, 127, 150, 130, - 128, 422, 416, 131, 127, 74, 148, 232, 388, 58, - 99, 100, 208, 417, 155, 65, 57, 56, 428, 154, - 97, 193, 55, 209, 430, 64, 434, 387, 95, 438, - 439, 440, 153, 100, 424, 164, 445, 444, 447, 446, - 455, 456, 157, 441, 156, 458, 54, 89, 69, 454, - 53, 9, 9, 52, 302, 50, 208, 392, 459, 460, - 429, 162, 224, 461, 315, 90, 463, 209, 126, 129, - 321, 49, 163, 48, 427, 192, 191, 301, 465, 126, - 129, 130, 128, 161, 47, 131, 127, 60, 250, 208, - 472, 462, 130, 128, 354, 101, 131, 127, 248, 297, - 209, 317, 318, 210, 211, 319, 94, 448, 195, 300, - 62, 147, 0, 332, 0, 0, 303, 305, 307, 308, - 309, 320, 322, 325, 326, 327, 328, 329, 333, 334, - 0, 0, 304, 306, 310, 311, 312, 313, 314, 316, - 323, 338, 337, 324, 302, 0, 423, 330, 331, 335, - 336, 0, 228, 0, 315, 0, 0, 0, 0, 0, - 321, 0, 0, 0, 298, 126, 129, 301, 0, 0, - 0, 126, 129, 0, 0, 0, 0, 0, 130, 128, - 0, 0, 131, 127, 130, 128, 0, 0, 131, 127, - 0, 317, 318, 0, 0, 319, 0, 0, 0, 0, - 0, 341, 0, 332, 0, 0, 303, 305, 307, 308, - 309, 320, 322, 325, 326, 327, 328, 329, 333, 334, - 0, 0, 304, 306, 310, 311, 312, 313, 314, 316, - 323, 338, 337, 324, 0, 126, 129, 330, 331, 335, - 336, 65, 0, 0, 63, 91, 0, 66, 130, 128, - 25, 64, 131, 127, 225, 0, 0, 67, 0, 0, + 61, 363, 190, 429, 351, 436, 431, 293, 247, 201, + 98, 51, 147, 193, 369, 96, 231, 412, 413, 370, + 132, 133, 68, 130, 73, 163, 194, 131, 443, 444, + 445, 446, 134, 135, 256, 253, 254, 255, 257, 258, + 259, 129, 70, 426, 123, 425, 124, 127, 391, 342, + 157, 458, 223, 198, 447, 389, 415, 128, 126, 345, + 451, 129, 125, 197, 414, 465, 398, 138, 379, 140, + 6, 103, 105, 106, 346, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 199, 117, 118, 122, 104, + 347, 136, 343, 46, 124, 127, 389, 133, 334, 251, + 397, 200, 149, 377, 192, 
128, 126, 199, 134, 129, + 125, 198, 141, 333, 420, 396, 119, 121, 120, 123, + 186, 197, 395, 200, 203, 208, 209, 210, 211, 212, + 213, 181, 376, 419, 430, 204, 204, 204, 204, 204, + 204, 204, 182, 199, 185, 227, 205, 205, 205, 205, + 205, 205, 205, 216, 219, 215, 204, 341, 214, 200, + 137, 117, 139, 122, 339, 385, 237, 205, 239, 464, + 384, 249, 226, 2, 3, 4, 5, 91, 290, 225, + 340, 123, 289, 280, 250, 383, 364, 338, 124, 127, + 284, 119, 121, 120, 275, 195, 196, 288, 218, 128, + 126, 204, 460, 129, 125, 205, 280, 278, 158, 105, + 374, 217, 205, 286, 287, 423, 243, 204, 241, 114, + 115, 124, 127, 117, 373, 122, 104, 372, 205, 222, + 143, 437, 128, 126, 124, 127, 129, 125, 65, 242, + 149, 240, 337, 142, 42, 128, 126, 418, 64, 129, + 125, 285, 252, 119, 121, 120, 365, 366, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 344, 371, 127, 367, 368, 198, 283, + 375, 124, 127, 282, 378, 128, 126, 281, 197, 129, + 203, 204, 128, 126, 135, 204, 129, 125, 198, 380, + 65, 204, 205, 144, 7, 409, 205, 408, 197, 407, + 64, 406, 205, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 202, 232, + 199, 233, 89, 156, 417, 65, 387, 405, 463, 233, + 404, 189, 102, 224, 403, 64, 200, 204, 90, 388, + 390, 10, 392, 124, 127, 393, 394, 462, 205, 402, + 461, 93, 124, 127, 128, 126, 401, 89, 129, 125, + 400, 235, 399, 128, 126, 416, 410, 129, 125, 235, + 8, 234, 236, 90, 44, 59, 204, 411, 43, 234, + 236, 92, 422, 188, 187, 1, 179, 205, 424, 155, + 428, 154, 230, 432, 433, 434, 150, 229, 74, 335, + 439, 438, 441, 440, 449, 450, 148, 435, 58, 452, + 228, 206, 207, 448, 336, 57, 296, 56, 386, 100, + 204, 69, 453, 454, 9, 9, 309, 455, 99, 55, + 457, 205, 315, 124, 127, 162, 421, 150, 97, 295, + 99, 54, 459, 53, 128, 126, 238, 148, 129, 125, + 97, 100, 153, 204, 466, 146, 52, 152, 95, 50, + 100, 311, 312, 100, 205, 313, 160, 220, 49, 161, + 151, 48, 159, 326, 47, 60, 297, 299, 301, 302, + 303, 314, 316, 319, 320, 321, 322, 323, 327, 328, + 
246, 456, 298, 300, 304, 305, 306, 307, 308, 310, + 317, 332, 331, 318, 296, 348, 101, 324, 325, 329, + 330, 245, 244, 291, 309, 198, 94, 442, 248, 191, + 315, 350, 251, 294, 292, 197, 62, 295, 349, 145, + 0, 0, 353, 354, 352, 359, 361, 358, 360, 355, + 356, 357, 362, 0, 0, 0, 0, 199, 0, 311, + 312, 0, 0, 313, 0, 0, 0, 0, 0, 0, + 0, 326, 0, 200, 297, 299, 301, 302, 303, 314, + 316, 319, 320, 321, 322, 323, 327, 328, 0, 0, + 298, 300, 304, 305, 306, 307, 308, 310, 317, 332, + 331, 318, 0, 0, 0, 324, 325, 329, 330, 65, + 0, 0, 63, 91, 0, 66, 427, 0, 25, 64, + 0, 0, 221, 0, 0, 67, 0, 353, 354, 352, + 359, 361, 358, 360, 355, 356, 357, 362, 0, 0, + 0, 89, 0, 0, 0, 0, 0, 21, 22, 0, + 0, 23, 0, 0, 0, 0, 0, 90, 0, 0, + 0, 0, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 0, 0, 0, 13, + 0, 0, 16, 17, 18, 0, 27, 41, 40, 0, + 33, 0, 0, 34, 35, 71, 72, 65, 45, 0, + 63, 91, 0, 66, 0, 0, 25, 64, 0, 0, + 0, 0, 0, 67, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 89, + 0, 0, 0, 0, 0, 21, 22, 0, 0, 23, + 0, 0, 0, 0, 0, 90, 0, 0, 0, 0, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 0, 0, 0, 13, 0, 0, + 16, 17, 18, 0, 27, 41, 40, 0, 33, 0, + 0, 34, 35, 71, 72, 65, 0, 0, 63, 91, + 0, 66, 0, 0, 25, 64, 0, 0, 0, 0, + 0, 67, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 89, 0, 0, + 0, 0, 0, 21, 22, 0, 0, 23, 0, 0, + 0, 0, 0, 90, 0, 0, 0, 0, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 0, 0, 0, 13, 0, 0, 16, 17, + 18, 0, 27, 41, 40, 0, 33, 20, 91, 34, + 35, 71, 72, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 89, 0, 0, 0, 0, 0, 0, - 0, 21, 22, 0, 0, 23, 0, 0, 0, 0, - 0, 90, 0, 0, 0, 0, 75, 76, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, - 0, 0, 0, 13, 0, 0, 16, 17, 18, 0, - 27, 41, 40, 0, 33, 0, 0, 34, 35, 71, - 72, 65, 45, 0, 63, 91, 0, 66, 356, 0, - 25, 64, 0, 0, 0, 355, 0, 67, 0, 359, - 360, 358, 365, 367, 364, 366, 361, 362, 363, 368, - 0, 0, 0, 89, 0, 0, 0, 0, 0, 0, - 0, 21, 22, 0, 0, 23, 0, 0, 0, 0, - 0, 90, 0, 0, 0, 
0, 75, 76, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, - 0, 0, 0, 13, 0, 0, 16, 17, 18, 0, - 27, 41, 40, 0, 33, 0, 0, 34, 35, 71, - 72, 65, 0, 0, 63, 91, 0, 66, 433, 0, - 25, 64, 0, 0, 0, 0, 0, 67, 0, 359, - 360, 358, 365, 367, 364, 366, 361, 362, 363, 368, - 0, 0, 0, 89, 0, 0, 206, 0, 0, 0, - 0, 21, 22, 65, 0, 23, 0, 0, 0, 193, - 0, 90, 0, 64, 0, 0, 75, 76, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, - 0, 0, 0, 13, 0, 89, 16, 17, 18, 0, - 27, 41, 40, 0, 33, 20, 91, 34, 35, 71, - 72, 25, 0, 90, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 192, 191, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 184, 0, 0, - 0, 0, 21, 22, 65, 0, 23, 0, 0, 0, - 193, 210, 211, 0, 64, 0, 0, 11, 12, 14, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 21, 22, 0, 0, 23, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 11, 12, 14, 15, 19, 24, 26, 28, 29, 30, 31, 32, 36, - 37, 0, 0, 0, 13, 0, 89, 16, 17, 18, + 37, 0, 0, 0, 13, 0, 0, 16, 17, 18, 0, 27, 41, 40, 0, 33, 20, 42, 34, 35, - 38, 39, 25, 0, 90, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 192, 191, 0, 0, 0, 0, + 38, 39, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 21, 22, 0, 0, 23, 0, 0, - 0, 0, 187, 188, 0, 0, 0, 0, 11, 12, - 14, 15, 19, 24, 26, 28, 29, 30, 31, 32, - 36, 37, 125, 0, 0, 13, 0, 0, 16, 17, - 18, 0, 27, 41, 40, 0, 33, 0, 0, 34, - 35, 38, 39, 125, 0, 0, 0, 0, 0, 103, - 105, 106, 0, 107, 108, 109, 110, 111, 112, 113, - 114, 115, 116, 117, 118, 0, 119, 120, 124, 104, - 103, 105, 106, 0, 107, 108, 109, 110, 111, 0, - 113, 114, 115, 116, 117, 118, 0, 119, 120, 124, - 104, 0, 0, 125, 0, 0, 121, 123, 122, 0, - 65, 0, 0, 0, 0, 0, 193, 0, 125, 0, - 64, 0, 0, 0, 0, 0, 0, 121, 123, 122, - 103, 105, 106, 0, 107, 108, 109, 110, 0, 0, - 113, 114, 89, 116, 117, 118, 105, 119, 120, 124, - 104, 65, 0, 0, 0, 0, 0, 285, 116, 117, - 90, 64, 119, 0, 124, 104, 0, 0, 0, 0, - 192, 191, 0, 0, 0, 0, 0, 121, 123, 122, - 0, 0, 0, 89, 0, 0, 0, 0, 0, 0, - 0, 0, 121, 123, 122, 0, 0, 0, 210, 211, - 0, 90, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 192, 191, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 282, - 283, + 0, 21, 22, 0, 0, 23, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 12, 14, 15, + 19, 24, 26, 28, 29, 30, 31, 32, 36, 37, + 123, 0, 0, 13, 0, 0, 16, 17, 18, 0, + 27, 41, 40, 0, 33, 0, 0, 34, 35, 38, + 39, 123, 0, 0, 0, 0, 0, 103, 105, 106, + 0, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 0, 117, 118, 122, 104, 0, 0, 103, 105, + 106, 0, 107, 108, 109, 0, 111, 112, 113, 114, + 115, 116, 382, 117, 118, 122, 104, 0, 0, 65, + 0, 123, 119, 121, 120, 189, 65, 0, 0, 64, + 0, 381, 189, 0, 0, 0, 64, 0, 0, 0, + 0, 0, 0, 119, 121, 120, 0, 0, 103, 105, + 106, 89, 107, 108, 0, 0, 111, 112, 89, 114, + 115, 116, 180, 117, 118, 122, 104, 90, 0, 65, + 0, 0, 0, 0, 90, 189, 65, 188, 187, 64, + 0, 0, 279, 0, 188, 187, 64, 123, 0, 0, + 0, 0, 0, 119, 121, 120, 0, 0, 0, 0, + 0, 89, 0, 0, 0, 206, 207, 0, 89, 0, + 0, 0, 206, 207, 103, 105, 0, 90, 0, 0, + 0, 0, 0, 0, 90, 114, 115, 188, 187, 117, + 118, 122, 104, 0, 188, 187, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 183, 184, 0, 0, 119, + 121, 120, 276, 277, } var yyPact = [...]int16{ - 68, 238, 954, 954, 702, 873, -1000, -1000, -1000, 226, + 68, 294, 934, 934, 688, 855, -1000, -1000, -1000, 231, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 388, -1000, 347, -1000, 1018, -1000, -1000, -1000, + -1000, -1000, 448, -1000, 340, -1000, 996, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 1, 18, 193, -1000, -1000, 792, -1000, 792, 225, - -1000, 156, 147, 215, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 366, -1000, -1000, 392, -1000, -1000, 420, 288, -1000, - -1000, 27, -1000, -40, -40, -40, -40, -40, -40, -40, - -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, - -40, 915, 
-1000, -1000, 243, 834, 1101, 1101, 1101, 1101, - 1101, 1101, 193, -58, -1000, 234, 234, 612, -1000, 31, - 510, 35, -14, -1000, 86, 83, 1101, 367, -1000, -1000, - 50, 185, -1000, -1000, 286, -1000, 210, -1000, 186, 181, - 792, -1000, -49, -70, -44, -1000, 792, 792, 792, 792, - 792, 792, 792, 792, 792, 792, 792, 792, 792, 792, - 792, 792, 792, -1000, -1000, -1000, 1142, 189, 188, 187, - 1, -1000, -1000, 1101, -1000, 167, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 228, 228, 344, -1000, 1, -1000, 1101, - 156, 147, 173, 173, -14, -14, -14, -14, -1000, -1000, - -1000, 522, -1000, -1000, 278, -1000, 1018, -1000, -1000, -1000, - -1000, 574, -1000, 231, -1000, 159, -1000, -1000, -1000, -1000, - -1000, 158, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 24, - 154, 140, -1000, -1000, -1000, 701, 1104, 97, 97, 97, - 234, 234, 234, 234, 35, 35, 165, 165, 165, 165, - 165, 1089, 1039, 165, 165, 1089, 35, 35, 165, 35, - 1104, -1000, 131, 119, 101, 1101, -14, 82, 81, 1101, - 510, 49, -1000, -1000, -1000, 386, -1000, 287, -1000, -1000, + -1000, 5, 18, 279, -1000, -1000, 776, -1000, 776, 164, + -1000, 228, 215, 288, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 445, -1000, -1000, 460, -1000, -1000, 397, 329, -1000, + -1000, 26, -1000, -53, -53, -53, -53, -53, -53, -53, + -53, -53, -53, -53, -53, -53, -53, -53, -53, 1120, + -1000, -1000, 102, 326, 1077, 1077, 1077, 1077, 1077, 1077, + 279, -58, -1000, 196, 196, 600, -1000, 30, 321, 105, + -15, -1000, 157, 150, 1077, 400, -1000, -1000, 327, 335, + -1000, -1000, 436, -1000, 216, -1000, 214, 516, 776, -1000, + -47, -51, -41, -1000, 776, 776, 776, 776, 776, 776, + 776, 776, 776, 776, 776, 776, 776, 776, 776, -1000, + -1000, -1000, 1127, 272, 268, 264, 5, -1000, -1000, 1077, + -1000, 236, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 269, + 269, 176, -1000, 5, -1000, 1077, 228, 215, 233, 233, + -15, -15, -15, -15, -1000, -1000, -1000, 512, -1000, 
-1000, + 91, -1000, 996, -1000, -1000, -1000, -1000, 402, -1000, 404, + -1000, 162, -1000, -1000, -1000, -1000, -1000, 155, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 23, 66, 33, -1000, -1000, + -1000, 514, 167, 171, 171, 171, 196, 196, 196, 196, + 105, 105, 1133, 1133, 1133, 1067, 1017, 1133, 1133, 1067, + 105, 105, 1133, 105, 167, -1000, 212, 209, 195, 1077, + -15, 110, 81, 1077, 321, 46, -1000, -1000, -1000, 1070, + -1000, 163, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 792, 1101, -1000, -1000, -1000, -1000, -1000, -1000, 38, 38, - 22, 38, 93, 93, 346, 44, -1000, -1000, 338, 337, - 335, 331, 311, 309, 293, 281, 253, 252, 244, -1000, - 126, -56, -54, -1000, -1000, -1000, -1000, -1000, 46, 42, - 1101, 504, -1000, -1000, 407, -1000, 170, -1000, -1000, -1000, - 432, -1000, 1018, 418, -1000, -1000, -1000, 38, -1000, 21, - 20, 791, -1000, -1000, -1000, 39, 56, 56, 56, 228, - 182, 182, 39, 182, 39, -75, 41, 155, 97, 97, - -1000, -1000, 59, -1000, 1101, -1000, -1000, -1000, -1000, -1000, - -1000, 38, 38, -1000, -1000, -1000, 38, -1000, -1000, -1000, - -1000, -1000, -1000, 56, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 40, -1000, -1000, 1101, 325, -1000, - -1000, -1000, 289, -1000, -1000, 318, -1000, 295, -1000, -1000, - -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 776, 1077, -1000, -1000, -1000, -1000, + -1000, -1000, 36, 36, 22, 36, 83, 83, 98, 49, + -1000, -1000, 366, 364, 360, 353, 338, 334, 331, 305, + 303, 301, 299, -1000, 291, -67, -65, -1000, -1000, -1000, + -1000, -1000, 42, 34, 1077, 312, -1000, -1000, 240, -1000, + 112, -1000, -1000, -1000, 424, -1000, 996, 193, -1000, -1000, + -1000, 36, -1000, 19, 17, 599, -1000, -1000, -1000, 77, + 289, 289, 289, 269, 217, 217, 77, 217, 77, -71, + 32, 229, 171, 
171, -1000, -1000, 53, -1000, 1077, -1000, + -1000, -1000, -1000, -1000, -1000, 36, 36, -1000, -1000, -1000, + 36, -1000, -1000, -1000, -1000, -1000, -1000, 289, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 29, -1000, + -1000, 1077, 180, -1000, -1000, -1000, 336, -1000, -1000, 147, + -1000, 44, -1000, -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 491, 12, 490, 7, 16, 489, 428, 22, 488, - 10, 487, 14, 293, 371, 486, 15, 479, 19, 18, - 478, 475, 8, 474, 4, 5, 471, 3, 6, 13, - 468, 32, 2, 467, 464, 25, 214, 463, 453, 452, - 96, 451, 442, 24, 441, 1, 41, 435, 11, 433, - 430, 426, 415, 402, 397, 396, 389, 345, 0, 385, - 9, 357, 352, 295, + 0, 539, 12, 536, 7, 16, 533, 431, 22, 529, + 10, 527, 24, 351, 380, 526, 15, 523, 19, 14, + 522, 516, 8, 515, 4, 5, 501, 3, 6, 13, + 500, 26, 2, 485, 484, 23, 208, 482, 481, 479, + 93, 478, 477, 27, 476, 1, 42, 469, 11, 466, + 453, 451, 445, 439, 427, 425, 418, 385, 0, 408, + 9, 396, 395, 388, } var yyR1 = [...]int8{ @@ -642,33 +635,32 @@ var yyR1 = [...]int8{ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 34, 34, 34, 34, 35, 35, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, - 38, 38, 38, 38, 38, 38, 38, 36, 39, 39, - 52, 52, 44, 44, 44, 44, 37, 37, 37, 37, - 37, 37, 18, 18, 18, 18, 17, 17, 17, 4, - 4, 4, 45, 45, 41, 43, 43, 42, 42, 42, - 53, 60, 49, 49, 50, 51, 33, 33, 33, 9, - 9, 47, 55, 55, 55, 55, 55, 55, 56, 57, - 57, 57, 46, 46, 46, 1, 1, 1, 2, 2, - 2, 2, 2, 2, 2, 14, 14, 7, 7, 7, + 38, 38, 38, 38, 38, 36, 39, 39, 52, 52, + 44, 44, 44, 44, 37, 37, 37, 37, 37, 37, + 18, 18, 18, 18, 17, 17, 17, 4, 4, 4, + 45, 45, 41, 43, 43, 42, 42, 42, 53, 60, + 49, 49, 50, 51, 33, 33, 33, 9, 9, 47, + 55, 55, 55, 55, 55, 55, 56, 57, 57, 57, + 46, 46, 46, 1, 1, 1, 2, 2, 2, 2, + 2, 2, 2, 14, 14, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 13, 13, - 13, 13, 15, 15, 15, 16, 16, 16, 16, 16, - 16, 16, 63, 21, 21, 21, 21, 20, 20, 20, - 20, 20, 20, 20, 20, 
20, 30, 30, 30, 22, - 22, 22, 22, 23, 23, 23, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 25, 25, 26, - 26, 26, 11, 11, 11, 11, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 7, 7, 7, 7, 7, 7, 13, 13, 13, 13, + 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, + 63, 21, 21, 21, 21, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 30, 30, 30, 22, 22, 22, + 22, 23, 23, 23, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 25, 25, 26, 26, 26, + 11, 11, 11, 11, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 8, 8, 5, 5, - 5, 5, 48, 48, 29, 29, 31, 31, 32, 32, - 28, 27, 27, 54, 10, 19, 19, 61, 61, 61, - 61, 61, 61, 61, 61, 61, 61, 12, 12, 58, - 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, - 58, 59, + 6, 6, 6, 6, 8, 8, 5, 5, 5, 5, + 48, 48, 29, 29, 31, 31, 32, 32, 28, 27, + 27, 54, 10, 19, 19, 61, 61, 61, 61, 61, + 61, 61, 61, 61, 61, 12, 12, 58, 58, 58, + 58, 58, 58, 58, 58, 58, 58, 58, 58, 59, } var yyR2 = [...]int8{ @@ -676,135 +668,132 @@ var yyR2 = [...]int8{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 1, 0, 1, - 3, 3, 1, 1, 3, 3, 1, 3, 3, 3, - 5, 5, 3, 4, 2, 1, 3, 1, 2, 1, - 1, 1, 3, 4, 2, 3, 2, 3, 1, 2, - 3, 1, 3, 3, 2, 2, 3, 5, 3, 1, - 1, 4, 6, 5, 6, 5, 4, 3, 2, 2, - 1, 1, 3, 4, 2, 3, 1, 2, 3, 3, - 1, 3, 3, 2, 1, 2, 1, 1, 1, 1, + 4, 4, 4, 4, 4, 1, 0, 1, 3, 3, + 1, 1, 3, 3, 1, 3, 3, 3, 5, 5, + 3, 4, 2, 1, 3, 1, 2, 1, 1, 1, + 3, 4, 2, 3, 2, 3, 1, 2, 3, 1, + 3, 3, 2, 2, 3, 5, 3, 1, 1, 4, + 6, 5, 6, 5, 4, 3, 2, 2, 1, 1, + 3, 4, 2, 3, 1, 2, 3, 3, 1, 3, + 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 3, 4, 2, 0, + 3, 1, 2, 3, 3, 1, 3, 3, 2, 1, + 2, 0, 3, 2, 1, 1, 3, 1, 3, 4, + 1, 3, 5, 5, 1, 1, 1, 4, 3, 3, + 2, 3, 1, 2, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 4, 3, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, - 2, 0, 3, 1, 2, 3, 3, 1, 
3, 3, - 2, 1, 2, 0, 3, 2, 1, 1, 3, 1, - 3, 4, 1, 3, 5, 5, 1, 1, 1, 4, - 3, 3, 2, 3, 1, 2, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, - 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, - 1, 2, 1, 1, 1, 0, 1, 1, 2, 3, - 3, 4, 4, 6, 7, 4, 1, 1, 1, 1, - 2, 3, 3, 3, 3, 3, 3, 3, 3, 6, - 1, 3, + 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, + 1, 1, 1, 0, 1, 1, 2, 3, 3, 4, + 4, 6, 7, 4, 1, 1, 1, 1, 2, 3, + 3, 3, 3, 3, 3, 3, 3, 6, 1, 3, } var yyChk = [...]int16{ - -1000, -62, 107, 108, 109, 110, 2, 10, -14, -7, - -13, 64, 65, 81, 66, 67, 84, 85, 86, 68, - 12, 49, 50, 53, 69, 18, 70, 88, 71, 72, - 73, 74, 75, 92, 95, 96, 76, 77, 97, 98, - 90, 89, 13, -63, -14, 10, -40, -34, -38, -41, + -1000, -62, 105, 106, 107, 108, 2, 10, -14, -7, + -13, 62, 63, 79, 64, 65, 82, 83, 84, 66, + 12, 47, 48, 51, 67, 18, 68, 86, 69, 70, + 71, 72, 73, 90, 93, 94, 74, 75, 95, 96, + 88, 87, 13, -63, -14, 10, -40, -34, -38, -41, -47, -48, -49, -50, -51, -53, -54, -55, -56, -57, -33, -58, -3, 12, 19, 9, 15, 25, -8, -7, - -46, 97, 98, -12, -59, 64, 65, 66, 67, 68, - 69, 70, 71, 72, 73, 74, 75, 76, 77, 41, - 59, 13, -57, -13, -15, 20, -16, 12, -10, 2, - 25, -21, 2, 41, 61, 42, 43, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 58, - 59, 88, 90, 89, 60, 14, 41, 59, 55, 42, - 54, 58, -35, -43, 2, 81, 92, 15, -43, -40, - -58, -40, -58, -46, 15, 15, 15, -1, 20, -2, - 12, -10, 2, 20, 7, 2, 4, 2, 4, 24, - -36, -37, -44, -39, -52, 80, -36, -36, -36, -36, - -36, -36, -36, -36, -36, -36, -36, -36, -36, -36, - -36, -36, -36, -61, 2, -48, -8, 97, 98, -12, - -58, 70, 69, 15, -32, -9, 2, -29, -31, 95, - 96, 19, 9, 41, 59, -60, 2, -58, -48, -8, - 97, 98, -58, -58, -58, -58, -58, -58, -43, -35, - -18, 15, 2, -18, -42, 22, -40, 22, 22, 22, - 22, -58, 20, 7, 2, -5, 2, 4, 56, 44, - 57, -5, 20, -16, 25, 2, 25, 2, -20, 5, - -30, -22, 12, -29, -31, 16, -40, 84, 85, 86, - 87, 
91, 82, 83, -40, -40, -40, -40, -40, -40, + -46, 95, 96, -12, -59, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 41, + 57, 13, -57, -13, -15, 20, -16, 12, -10, 2, + 25, -21, 2, 41, 59, 42, 43, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 56, 57, 86, + 88, 87, 58, 14, 41, 57, 53, 42, 52, 56, + -35, -43, 2, 79, 90, 15, -43, -40, -58, -40, + -58, -46, 15, 15, 15, -1, 20, -2, 12, -10, + 2, 20, 7, 2, 4, 2, 4, 24, -36, -37, + -44, -39, -52, 78, -36, -36, -36, -36, -36, -36, + -36, -36, -36, -36, -36, -36, -36, -36, -36, -61, + 2, -48, -8, 95, 96, -12, -58, 68, 67, 15, + -32, -9, 2, -29, -31, 93, 94, 19, 9, 41, + 57, -60, 2, -58, -48, -8, 95, 96, -58, -58, + -58, -58, -58, -58, -43, -35, -18, 15, 2, -18, + -42, 22, -40, 22, 22, 22, 22, -58, 20, 7, + 2, -5, 2, 4, 54, 44, 55, -5, 20, -16, + 25, 2, 25, 2, -20, 5, -30, -22, 12, -29, + -31, 16, -40, 82, 83, 84, 85, 89, 80, 81, -40, -40, -40, -40, -40, -40, -40, -40, -40, -40, - -40, -48, 97, 98, -12, 15, -58, 15, 15, 15, - -58, 15, -29, -29, 21, 6, 2, -17, 22, -4, - -6, 25, 2, 64, 80, 65, 81, 66, 67, 68, - 82, 83, 84, 85, 86, 12, 87, 49, 50, 53, - 69, 18, 70, 88, 91, 71, 72, 73, 74, 75, - 95, 96, 61, 76, 77, 97, 98, 90, 89, 22, - 7, 7, 20, -2, 25, 2, 25, 2, 26, 26, - -31, 26, 41, 59, -23, 24, 17, -24, 30, 28, - 29, 35, 36, 37, 33, 31, 34, 32, 38, -45, - 15, -45, -45, -18, -18, -19, -18, -19, 15, 15, - 15, -58, 22, 22, -58, 22, -60, 21, 2, 22, - 7, 2, -40, -58, -28, 19, -28, 26, -28, -22, - -22, 24, 17, 2, 17, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, -48, -8, 86, 85, - 22, 22, -58, 22, 7, 21, 2, 22, -4, 22, - -28, 26, 26, 17, -24, -27, 59, -28, -32, -32, - -32, -29, -25, 14, -25, -27, -25, -27, -11, 101, - 102, 103, 104, 22, -48, -45, -45, 7, -58, -28, - -28, -28, -26, -32, 22, -58, 22, 24, 21, 2, - 22, 21, -32, + -40, -40, -40, -40, -40, -48, 95, 96, -12, 15, + -58, 15, 15, 15, -58, 15, -29, -29, 21, 6, + 2, -17, 22, -4, -6, 25, 2, 62, 78, 63, + 79, 64, 65, 66, 80, 81, 82, 83, 84, 12, + 85, 47, 48, 51, 67, 18, 68, 86, 89, 
69, + 70, 71, 72, 73, 93, 94, 59, 74, 75, 95, + 96, 88, 87, 22, 7, 7, 20, -2, 25, 2, + 25, 2, 26, 26, -31, 26, 41, 57, -23, 24, + 17, -24, 30, 28, 29, 35, 36, 37, 33, 31, + 34, 32, 38, -45, 15, -45, -45, -18, -18, -19, + -18, -19, 15, 15, 15, -58, 22, 22, -58, 22, + -60, 21, 2, 22, 7, 2, -40, -58, -28, 19, + -28, 26, -28, -22, -22, 24, 17, 2, 17, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + -48, -8, 84, 83, 22, 22, -58, 22, 7, 21, + 2, 22, -4, 22, -28, 26, 26, 17, -24, -27, + 57, -28, -32, -32, -32, -29, -25, 14, -25, -27, + -25, -27, -11, 99, 100, 101, 102, 22, -48, -45, + -45, 7, -58, -28, -28, -28, -26, -32, 22, -58, + 22, 24, 21, 2, 22, 21, -32, } var yyDef = [...]int16{ - 0, -2, 151, 151, 0, 0, 7, 6, 1, 151, - 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, - 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, - 146, 147, 0, 2, -2, 3, 4, 8, 9, 10, + 0, -2, 149, 149, 0, 0, 7, 6, 1, 149, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 0, 126, 262, 263, 0, 273, 0, 100, - 101, 144, 145, 0, 300, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, -2, -2, -2, -2, 256, - 257, 0, 5, 115, 0, 150, 153, 0, 157, 161, - 274, 162, 166, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, - 48, 0, 84, 85, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 25, 26, 0, 0, 0, 74, 0, - 22, 98, -2, 99, 0, 0, 0, 0, 104, 106, - 0, 110, 114, 148, 0, 154, 0, 160, 0, 165, - 0, 47, 56, 52, 53, 49, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 82, 83, 277, 0, 0, 0, 0, - 286, 287, 288, 0, 86, 0, 88, 268, 269, 89, - 90, 264, 265, 0, 0, 0, 97, 81, 289, 0, - 0, 0, 291, 292, 293, 294, 295, 296, 23, 24, - 27, 0, 65, 28, 0, 76, 78, 80, 301, 297, - 298, 0, 102, 0, 107, 0, 113, 258, 259, 260, - 261, 0, 149, 152, 155, 158, 156, 159, 164, 167, - 169, 172, 176, 177, 
178, 0, 29, 0, 0, 0, - 0, 0, -2, -2, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, - 46, 278, 0, 0, 0, 0, 290, 0, 0, 0, - 0, 0, 266, 267, 91, 0, 96, 0, 64, 67, - 69, 70, 71, 220, 221, 222, 223, 224, 225, 226, - 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, - 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, - 247, 248, 249, 250, 251, 252, 253, 254, 255, 75, - 79, 0, 103, 105, 108, 112, 109, 111, 0, 0, - 0, 0, 0, 0, 0, 0, 182, 184, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 57, - 0, 58, 59, 50, 51, 54, 276, 55, 0, 0, - 0, 0, 279, 280, 0, 87, 0, 93, 95, 62, - 0, 68, 77, 0, 168, 270, 170, 0, 173, 0, - 0, 0, 180, 185, 181, 0, 0, 0, 0, 0, + 21, 22, 0, 124, 260, 261, 0, 271, 0, 98, + 99, 142, 143, 0, 298, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -2, -2, -2, 254, + 255, 0, 5, 113, 0, 148, 151, 0, 155, 159, + 272, 160, 164, 46, 46, 46, 46, 46, 46, 46, + 46, 46, 46, 46, 46, 46, 46, 46, 46, 0, + 82, 83, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 25, 26, 0, 0, 0, 72, 0, 22, 96, + -2, 97, 0, 0, 0, 0, 102, 104, 0, 108, + 112, 146, 0, 152, 0, 158, 0, 163, 0, 45, + 54, 50, 51, 47, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 80, + 81, 275, 0, 0, 0, 0, 284, 285, 286, 0, + 84, 0, 86, 266, 267, 87, 88, 262, 263, 0, + 0, 0, 95, 79, 287, 0, 0, 0, 289, 290, + 291, 292, 293, 294, 23, 24, 27, 0, 63, 28, + 0, 74, 76, 78, 299, 295, 296, 0, 100, 0, + 105, 0, 111, 256, 257, 258, 259, 0, 147, 150, + 153, 156, 154, 157, 162, 165, 167, 170, 174, 175, + 176, 0, 29, 0, 0, 0, 0, 0, -2, -2, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 276, 0, 0, 0, 0, + 288, 0, 0, 0, 0, 0, 264, 265, 89, 0, + 94, 0, 62, 65, 67, 68, 69, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 73, 77, 0, 101, 103, 106, 110, + 107, 109, 0, 0, 0, 0, 0, 0, 0, 0, + 180, 182, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 55, 0, 56, 57, 48, 49, 52, + 274, 53, 0, 0, 0, 0, 277, 
278, 0, 85, + 0, 91, 93, 60, 0, 66, 75, 0, 166, 268, + 168, 0, 171, 0, 0, 0, 178, 183, 179, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 281, 282, 0, 285, 0, 92, 94, 63, 66, 299, - 171, 0, 0, 179, 183, 186, 0, 272, 187, 188, - 189, 190, 191, 0, 192, 193, 194, 195, 196, 202, - 203, 204, 205, 72, 0, 60, 61, 0, 0, 174, - 175, 271, 0, 200, 73, 0, 283, 0, 198, 201, - 284, 197, 199, + 0, 0, 0, 0, 279, 280, 0, 283, 0, 90, + 92, 61, 64, 297, 169, 0, 0, 177, 181, 184, + 0, 270, 185, 186, 187, 188, 189, 0, 190, 191, + 192, 193, 194, 200, 201, 202, 203, 70, 0, 58, + 59, 0, 0, 172, 173, 269, 0, 198, 71, 0, + 281, 0, 196, 199, 282, 195, 197, } var yyTok1 = [...]int8{ @@ -822,7 +811,7 @@ var yyTok2 = [...]int8{ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 102, 103, 104, 105, 106, 107, 108, 109, } var yyTok3 = [...]int8{ @@ -1305,24 +1294,14 @@ yydefault: { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } - case 45: - yyDollar = yyS[yypt-4 : yypt+1] - { - yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) - } case 46: - yyDollar = yyS[yypt-4 : yypt+1] - { - yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) - } - case 48: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.node = &BinaryExpr{ VectorMatching: &VectorMatching{Card: CardOneToOne}, } } - case 49: + case 47: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &BinaryExpr{ @@ -1330,34 +1309,34 @@ yydefault: ReturnBool: true, } } - case 50: + case 48: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings } - case 51: + case 49: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node 
yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings yyVAL.node.(*BinaryExpr).VectorMatching.On = true } - case 54: + case 52: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardManyToOne yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings } - case 55: + case 53: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardOneToMany yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings } - case 57: + case 55: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node @@ -1365,21 +1344,21 @@ yydefault: yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill } - case 58: + case 56: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node fill := yyDollar[3].node.(*NumberLiteral).Val yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill } - case 59: + case 57: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[1].node fill := yyDollar[3].node.(*NumberLiteral).Val yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill } - case 60: + case 58: yyDollar = yyS[yypt-5 : yypt+1] { yyVAL.node = yyDollar[1].node @@ -1388,7 +1367,7 @@ yydefault: yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill_left yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill_right } - case 61: + case 59: yyDollar = yyS[yypt-5 : yypt+1] { fill_right := yyDollar[3].node.(*NumberLiteral).Val @@ -1396,44 +1375,44 @@ yydefault: yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.LHS = &fill_left yyVAL.node.(*BinaryExpr).VectorMatching.FillValues.RHS = &fill_right } - case 62: + case 60: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.strings = yyDollar[2].strings } - case 63: + case 61: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.strings = yyDollar[2].strings } - case 64: + case 62: yyDollar = yyS[yypt-2 : yypt+1] { 
yyVAL.strings = []string{} } - case 65: + case 63: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "\"(\"") yyVAL.strings = nil } - case 66: + case 64: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.strings = append(yyDollar[1].strings, yyDollar[3].item.Val) } - case 67: + case 65: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.strings = []string{yyDollar[1].item.Val} } - case 68: + case 66: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "\",\" or \")\"") yyVAL.strings = yyDollar[1].strings } - case 69: + case 67: yyDollar = yyS[yypt-1 : yypt+1] { if !model.UTF8Validation.IsValidLabelName(yyDollar[1].item.Val) { @@ -1441,7 +1420,7 @@ yydefault: } yyVAL.item = yyDollar[1].item } - case 70: + case 68: yyDollar = yyS[yypt-1 : yypt+1] { unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val) @@ -1452,18 +1431,18 @@ yydefault: yyVAL.item.Pos++ yyVAL.item.Val = unquoted } - case 71: + case 69: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "label") yyVAL.item = Item{} } - case 72: + case 70: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[2].node.(*NumberLiteral) } - case 73: + case 71: yyDollar = yyS[yypt-4 : yypt+1] { nl := yyDollar[3].node.(*NumberLiteral) @@ -1473,7 +1452,7 @@ yydefault: nl.PosRange.Start = yyDollar[2].item.Pos yyVAL.node = nl } - case 74: + case 72: yyDollar = yyS[yypt-2 : yypt+1] { fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions) @@ -1492,38 +1471,38 @@ yydefault: }, } } - case 75: + case 73: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[2].node } - case 76: + case 74: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = Expressions{} } - case 77: + case 75: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr)) } - case 78: + case 76: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = Expressions{yyDollar[1].node.(Expr)} } - case 79: + case 77: yyDollar = yyS[yypt-2 : yypt+1] 
{ yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args") yyVAL.node = yyDollar[1].node } - case 80: + case 78: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} } - case 81: + case 79: yyDollar = yyS[yypt-1 : yypt+1] { if numLit, ok := yyDollar[1].node.(*NumberLiteral); ok { @@ -1537,7 +1516,7 @@ yydefault: } yyVAL.node = yyDollar[1].node } - case 82: + case 80: yyDollar = yyS[yypt-3 : yypt+1] { if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { @@ -1548,41 +1527,41 @@ yydefault: yylex.(*parser).addOffsetExpr(yyDollar[1].node, yyDollar[3].node.(*DurationExpr)) yyVAL.node = yyDollar[1].node } - case 83: + case 81: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("offset", "number, duration, step(), or range()") yyVAL.node = yyDollar[1].node } - case 84: + case 82: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).setAnchored(yyDollar[1].node) } - case 85: + case 83: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).setSmoothed(yyDollar[1].node) } - case 86: + case 84: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float) yyVAL.node = yyDollar[1].node } - case 87: + case 85: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item) yyVAL.node = yyDollar[1].node } - case 88: + case 86: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("@", "timestamp") yyVAL.node = yyDollar[1].node } - case 91: + case 89: yyDollar = yyS[yypt-4 : yypt+1] { var errMsg string @@ -1612,7 +1591,7 @@ yydefault: EndPos: yylex.(*parser).lastClosing, } } - case 92: + case 90: yyDollar = yyS[yypt-6 : yypt+1] { var rangeNl time.Duration @@ -1634,7 +1613,7 @@ yydefault: EndPos: yyDollar[6].item.Pos + 1, } } - case 93: + case 91: yyDollar = yyS[yypt-5 : yypt+1] { var rangeNl time.Duration @@ -1649,31 +1628,31 @@ 
yydefault: EndPos: yyDollar[5].item.Pos + 1, } } - case 94: + case 92: yyDollar = yyS[yypt-6 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } - case 95: + case 93: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "number, duration, step(), range(), or \"]\"") yyVAL.node = yyDollar[1].node } - case 96: + case 94: yyDollar = yyS[yypt-4 : yypt+1] { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } - case 97: + case 95: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("subquery or range selector", "number, duration, step(), or range()") yyVAL.node = yyDollar[1].node } - case 98: + case 96: yyDollar = yyS[yypt-2 : yypt+1] { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { @@ -1686,7 +1665,7 @@ yydefault: yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos} } } - case 99: + case 97: yyDollar = yyS[yypt-2 : yypt+1] { vs := yyDollar[2].node.(*VectorSelector) @@ -1695,7 +1674,7 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 100: + case 98: yyDollar = yyS[yypt-1 : yypt+1] { vs := &VectorSelector{ @@ -1706,14 +1685,14 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 101: + case 99: yyDollar = yyS[yypt-1 : yypt+1] { vs := yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 102: + case 100: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1721,7 +1700,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item), } } - case 103: + case 101: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1729,7 +1708,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item), } } - case 104: + case 102: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1737,7 +1716,7 @@ yydefault: 
PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item), } } - case 105: + case 103: yyDollar = yyS[yypt-3 : yypt+1] { if yyDollar[1].matchers != nil { @@ -1746,144 +1725,144 @@ yydefault: yyVAL.matchers = yyDollar[1].matchers } } - case 106: + case 104: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } - case 107: + case 105: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } - case 108: + case 106: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } - case 109: + case 107: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } - case 110: + case 108: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item) } - case 111: + case 109: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label matching", "string") yyVAL.matcher = nil } - case 112: + case 110: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label matching", "string") yyVAL.matcher = nil } - case 113: + case 111: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } - case 114: + case 112: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } - case 115: + case 113: yyDollar = yyS[yypt-2 : yypt+1] { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, yyDollar[1].item.Val) yyVAL.labels = b.Labels() } - case 116: + case 114: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.labels = yyDollar[1].labels } - case 148: + case 146: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) 
} - case 149: + case 147: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } - case 150: + case 148: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.labels = labels.New() } - case 151: + case 149: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.labels = labels.New() } - case 152: + case 150: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } - case 153: + case 151: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.lblList = []labels.Label{yyDollar[1].label} } - case 154: + case 152: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } - case 155: + case 153: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 156: + case 154: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 157: + case 155: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val} } - case 158: + case 156: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 159: + case 157: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 160: + case 158: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 161: + case 159: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 162: + case 160: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1891,33 +1870,33 @@ yydefault: values: yyDollar[2].series, } } - case 163: + case 161: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = 
[]SequenceValue{} } - case 164: + case 162: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) } - case 165: + case 163: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 166: + case 164: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 167: + case 165: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 168: + case 166: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1925,12 +1904,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 169: + case 167: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 170: + case 168: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1939,7 +1918,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 171: + case 169: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1949,12 +1928,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 172: + case 170: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{yylex.(*parser).newHistogramSequenceValue(yyDollar[1].histogram)} } - case 173: + case 171: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1965,7 +1944,7 @@ yydefault: //$1 += $2 } } - case 174: + case 172: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1974,7 +1953,7 @@ yydefault: } yyVAL.series = val } - case 175: + case 173: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1983,7 +1962,7 @@ yydefault: } yyVAL.series = val } - case 176: + case 174: yyDollar = yyS[yypt-1 : yypt+1] { if 
yyDollar[1].item.Val != "stale" { @@ -1991,130 +1970,130 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 179: + case 177: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 180: + case 178: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 181: + case 179: yyDollar = yyS[yypt-3 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 182: + case 180: yyDollar = yyS[yypt-2 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } - case 183: + case 181: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } - case 184: + case 182: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.descriptors = yyDollar[1].descriptors } - case 185: + case 183: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. 
buckets:[5 10 7]") } - case 186: + case 184: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["schema"] = yyDollar[3].int } - case 187: + case 185: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["sum"] = yyDollar[3].float } - case 188: + case 186: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["count"] = yyDollar[3].float } - case 189: + case 187: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket"] = yyDollar[3].float } - case 190: + case 188: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } - case 191: + case 189: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } - case 192: + case 190: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } - case 193: + case 191: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["offset"] = yyDollar[3].int } - case 194: + case 192: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } - case 195: + case 193: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["n_offset"] = yyDollar[3].int } - case 196: + case 194: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item } - case 197: + case 195: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } - case 198: + case 196: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } - case 199: + case 197: yyDollar = yyS[yypt-3 : yypt+1] 
{ yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) } - case 200: + case 198: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 262: + case 260: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -2122,7 +2101,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 263: + case 261: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -2137,12 +2116,12 @@ yydefault: Duration: true, } } - case 264: + case 262: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 265: + case 263: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -2153,17 +2132,17 @@ yydefault: } yyVAL.float = dur.Seconds() } - case 266: + case 264: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 267: + case 265: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 270: + case 268: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -2172,17 +2151,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 271: + case 269: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 272: + case 270: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 273: + case 271: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -2190,7 +2169,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 274: + case 272: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -2199,12 +2178,12 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 275: + case 273: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil } - case 277: + case 275: yyDollar = yyS[yypt-1 : yypt+1] { nl := yyDollar[1].node.(*NumberLiteral) @@ -2215,7 +2194,7 @@ yydefault: } yyVAL.node = nl } - case 278: + case 276: yyDollar = yyS[yypt-2 : yypt+1] { nl := 
yyDollar[2].node.(*NumberLiteral) @@ -2230,7 +2209,7 @@ yydefault: nl.PosRange.Start = yyDollar[1].item.Pos yyVAL.node = nl } - case 279: + case 277: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2239,7 +2218,7 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 280: + case 278: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2248,7 +2227,7 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 281: + case 279: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2261,7 +2240,7 @@ yydefault: StartPos: yyDollar[1].item.Pos, } } - case 282: + case 280: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2274,7 +2253,7 @@ yydefault: StartPos: yyDollar[1].item.Pos, } } - case 283: + case 281: yyDollar = yyS[yypt-6 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2285,7 +2264,7 @@ yydefault: RHS: yyDollar[5].node.(Expr), } } - case 284: + case 282: yyDollar = yyS[yypt-7 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2301,7 +2280,7 @@ yydefault: }, } } - case 285: + case 283: yyDollar = yyS[yypt-4 : yypt+1] { de := yyDollar[3].node.(*DurationExpr) @@ -2316,7 +2295,7 @@ yydefault: } yyVAL.node = yyDollar[3].node } - case 289: + case 287: yyDollar = yyS[yypt-1 : yypt+1] { nl := yyDollar[1].node.(*NumberLiteral) @@ -2327,7 +2306,7 @@ yydefault: } yyVAL.node = nl } - case 290: + case 288: yyDollar = yyS[yypt-2 : yypt+1] { switch expr := yyDollar[2].node.(type) { @@ -2360,25 +2339,25 @@ yydefault: break } } - case 291: + case 289: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: ADD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 292: + case 290: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: SUB, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 293: + case 291: yyDollar = 
yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: MUL, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 294: + case 292: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) @@ -2389,7 +2368,7 @@ yydefault: } yyVAL.node = &DurationExpr{Op: DIV, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 295: + case 293: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) @@ -2400,13 +2379,13 @@ yydefault: } yyVAL.node = &DurationExpr{Op: MOD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 296: + case 294: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr)) yyVAL.node = &DurationExpr{Op: POW, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)} } - case 297: + case 295: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2415,7 +2394,7 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 298: + case 296: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2424,7 +2403,7 @@ yydefault: EndPos: yyDollar[3].item.PositionRange().End, } } - case 299: + case 297: yyDollar = yyS[yypt-6 : yypt+1] { yyVAL.node = &DurationExpr{ @@ -2435,7 +2414,7 @@ yydefault: RHS: yyDollar[5].node.(Expr), } } - case 301: + case 299: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).experimentalDurationExpr(yyDollar[2].node.(Expr)) diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go index 8aa9e9dcbeb..71499857678 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go @@ -189,23 +189,21 @@ var ItemTypeStr = map[ItemType]string{ TIMES: "x", SPACE: "", - SUB: "-", - ADD: "+", - MUL: "*", - MOD: "%", - DIV: "/", - 
EQLC: "==", - NEQ: "!=", - LTE: "<=", - LSS: "<", - GTE: ">=", - GTR: ">", - TRIM_UPPER: "/", - EQL_REGEX: "=~", - NEQ_REGEX: "!~", - POW: "^", - AT: "@", + SUB: "-", + ADD: "+", + MUL: "*", + MOD: "%", + DIV: "/", + EQLC: "==", + NEQ: "!=", + LTE: "<=", + LSS: "<", + GTE: ">=", + GTR: ">", + EQL_REGEX: "=~", + NEQ_REGEX: "!~", + POW: "^", + AT: "@", } func init() { @@ -448,9 +446,6 @@ func lexStatements(l *Lexer) stateFn { if t := l.peek(); t == '=' { l.next() l.emit(LTE) - } else if t := l.peek(); t == '/' { - l.next() - l.emit(TRIM_UPPER) } else { l.emit(LSS) } @@ -458,9 +453,6 @@ func lexStatements(l *Lexer) stateFn { if t := l.peek(); t == '=' { l.next() l.emit(GTE) - } else if t := l.peek(); t == '/' { - l.next() - l.emit(TRIM_LOWER) } else { l.emit(GTR) } diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test index 92540281e86..40789b295a6 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test @@ -2020,356 +2020,3 @@ eval instant at 1m irate(nhcb_add_bucket[2m]) * 60 expect no_warn expect no_info {} {{schema:-53 sum:56 count:15 custom_values:[2 3 4 6] buckets:[1 0 1 5 8] counter_reset_hint:gauge}} - - -# Test native histogram with trim operators ("/": TRIM_LOWER) -load 1m - h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} - -eval instant at 1m h_test >/ -Inf - h_test {{schema:0 sum:123.75 count:34 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1 2]}} - -eval instant at 1m h_test / +Inf - h_test {{schema:0 z_bucket_w:0.001}} - -eval instant at 1m h_test / 0 - h_test {{schema:0 sum:120.20840280171308 count:30.5 z_bucket:0.5 z_bucket_w:0.001 buckets:[2 4 8 16]}} - -eval instant at 1m h_test / 1.4142135624 - h_test 
{{count:26 sum:116.50067065070982 z_bucket_w:0.001 buckets:[0 2 8 16]}} - - -load 1m - h_test_2 {{schema:2 sum:12.8286080906 count:28 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 3 1]}} - -eval instant at 1m h_test_2 / 1.13 - h_test_2 {{schema:2 count:14.589417818876296 sum:22.168126492693734 z_bucket_w:0.001 offset:1 buckets:[0.589417818876296 4 7 3]}} - -eval instant at 1m h_test_2 >/ -1.3 - h_test_2 {{schema:2 count:25.54213947904476 sum:16.29588491217537 z_bucket:1 z_bucket_w:0.001 buckets:[1 2 4 7 3] n_buckets:[1 5 1.54213947904476]}} - -eval instant at 1m h_test_2 / 2 - h_test{} {{count:24 sum:113.13708498984761 z_bucket_w:0.001 offset:2 buckets:[8 16]}} - -eval instant at 1m h_test >/ -1 - h_test{} {{count:32 sum:119.50104602052653 z_bucket:1 z_bucket_w:0.001 buckets:[2 4 8 16] n_buckets:[1]}} - -eval instant at 1m h_test / 0.5 - h_positive_buckets {{schema:0 count:10 sum:7.0710678118654755 z_bucket:0 z_bucket_w:0.5 buckets:[10]}} - -eval instant at 1m h_positive_buckets >/ 0.1 - h_positive_buckets {{schema:0 count:11.6 sum:7.551067811865476 z_bucket:1.6 z_bucket_w:0.5 buckets:[10]}} - -eval instant at 1m h_positive_buckets >/ 0 - h_positive_buckets {{schema:0 sum:8.0210678118654755 count:12 z_bucket:2 z_bucket_w:0.5 buckets:[10]}} - -eval instant at 1m h_positive_buckets / -0.5 - h_negative_buckets {{schema:0 count:2 sum:-0.5 z_bucket:2 z_bucket_w:0.5}} - -eval instant at 1m h_negative_buckets >/ -0.1 - h_negative_buckets {{schema:0 count:0.4 sum:-0.02 z_bucket:0.4 z_bucket_w:0.5}} - -eval instant at 1m h_negative_buckets >/ 0 - h_negative_buckets {{schema:0 z_bucket_w:0.5}} - - -# Exponential buckets: trim zero bucket when there are no other buckets. 
-load 1m - zero_bucket_only {{schema:0 count:5 sum:0 z_bucket:5 z_bucket_w:0.1 }} - -eval instant at 1m zero_bucket_only >/ 0.1 - zero_bucket_only {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.1 }} - -eval instant at 1m zero_bucket_only / 0.05 - zero_bucket_only {{schema:0 count:1.25 sum:0.09375 z_bucket:1.25 z_bucket_w:0.1 }} - -eval instant at 1m zero_bucket_only / 0 - zero_bucket_only {{schema:0 count:2.5 sum:0.125 z_bucket:2.5 z_bucket_w:0.1 }} - -eval instant at 1m zero_bucket_only / -0.05 - zero_bucket_only {{schema:0 count:3.75 sum:0.09375 z_bucket:3.75 z_bucket_w:0.1 }} - -eval instant at 1m zero_bucket_only / -0.1 - zero_bucket_only {{schema:0 count:5 sum:0 z_bucket:5 z_bucket_w:0.1 }} - - -load 1m - cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} - -# Custom buckets: trim on bucket boundary without interpolation -eval instant at 1m cbh / 15 - cbh{} {{schema:-53 count:4 sum:72.5 custom_values:[5 10 15 20] offset:3 buckets:[3 1]}} - -# Custom buckets: trim uses linear interpolation if cutoff is inside a bucket -eval instant at 1m cbh / 13 - cbh{} {{schema:-53 count:5.6 sum:94.9 custom_values:[5 10 15 20] offset:2 buckets:[1.6 3 1]}} - -eval instant at 1m cbh / +Inf - cbh{} {{schema:-53 custom_values:[5 10 15 20]}} - -eval instant at 1m cbh / -Inf - cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} - -# Noop -eval instant at 1m cbh >/ 0 - cbh {{schema:-53 sum:172.5 count:15 custom_values:[5 10 15 20] buckets:[1 6 4 3 1]}} - -eval instant at 1m cbh / 0 - zero_bucket{} {{count:7.5 sum:5.669354249492381 z_bucket:2.5 z_bucket_w:0.01 buckets:[2 3]}} - - -load 1m - cbh_one_bucket {{schema:-53 sum:100.0 count:100 buckets:[100]}} - -# Skip [-Inf; +Inf] bucket (100). -eval instant at 1m cbh_one_bucket / 10.0 - cbh_one_bucket {{schema:-53 sum:0.0 count:0 buckets:[0]}} - -# Keep [-Inf; +Inf] bucket (100). 
-eval instant at 1m cbh_one_bucket / +Inf - cbh_one_bucket {{schema:-53 sum:0 count:0 buckets:[0]}} - -# Keep [-Inf; +Inf] bucket (100). -eval instant at 1m cbh_one_bucket >/ -Inf - cbh_one_bucket {{schema:-53 sum:100 count:100 buckets:[100]}} - -# Skip [-Inf; +Inf] bucket (100). -eval instant at 1m cbh_one_bucket / -10.0 - cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:100 custom_values:[0] buckets:[0 100]}} - -# Skip [-Inf, 0] bucket (1). -eval instant at 1m cbh_two_buckets_split_at_zero >/ 0.0 - cbh_two_buckets_split_at_zero {{schema:-53 sum:0.0 count:100 custom_values:[0] buckets:[0 100]}} - -# Skip first bucket. -eval instant at 1m cbh_two_buckets_split_at_zero >/ 10.0 - cbh_two_buckets_split_at_zero {{schema:-53 sum:1000.0 count:100 custom_values:[0] buckets:[0 100]}} - - -load 1m - cbh_two_buckets_split_at_positive {{schema:-53 sum:33 count:101 custom_values:[5] buckets:[1 100]}} - -# Skip (5, +Inf] bucket (100). -eval instant at 1m cbh_two_buckets_split_at_positive / -10.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:500.0 count:100 custom_values:[5] buckets:[0 100]}} - -# Noop. -eval instant at 1m cbh_two_buckets_split_at_positive >/ 0.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:33.0 count:101 custom_values:[5] buckets:[1 100]}} - -# Keep (5, +Inf] bucket (100) and 3/5 of [0, 5] bucket (0.6 * 3.5). -eval instant at 1m cbh_two_buckets_split_at_positive >/ 2.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:502.1 count:100.6 custom_values:[5] buckets:[0.6 100]}} - -# Skip first bucket. -eval instant at 1m cbh_two_buckets_split_at_positive >/ 10.0 - cbh_two_buckets_split_at_positive {{schema:-53 sum:1000.0 count:100 custom_values:[5] buckets:[0 100]}} - - -load 1m - cbh_two_buckets_split_at_negative {{schema:-53 sum:33 count:101 custom_values:[-5] buckets:[1 100]}} - -# Skip (-5, +Inf] bucket (100). 
-eval instant at 1m cbh_two_buckets_split_at_negative / -10.0 - cbh_two_buckets_split_at_negative {{schema:-53 sum:-500 count:100 custom_values:[-5] buckets:[0 100]}} - -# Skip [-Inf, -5] bucket (1). -eval instant at 1m cbh_two_buckets_split_at_negative >/ -2.0 - cbh_two_buckets_split_at_negative {{schema:-53 sum:-200 count:100 custom_values:[-5] buckets:[0 100]}} - -# Skip [-Inf, -5] bucket (1). -eval instant at 1m cbh_two_buckets_split_at_negative >/ 0.0 - cbh_two_buckets_split_at_negative {{schema:-53 sum:0.0 count:100 custom_values:[-5] buckets:[0 100]}} - -# Skip [-Inf, -5] bucket (1). -eval instant at 1m cbh_two_buckets_split_at_negative >/ 10.0 - cbh_two_buckets_split_at_negative {{schema:-53 sum:1000.0 count:100 custom_values:[-5] buckets:[0 100]}} - - -load 1m - cbh_for_join{label="a"} {{schema:-53 sum:33 count:101 custom_values:[5] buckets:[1 100]}} - cbh_for_join{label="b"} {{schema:-53 sum:66 count:202 custom_values:[5] buckets:[2 200]}} - cbh_for_join{label="c"} {{schema:-53 sum:99 count:303 custom_values:[5] buckets:[3 300]}} - float_for_join{label="a"} 1 - float_for_join{label="b"} 4 - -eval instant at 1m cbh_for_join >/ on (label) float_for_join - {label="a"} {{schema:-53 count:100.8 sum:502.4 custom_values:[5] buckets:[0.8 100]}} - {label="b"} {{schema:-53 count:200.4 sum:1001.8 custom_values:[5] buckets:[0.4 200]}} - - -clear diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index b4d69078155..d5a9ba72b41 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -105,7 +105,6 @@ type scrapePool struct { activeTargets map[uint64]*Target droppedTargets []*Target // Subject to KeepDroppedTargets limit. droppedTargetsCount int // Count of all dropped targets. - scrapeFailureLogger FailureLogger // newLoop injection for testing purposes. 
injectTestNewLoop func(scrapeLoopOptions) loop @@ -113,6 +112,9 @@ type scrapePool struct { metrics *scrapeMetrics buffers *pool.Pool offsetSeed uint64 + + scrapeFailureLogger FailureLogger + scrapeFailureLoggerMtx sync.RWMutex } type labelLimits struct { @@ -222,18 +224,26 @@ func (sp *scrapePool) DroppedTargetsCount() int { } func (sp *scrapePool) SetScrapeFailureLogger(l FailureLogger) { - sp.targetMtx.Lock() - defer sp.targetMtx.Unlock() + sp.scrapeFailureLoggerMtx.Lock() + defer sp.scrapeFailureLoggerMtx.Unlock() if l != nil { l = slog.New(l).With("job_name", sp.config.JobName).Handler().(FailureLogger) } sp.scrapeFailureLogger = l + sp.targetMtx.Lock() + defer sp.targetMtx.Unlock() for _, s := range sp.loops { s.setScrapeFailureLogger(sp.scrapeFailureLogger) } } +func (sp *scrapePool) getScrapeFailureLogger() FailureLogger { + sp.scrapeFailureLoggerMtx.RLock() + defer sp.scrapeFailureLoggerMtx.RUnlock() + return sp.scrapeFailureLogger +} + // stop terminates all scrape loops and returns after they all terminated. 
func (sp *scrapePool) stop() { sp.mtx.Lock() @@ -313,7 +323,6 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { sp.targetMtx.Lock() forcedErr := sp.refreshTargetLimitErr() - scrapeFailureLogger := sp.scrapeFailureLogger for fp, oldLoop := range sp.loops { var cache *scrapeCache if oc := oldLoop.getCache(); reuseCache && oc != nil { @@ -355,7 +364,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { wg.Done() newLoop.setForcedError(forcedErr) - newLoop.setScrapeFailureLogger(scrapeFailureLogger) + newLoop.setScrapeFailureLogger(sp.getScrapeFailureLogger()) newLoop.run(nil) }(oldLoop, newLoop) diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index 21f5f715e43..2c2ca24f546 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -248,6 +248,20 @@ func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metada return ref, nil } +func (f *fanoutAppender) UpdateResource(ref SeriesRef, l labels.Labels, identifying, descriptive map[string]string, entities []EntityData, t int64) (SeriesRef, error) { + ref, err := f.primary.UpdateResource(ref, l, identifying, descriptive, entities, t) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.UpdateResource(ref, l, identifying, descriptive, entities, t); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) AppendSTZeroSample(ref SeriesRef, l labels.Labels, t, st int64) (SeriesRef, error) { ref, err := f.primary.AppendSTZeroSample(ref, l, t, st) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/generic.go b/vendor/github.com/prometheus/prometheus/storage/generic.go index e85ac77b9c6..f6f161e6420 100644 --- a/vendor/github.com/prometheus/prometheus/storage/generic.go +++ b/vendor/github.com/prometheus/prometheus/storage/generic.go 
@@ -20,6 +20,7 @@ import ( "context" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" ) @@ -92,10 +93,46 @@ func (q *querierAdapter) Select(ctx context.Context, sortSeries bool, hints *Sel return &seriesSetAdapter{q.genericQuerier.Select(ctx, sortSeries, hints, matchers...)} } +// GetResourceAt implements ResourceQuerier by delegating to the underlying genericQuerier +// if it supports ResourceQuerier. +func (q *querierAdapter) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + if rq, ok := q.genericQuerier.(ResourceQuerier); ok { + return rq.GetResourceAt(labelsHash, timestamp) + } + return nil, false +} + +// IterUniqueAttributeNames implements ResourceQuerier by delegating to the underlying +// genericQuerier if it supports ResourceQuerier. +func (q *querierAdapter) IterUniqueAttributeNames(fn func(name string)) error { + if rq, ok := q.genericQuerier.(ResourceQuerier); ok { + return rq.IterUniqueAttributeNames(fn) + } + return nil +} + type chunkQuerierAdapter struct { genericQuerier } +// GetResourceAt implements ResourceQuerier by delegating to the underlying genericQuerier +// if it supports ResourceQuerier. +func (q *chunkQuerierAdapter) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + if rq, ok := q.genericQuerier.(ResourceQuerier); ok { + return rq.GetResourceAt(labelsHash, timestamp) + } + return nil, false +} + +// IterUniqueAttributeNames implements ResourceQuerier by delegating to the underlying +// genericQuerier if it supports ResourceQuerier. 
+func (q *chunkQuerierAdapter) IterUniqueAttributeNames(fn func(name string)) error { + if rq, ok := q.genericQuerier.(ResourceQuerier); ok { + return rq.IterUniqueAttributeNames(fn) + } + return nil +} + type chunkSeriesSetAdapter struct { genericSeriesSet } diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index f1b77c14df4..8fd2965c306 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -24,6 +24,7 @@ import ( "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" ) @@ -200,6 +201,22 @@ type ExemplarQuerier interface { Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) } +// ResourceQuerier provides access to OTel resource attributes for series. +// This is an optional interface that queriers may implement to support +// the info() PromQL function with resource attributes. +type ResourceQuerier interface { + // GetResourceAt returns the resource version active at the given timestamp for the series. + // The labelsHash is the stable hash of the series labels (from labels.StableHash()). + // Returns nil, false if no resource is found for the series. + GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) + + // IterUniqueAttributeNames iterates over all unique resource attribute names + // across all stored resources. The function is called once for each unique + // attribute name (both identifying and descriptive attributes). + // This is used to build reverse mappings from translated names to original names. 
+ IterUniqueAttributeNames(fn func(name string)) error +} + // SelectHints specifies hints passed for data selections. // This is used only as an option for implementation to use. type SelectHints struct { @@ -308,6 +325,7 @@ type Appender interface { ExemplarAppender HistogramAppender MetadataUpdater + ResourceUpdater StartTimestampAppender } @@ -389,6 +407,37 @@ type MetadataUpdater interface { UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) } +// EntityData represents an OTel entity with its type and attributes. +// Used for passing entity information through the storage interface. +type EntityData struct { + // Type defines the entity type (e.g., "service", "host", "container", "resource"). + Type string + // ID contains identifying attributes that uniquely identify the entity. + ID map[string]string + // Description contains descriptive (non-identifying) attributes. + Description map[string]string +} + +// ResourceUpdater provides an interface for associating OTel resources to stored series. +// A resource contains both resource-level attributes and typed entities. +type ResourceUpdater interface { + // UpdateResource updates the resource for the given series. + // The identifying map contains resource-level attributes that uniquely identify the resource + // (by default: service.name, service.namespace, service.instance.id). + // The descriptive map contains all other resource-level attributes. + // The entities slice contains typed entities (e.g., service, container, host). + // The timestamp t is used to track when this resource version was observed. + // If the resource differs from the current version, a new version is created. + // If it matches, the existing version's time range is extended. + // A series reference number is returned which can be used to modify the + // resource of the given series in the same or later transactions. 
+ // Returned reference numbers are ephemeral and may be rejected in calls + // to UpdateResource() at any point. If the series does not exist, + // UpdateResource returns an error. + // If the reference is 0 it must not be used for caching. + UpdateResource(ref SeriesRef, l labels.Labels, identifying, descriptive map[string]string, entities []EntityData, t int64) (SeriesRef, error) +} + // StartTimestampAppender provides an interface for appending ST to storage. // // WARNING(bwplotka): Switch to AppendableV2 is in progress (https://github.com/prometheus/prometheus/issues/17632). diff --git a/vendor/github.com/prometheus/prometheus/storage/interface_append.go b/vendor/github.com/prometheus/prometheus/storage/interface_append.go index 3753544eb07..bbf7ec2050f 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface_append.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface_append.go @@ -36,6 +36,24 @@ type AppendableV2 interface { // NOTE: AppendOption is used already. type AOptions = AppendV2Options +// ResourceContext provides optional OTel resource attributes for a series. +// When set, the storage layer persists resource identifying/descriptive attributes +// and entity data alongside the series, enabling resource-level queries (e.g. info()). +type ResourceContext struct { + Identifying map[string]string + Descriptive map[string]string + Entities []EntityData +} + +// ScopeContext provides optional OTel InstrumentationScope data for a series. +// When set, the storage layer persists scope metadata alongside the series. +type ScopeContext struct { + Name string + Version string + SchemaURL string + Attrs map[string]string +} + // AppendV2Options provides optional, auxiliary data and configuration for AppenderV2.Append. 
type AppendV2Options struct { // MetricFamilyName (optional) provides metric family name for the appended sample's @@ -72,6 +90,15 @@ type AppendV2Options struct { // Duplicate exemplars errors MUST be ignored by implementations. Exemplars []exemplar.Exemplar + // Resource (optional) provides OTel resource attributes for the series. + // When set, the storage layer persists resource attributes alongside the series. + // This enables resource-level queries such as the info() PromQL function. + Resource *ResourceContext + + // Scope (optional) provides OTel InstrumentationScope data for the series. + // When set, the storage layer persists scope metadata alongside the series. + Scope *ScopeContext + // RejectOutOfOrder tells implementation that this append should not be out // of order. An OOO append MUST be rejected with storage.ErrOutOfOrderSample // error. diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 0bb44beff49..5c42de74a77 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" ) @@ -278,6 +279,59 @@ func (q *mergeGenericQuerier) Close() error { return errors.Join(errs...) } +// extractResourceQuerier unwraps a genericQuerier to find the underlying ResourceQuerier. +// Handles both *genericQuerierAdapter (primary queriers) and *secondaryQuerier (remote storage). 
+func extractResourceQuerier(gq genericQuerier) ResourceQuerier { + switch q := gq.(type) { + case *genericQuerierAdapter: + if q.q != nil { + if rq, ok := q.q.(ResourceQuerier); ok { + return rq + } + } + if q.cq != nil { + if rq, ok := q.cq.(ResourceQuerier); ok { + return rq + } + } + case *secondaryQuerier: + return extractResourceQuerier(q.genericQuerier) + } + return nil +} + +// GetResourceAt implements ResourceQuerier by trying each underlying querier. +// Returns the first non-nil result from any querier that supports ResourceQuerier. +func (q *mergeGenericQuerier) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + for _, gq := range q.queriers { + if rq := extractResourceQuerier(gq); rq != nil { + if rv, found := rq.GetResourceAt(labelsHash, timestamp); found { + return rv, true + } + } + } + return nil, false +} + +// IterUniqueAttributeNames implements ResourceQuerier by iterating unique attribute +// names across all underlying queriers that support ResourceQuerier. 
+func (q *mergeGenericQuerier) IterUniqueAttributeNames(fn func(name string)) error { + seen := make(map[string]struct{}) + for _, gq := range q.queriers { + if rq := extractResourceQuerier(gq); rq != nil { + if err := rq.IterUniqueAttributeNames(func(name string) { + if _, ok := seen[name]; !ok { + seen[name] = struct{}{} + fn(name) + } + }); err != nil { + return err + } + } + } + return nil +} + func truncateToLimit(s []string, hints *LabelHints) []string { if hints != nil && hints.Limit > 0 && len(s) > hints.Limit { s = s[:hints.Limit] diff --git a/vendor/github.com/prometheus/prometheus/storage/noop.go b/vendor/github.com/prometheus/prometheus/storage/noop.go index 751e6304dbe..39fbb741590 100644 --- a/vendor/github.com/prometheus/prometheus/storage/noop.go +++ b/vendor/github.com/prometheus/prometheus/storage/noop.go @@ -17,9 +17,15 @@ import ( "context" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" ) +var ( + _ ResourceQuerier = noopQuerier{} + _ ResourceQuerier = noopChunkQuerier{} +) + type noopQuerier struct{} // NoopQuerier is a Querier that does nothing. @@ -43,6 +49,14 @@ func (noopQuerier) Close() error { return nil } +func (noopQuerier) GetResourceAt(uint64, int64) (*seriesmetadata.ResourceVersion, bool) { + return nil, false +} + +func (noopQuerier) IterUniqueAttributeNames(func(string)) error { + return nil +} + type noopChunkQuerier struct{} // NoopChunkedQuerier is a ChunkQuerier that does nothing. @@ -66,6 +80,14 @@ func (noopChunkQuerier) Close() error { return nil } +func (noopChunkQuerier) GetResourceAt(uint64, int64) (*seriesmetadata.ResourceVersion, bool) { + return nil, false +} + +func (noopChunkQuerier) IterUniqueAttributeNames(func(string)) error { + return nil +} + type noopSeriesSet struct{} // NoopSeriesSet is a SeriesSet that does nothing. 
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 1d321218e78..c17069f3047 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -271,6 +271,9 @@ func (c *PrometheusConverter) addHistogramDataPoints( var cumulativeCount uint64 // Process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1. + // Note: appOpts.Exemplars is resliced per bucket below. This is safe because appOpts is + // passed by value to Append(). The shared Resource pointer is also safe because TSDB + // deep-copies it via NewVersionedResource/AddOrExtend. for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -528,6 +531,8 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s // Do not pass identifying attributes as ignoreAttrs below. identifyingAttrs = nil } + // Resource is intentionally nil: target_info IS the resource representation, + // so attaching resource attributes to it would be redundant/circular. 
appOpts := storage.AOptions{ Metadata: metadata.Metadata{ Type: model.MetricTypeGauge, diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 600282af6fa..6332e226cd3 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -26,12 +26,14 @@ import ( "github.com/prometheus/otlptranslator" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/xpdata/entity" semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" ) @@ -95,6 +97,15 @@ type PrometheusConverter struct { scopeLabels *cachedScopeLabels labelNamer otlptranslator.LabelNamer + // resourceCtx holds the current resource context (attributes + entities) for the + // current ResourceMetrics boundary. Set once per resource via setResourceContext, + // then passed through AppendV2Options.Resource to the storage layer. + resourceCtx *storage.ResourceContext + + // scopeCtx holds the current scope context for the current ScopeMetrics boundary. + // Set once per scope via buildScopeContext, then passed through AppendV2Options.Scope. + scopeCtx *storage.ScopeContext + // sanitizedLabels caches the results of label name sanitization within a request. // This avoids repeated string allocations for the same label names. 
sanitizedLabels map[string]string @@ -191,6 +202,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = errors.Join(errs, err) continue } + c.buildResourceContext(resource) // keep track of the earliest and latest timestamp in the ResourceMetrics for // use with the "target" info metric @@ -203,6 +215,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = errors.Join(errs, err) continue } + c.buildScopeContext(scopeMetrics) metricSlice := scopeMetrics.Metrics() @@ -245,6 +258,8 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric Help: metric.Description(), }, MetricFamilyName: promName, + Resource: c.resourceCtx, + Scope: c.scopeCtx, } // handle individual metrics based on type @@ -478,4 +493,111 @@ func (c *PrometheusConverter) setScopeContext(scope scope, settings Settings) er func (c *PrometheusConverter) clearResourceContext() { c.resourceLabels = nil c.scopeLabels = nil + c.resourceCtx = nil + c.scopeCtx = nil +} + +// buildResourceContext builds a ResourceContext from the given OTLP resource. +// The context is cached on the converter and reused for all datapoints within the same ResourceMetrics. +func (c *PrometheusConverter) buildResourceContext(resource pcommon.Resource) { + attrs := resourceAttrsToMap(resource.Attributes()) + if len(attrs) == 0 { + c.resourceCtx = nil + return + } + + identifying, descriptive := seriesmetadata.SplitAttributes(attrs) + entities := extractEntities(resource, attrs) + + c.resourceCtx = &storage.ResourceContext{ + Identifying: identifying, + Descriptive: descriptive, + Entities: entities, + } +} + +// buildScopeContext builds a ScopeContext from the given OTLP ScopeMetrics. +// The context is cached on the converter and reused for all datapoints within the same ScopeMetrics. 
+func (c *PrometheusConverter) buildScopeContext(scopeMetrics pmetric.ScopeMetrics) { + ils := scopeMetrics.Scope() + name := ils.Name() + version := ils.Version() + schemaURL := scopeMetrics.SchemaUrl() + + attrs := make(map[string]string, ils.Attributes().Len()) + ils.Attributes().Range(func(k string, v pcommon.Value) bool { + attrs[k] = v.AsString() + return true + }) + + if name == "" && version == "" && schemaURL == "" && len(attrs) == 0 { + c.scopeCtx = nil + return + } + + c.scopeCtx = &storage.ScopeContext{ + Name: name, + Version: version, + SchemaURL: schemaURL, + Attrs: attrs, + } +} + +// resourceAttrsToMap converts OTel resource attributes to a map[string]string. +func resourceAttrsToMap(attrs pcommon.Map) map[string]string { + if attrs.Len() == 0 { + return nil + } + result := make(map[string]string, attrs.Len()) + attrs.Range(func(key string, value pcommon.Value) bool { + result[key] = value.AsString() + return true + }) + return result +} + +// extractEntities extracts entities from OTLP entity_refs. +// Returns nil if no entity_refs are present. 
+func extractEntities(resource pcommon.Resource, attrs map[string]string) []storage.EntityData { + entityRefs := entity.ResourceEntityRefs(resource) + + if entityRefs.Len() == 0 { + return nil + } + + entities := make([]storage.EntityData, 0, entityRefs.Len()) + for i := 0; i < entityRefs.Len(); i++ { + ref := entityRefs.At(i) + entityType := ref.Type() + if entityType == "" { + entityType = seriesmetadata.EntityTypeResource + } + + // Extract identifying attributes by looking up the id_keys in the attributes map + idKeys := ref.IdKeys() + id := make(map[string]string, idKeys.Len()) + for j := 0; j < idKeys.Len(); j++ { + key := idKeys.At(j) + if val, ok := attrs[key]; ok { + id[key] = val + } + } + + // Extract descriptive attributes by looking up the description_keys in the attributes map + descKeys := ref.DescriptionKeys() + desc := make(map[string]string, descKeys.Len()) + for j := 0; j < descKeys.Len(); j++ { + key := descKeys.At(j) + if val, ok := attrs[key]; ok { + desc[key] = val + } + } + + entities = append(entities, storage.EntityData{ + Type: entityType, + ID: id, + Description: desc, + }) + } + return entities } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index 6a336dc06b3..9fe90314159 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -362,6 +362,10 @@ func (*timestampTracker) UpdateMetadata(storage.SeriesRef, labels.Labels, metada return 0, nil } +func (*timestampTracker) UpdateResource(storage.SeriesRef, labels.Labels, map[string]string, map[string]string, []storage.EntityData, int64) (storage.SeriesRef, error) { + return 0, nil +} + // Commit implements storage.Appender. 
func (t *baseTimestampTracker) Commit() error { t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_otlp_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_otlp_handler.go index 6cb4a0fff0f..84de6a250e4 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_otlp_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_otlp_handler.go @@ -127,6 +127,7 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er EnableTypeAndUnitLabels: rw.enableTypeAndUnitLabels, LabelNameUnderscoreSanitization: otlpCfg.LabelNameUnderscoreSanitization, LabelNamePreserveMultipleUnderscores: otlpCfg.LabelNamePreserveMultipleUnderscores, + DisableTargetInfo: otlpCfg.DisableTargetInfo, }) defer func() { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 2ed47a53088..9ffab6b03b1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -36,6 +36,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/tsdb/tombstones" ) @@ -163,6 +164,9 @@ type BlockReader interface { // Tombstones returns a tombstones.Reader over the block's deleted data. Tombstones() (tombstones.Reader, error) + // SeriesMetadata returns a seriesmetadata.Reader over the block's series metadata. + SeriesMetadata() (seriesmetadata.Reader, error) + // Meta provides meta information about the block reader. Meta() BlockMeta @@ -191,6 +195,26 @@ type BlockMeta struct { // OutOfOrder is true if the block was directly created from out-of-order samples. 
OutOfOrder bool `json:"out_of_order"` + + // SeriesMetadata contains optional stats about the OTel series metadata + // Parquet file. Populated during compaction when native metadata is enabled. + // Nil when no metadata is present. + SeriesMetadata *BlockSeriesMetadata `json:"seriesMetadata,omitempty"` +} + +// BlockSeriesMetadata holds metadata stats for a block's Parquet sidecar file. +// This enables Mimir store-gateway to pre-plan queries without opening the Parquet file. +type BlockSeriesMetadata struct { + // Enabled indicates that the block contains series metadata. + Enabled bool `json:"enabled"` + + // NamespaceRowCounts maps namespace names (e.g. "resource_table", "scope_mapping") + // to the number of rows in that namespace. + NamespaceRowCounts map[string]uint64 `json:"namespaceRowCounts,omitempty"` + + // IndexedResourceAttrs lists the resource attribute names that are + // included in the inverted index (resource_attr_index namespace). + IndexedResourceAttrs []string `json:"indexedResourceAttrs,omitempty"` } // BlockStats contains stats about contents of a block. @@ -304,7 +328,7 @@ func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, err jsonMeta, err := json.MarshalIndent(meta, "", "\t") if err != nil { - return 0, errors.Join(err, f.Close()) + return 0, err } n, err := f.Write(jsonMeta) @@ -339,12 +363,19 @@ type Block struct { indexr IndexReader tombstones tombstones.Reader + // Series metadata is lazily loaded on first access via sync.Once. + // This avoids the cost of resolving all seriesRef → labelsHash at block open time. + seriesMetadataOnce sync.Once + seriesMetadata seriesmetadata.Reader + seriesMetadataErr error + logger *slog.Logger - numBytesChunks int64 - numBytesIndex int64 - numBytesTombstone int64 - numBytesMeta int64 + numBytesChunks int64 + numBytesIndex int64 + numBytesTombstone int64 + numBytesMeta int64 + numBytesSeriesMetadata int64 } // OpenBlock opens the block in the directory. 
It can be passed a chunk pool, which is used @@ -396,18 +427,27 @@ func OpenBlockWithOptions(logger *slog.Logger, dir string, pool chunkenc.Pool, p } closers = append(closers, tr) + // Series metadata (Parquet) is loaded lazily on first SeriesMetadata() call + // to avoid the cost of resolving all seriesRef → labelsHash at block open time. + // We stat the file now (cheap) so that Size() is correct before lazy load. + var sizeSeriesMeta int64 + if fi, statErr := os.Stat(filepath.Join(dir, seriesmetadata.SeriesMetadataFilename)); statErr == nil { + sizeSeriesMeta = fi.Size() + } + pb = &Block{ - dir: dir, - meta: *meta, - chunkr: cr, - indexr: ir, - tombstones: tr, - symbolTableSize: ir.SymbolTableSize(), - logger: logger, - numBytesChunks: cr.Size(), - numBytesIndex: ir.Size(), - numBytesTombstone: sizeTomb, - numBytesMeta: sizeMeta, + dir: dir, + meta: *meta, + chunkr: cr, + indexr: ir, + tombstones: tr, + symbolTableSize: ir.SymbolTableSize(), + logger: logger, + numBytesChunks: cr.Size(), + numBytesIndex: ir.Size(), + numBytesTombstone: sizeTomb, + numBytesMeta: sizeMeta, + numBytesSeriesMetadata: sizeSeriesMeta, } return pb, nil @@ -421,10 +461,16 @@ func (pb *Block) Close() error { pb.pendingReaders.Wait() + var metaCloseErr error + if pb.seriesMetadata != nil { + metaCloseErr = pb.seriesMetadata.Close() + } + return errors.Join( pb.chunkr.Close(), pb.indexr.Close(), pb.tombstones.Close(), + metaCloseErr, ) } @@ -446,7 +492,7 @@ func (pb *Block) MaxTime() int64 { return pb.meta.MaxTime } // Size returns the number of bytes that the block takes up. func (pb *Block) Size() int64 { - return pb.numBytesChunks + pb.numBytesIndex + pb.numBytesTombstone + pb.numBytesMeta + return pb.numBytesChunks + pb.numBytesIndex + pb.numBytesTombstone + pb.numBytesMeta + pb.numBytesSeriesMetadata } // ErrClosing is returned when a block is in the process of being closed. 
// SeriesMetadata returns a new SeriesMetadataReader against the block data.
// The Parquet metadata file is loaded lazily on the first call and cached via
// sync.Once for the block's lifetime; a load failure is cached too, so later
// calls return the same error without retrying.
func (pb *Block) SeriesMetadata() (seriesmetadata.Reader, error) {
	// Register as a pending reader so that Block.Close waits for us.
	if err := pb.startRead(); err != nil {
		return nil, err
	}

	pb.seriesMetadataOnce.Do(func() {
		pb.seriesMetadata, pb.seriesMetadataErr = pb.loadSeriesMetadata()
	})
	if pb.seriesMetadataErr != nil {
		// Release the pending-reader slot taken by startRead above.
		pb.pendingReaders.Done()
		return nil, pb.seriesMetadataErr
	}

	// The wrapper's Close releases the pending-reader slot exactly once.
	return &blockSeriesMetadataReader{Reader: pb.seriesMetadata, b: pb}, nil
}

// loadSeriesMetadata reads the Parquet file and resolves seriesRef → labelsHash.
// It also captures the label set for each distinct hash so the reader can map
// hashes back to labels after loading.
func (pb *Block) loadSeriesMetadata() (seriesmetadata.Reader, error) {
	var builder labels.ScratchBuilder
	labelsCapture := make(map[uint64]labels.Labels)
	readRefResolver := seriesmetadata.WithRefResolver(func(seriesRef uint64) (uint64, bool) {
		// Look up the series labels in the block index; an unresolvable ref
		// is reported as (0, false) and skipped by the reader.
		if err := pb.indexr.Series(storage.SeriesRef(seriesRef), &builder, nil); err != nil {
			return 0, false
		}
		lset := builder.Labels()
		hash := labels.StableHash(lset)
		if _, exists := labelsCapture[hash]; !exists {
			labelsCapture[hash] = lset
		}
		return hash, true
	})

	smr, sizeSeriesMeta, err := seriesmetadata.ReadSeriesMetadata(pb.logger, pb.dir, readRefResolver)
	if err != nil {
		return nil, fmt.Errorf("read series metadata: %w", err)
	}

	// Hand the captured hash → labels mapping to the reader when it supports it.
	if populator, ok := smr.(seriesmetadata.LabelsPopulator); ok {
		for hash, lset := range labelsCapture {
			populator.SetLabels(hash, lset)
		}
	}

	// Record the size reported by the reader (refines the stat taken at open time).
	pb.numBytesSeriesMetadata = sizeSeriesMeta
	return smr, nil
}
func (pb *Block) GetSymbolTableSize() uint64 { return pb.symbolTableSize @@ -620,6 +715,21 @@ func (r blockChunkReader) Close() error { return nil } +type blockSeriesMetadataReader struct { + seriesmetadata.Reader + b *Block + closeOnce sync.Once + closeErr error +} + +func (r *blockSeriesMetadataReader) Close() error { + r.closeOnce.Do(func() { + r.b.pendingReaders.Done() + r.closeErr = r.Reader.Close() + }) + return r.closeErr +} + // Delete matching series between mint and maxt in the block. func (pb *Block) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error { pb.mtx.Lock() @@ -727,13 +837,18 @@ func (pb *Block) Snapshot(dir string) error { return fmt.Errorf("create snapshot chunk dir: %w", err) } - // Hardlink meta, index and tombstones + // Hardlink meta, index, tombstones, and series metadata for _, fname := range []string{ metaFilename, indexFilename, tombstones.TombstonesFilename, + seriesmetadata.SeriesMetadataFilename, } { if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil { + // Ignore missing series metadata file for backward compatibility + if os.IsNotExist(err) && fname == seriesmetadata.SeriesMetadataFilename { + continue + } return fmt.Errorf("create snapshot %s: %w", fname, err) } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 92e0e9f2bac..15a41928d65 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -38,6 +38,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/tsdb/tombstones" ) @@ -99,6 +100,9 @@ type LeveledCompactor struct { postingsDecoderFactory PostingsDecoderFactory enableOverlappingCompaction bool concurrencyOpts 
LeveledCompactorConcurrencyOptions + enableNativeMetadata bool + indexedResourceAttrs map[string]struct{} + enableResourceAttrIndex bool } type CompactorMetrics struct { @@ -191,6 +195,14 @@ type LeveledCompactorOptions struct { Metrics *CompactorMetrics // UseUncachedIO allows bypassing the page cache when appropriate. UseUncachedIO bool + // EnableNativeMetadata enables persistence of OTel resource/scope attributes during compaction. + EnableNativeMetadata bool + // IndexedResourceAttrs specifies additional descriptive resource attribute + // names to include in the inverted index beyond identifying attributes. + IndexedResourceAttrs map[string]struct{} + // EnableResourceAttrIndex enables the resource attribute inverted index + // in compacted Parquet files. + EnableResourceAttrIndex bool } type PostingsDecoderFactory func(meta *BlockMeta) index.PostingsDecoder @@ -247,6 +259,9 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer enableOverlappingCompaction: opts.EnableOverlappingCompaction, concurrencyOpts: DefaultLeveledCompactorConcurrencyOptions(), blockExcludeFunc: opts.BlockExcludeFilter, + enableNativeMetadata: opts.EnableNativeMetadata, + indexedResourceAttrs: opts.IndexedResourceAttrs, + enableResourceAttrIndex: opts.EnableResourceAttrIndex, }, nil } @@ -951,6 +966,19 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop return fmt.Errorf("write new tombstones file: %w", err) } + // Merge and write series metadata (resources and scopes) from source blocks. + if c.enableNativeMetadata { + if err := c.mergeAndWriteSeriesMetadata(ob.tmpDir, blocks, ob.meta); err != nil { + return fmt.Errorf("merge and write series metadata: %w", err) + } + // Re-write meta.json if metadata stats were populated. 
+ if ob.meta.SeriesMetadata != nil { + if _, err = writeMetaFile(c.logger, ob.tmpDir, ob.meta); err != nil { + return fmt.Errorf("rewrite meta with series metadata stats: %w", err) + } + } + } + df, err := fileutil.OpenDir(ob.tmpDir) if err != nil { return fmt.Errorf("open temporary block dir: %w", err) @@ -1031,6 +1059,132 @@ func timeFromMillis(ms int64) time.Time { return time.Unix(0, ms*int64(time.Millisecond)) } +// mergeAndWriteSeriesMetadata merges versioned resources and scopes from +// source blocks and writes them to the new compacted block. The merged data +// is keyed by labelsHash in memory; on write, a RefResolver built from the +// new block's index converts labelsHash → seriesRef for Parquet mapping rows. +// If metadata is written, meta.SeriesMetadata is populated with stats. +func (c *LeveledCompactor) mergeAndWriteSeriesMetadata(tmp string, blocks []BlockReader, meta *BlockMeta) error { + output := seriesmetadata.NewMemSeriesMetadata() + + for _, b := range blocks { + mr, err := b.SeriesMetadata() + if err != nil { + return fmt.Errorf("get series metadata from block: %w", err) + } + + // Merge all metadata kinds from this block into the output. + for _, kind := range seriesmetadata.AllKinds() { + err = mr.IterKind(context.Background(), kind.ID(), func(labelsHash uint64, versioned any) error { + store := output.StoreForKind(kind.ID()) + kind.SetVersioned(store, labelsHash, versioned) + return nil + }) + if err != nil { + mr.Close() + return fmt.Errorf("iterate %s: %w", kind.ID(), err) + } + } + + mr.Close() + } + + if output.ResourceCount() == 0 && output.ScopeCount() == 0 { + return nil + } + + // Open the new block's index to build labelsHash → seriesRef mapping. + ir, err := index.NewFileReader(filepath.Join(tmp, indexFilename), index.DecodePostingsRaw) + if err != nil { + return fmt.Errorf("open new block index for ref resolver: %w", err) + } + defer ir.Close() + + // Build labelsHash → seriesRef mapping by scanning the index. 
+ // Collect hashes that need resolving from the merged metadata to avoid + // storing entries for series without metadata. + needsResolve := make(map[uint64]struct{}, output.ResourceCount()+output.ScopeCount()) + for _, kind := range seriesmetadata.AllKinds() { + _ = output.IterKind(c.ctx, kind.ID(), func(labelsHash uint64, _ any) error { + needsResolve[labelsHash] = struct{}{} + return nil + }) + } + labelsHashToRef := make(map[uint64]uint64, len(needsResolve)) + var builder labels.ScratchBuilder + k, v := index.AllPostingsKey() + p, err := ir.Postings(c.ctx, k, v) + if err != nil { + return fmt.Errorf("get all postings for ref resolver: %w", err) + } + for p.Next() { + ref := p.At() + if err := ir.Series(ref, &builder, nil); err != nil { + return fmt.Errorf("read series for ref resolver: %w", err) + } + lh := labels.StableHash(builder.Labels()) + if _, ok := needsResolve[lh]; ok { + labelsHashToRef[lh] = uint64(ref) + // Early exit if all hashes resolved. + if len(labelsHashToRef) == len(needsResolve) { + break + } + } + } + if err := p.Err(); err != nil { + return fmt.Errorf("iterate postings for ref resolver: %w", err) + } + + // Filter the merged metadata to only include entries for series that + // exist in this output block's index. In sharded compaction each shard + // only contains ~1/N of all series, so without filtering the RefResolver + // would fail for every cross-shard labelsHash, causing warn-level log spam. 
+ filtered := seriesmetadata.NewMemSeriesMetadata() + for _, kind := range seriesmetadata.AllKinds() { + _ = output.IterKind(c.ctx, kind.ID(), func(labelsHash uint64, versioned any) error { + if _, ok := labelsHashToRef[labelsHash]; ok { + store := filtered.StoreForKind(kind.ID()) + kind.SetVersioned(store, labelsHash, versioned) + } + return nil + }) + } + + writeStats := &seriesmetadata.WriteStats{} + wopts := seriesmetadata.WriterOptions{ + EnableInvertedIndex: c.enableResourceAttrIndex, + IndexedResourceAttrs: c.indexedResourceAttrs, + RefResolver: func(labelsHash uint64) (uint64, bool) { + ref, ok := labelsHashToRef[labelsHash] + return ref, ok + }, + WriteStats: writeStats, + } + if _, err := seriesmetadata.WriteFileWithOptions(c.logger, tmp, filtered, wopts); err != nil { + return fmt.Errorf("write series metadata file: %w", err) + } + + // Populate BlockMeta with metadata stats. + if len(writeStats.NamespaceRowCounts) > 0 { + nsCounts := make(map[string]uint64, len(writeStats.NamespaceRowCounts)) + for k, v := range writeStats.NamespaceRowCounts { + nsCounts[k] = uint64(v) + } + indexedAttrs := make([]string, 0, len(c.indexedResourceAttrs)) + for attr := range c.indexedResourceAttrs { + indexedAttrs = append(indexedAttrs, attr) + } + slices.Sort(indexedAttrs) + meta.SeriesMetadata = &BlockSeriesMetadata{ + Enabled: true, + NamespaceRowCounts: nsCounts, + IndexedResourceAttrs: indexedAttrs, + } + } + + return nil +} + type BlockPopulator interface { PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) error } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 849e69a3034..1c58a7152b6 100644 --- 
a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -45,11 +45,11 @@ import ( _ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minimum Go version is met. "github.com/prometheus/prometheus/tsdb/hashcache" "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/compression" "github.com/prometheus/prometheus/util/features" - prom_runtime "github.com/prometheus/prometheus/util/runtime" ) const ( @@ -143,11 +143,6 @@ type Options struct { // the current size of the database. MaxBytes int64 - // Maximum % of disk space to use for blocks to be retained. - // 0 or less means disabled. - // If both MaxBytes and MaxPercentage are set, percentage prevails. - MaxPercentage uint - // NoLockfile disables creation and consideration of a lock file. NoLockfile bool @@ -356,6 +351,21 @@ type Options struct { // is implemented. EnableMetadataWALRecords bool + // EnableNativeMetadata represents 'native-metadata' feature flag. + // When enabled, OTel resource/scope attributes are persisted per time series + // in Parquet-based metadata files alongside TSDB blocks. + EnableNativeMetadata bool + + // IndexedResourceAttrs specifies additional descriptive resource attribute + // names to include in the inverted index beyond identifying attributes + // (which are always indexed). nil means index only identifying attributes. + IndexedResourceAttrs map[string]struct{} + + // EnableResourceAttrIndex enables the resource attribute inverted index + // for O(1) reverse lookup by attribute key:value. When disabled, the index + // is not built in memory or written to Parquet. Default: true. 
+ EnableResourceAttrIndex bool + // BlockCompactionExcludeFunc is a function which returns true for blocks that should NOT be compacted. // It's passed down to the TSDB compactor. BlockCompactionExcludeFunc BlockExcludeFilterFunc @@ -369,9 +379,6 @@ type Options struct { // StaleSeriesCompactionThreshold is a number between 0.0-1.0 indicating the % of stale series in // the in-memory Head block. If the % of stale series crosses this threshold, stale series compaction is run immediately. StaleSeriesCompactionThreshold float64 - - // FsSizeFunc is a function returning the total disk size for a given path. - FsSizeFunc FsSizeFunc } type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) @@ -382,8 +389,6 @@ type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, er type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) -type FsSizeFunc func(path string) uint64 - type IndexLookupPlannerFunc func(meta BlockMeta, reader IndexReader) index.LookupPlanner var DefaultIndexLookupPlannerFunc IndexLookupPlannerFunc = func(BlockMeta, IndexReader) index.LookupPlanner { return &index.ScanEmptyMatchersLookupPlanner{} } @@ -450,10 +455,22 @@ type DB struct { blockChunkQuerierFunc BlockChunkQuerierFunc - fsSizeFunc FsSizeFunc - // blockPostingsForMatchersCacheFactory returns a factory for creating PostingsForMatchersCache instances for compacted blocks. blockPostingsForMatchersCacheFactory PostingsForMatchersCacheFactory + + // Blocks-only metadata cache — avoids re-merging blocks on every request. + // metadataCache is read lock-free via atomic load; metadataBuildMtx prevents + // thundering herd on cache miss. Head data is layered on top at query time. + metadataCache atomic.Value // stores *metadataCacheEntry + metadataBuildMtx sync.Mutex +} + +// metadataCacheEntry holds the cached blocks-only merged metadata reader. 
// metadataCacheEntry holds the cached blocks-only merged metadata reader.
// The cache is keyed solely by the block ULID fingerprint — it never expires
// for the same block set and is replaced only when the set changes (e.g.
// after compaction or a block reload). Head metadata is always served live
// via the layered reader, so it is never part of this cache.
type metadataCacheEntry struct {
	reader    seriesmetadata.Reader
	blocksKey string // block ULIDs fingerprint, as produced by blocksFingerprint
}
m.staleSeriesCompactionsTriggered, m.staleSeriesCompactionsFailed, m.staleSeriesCompactionDuration, + m.seriesMetadataBytes, ) } return m @@ -801,7 +818,6 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue head: head, blockQuerierFunc: NewBlockQuerier, blockChunkQuerierFunc: NewBlockChunkQuerier, - fsSizeFunc: prom_runtime.FsSize, }, nil } @@ -1132,6 +1148,9 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn PD: opts.PostingsDecoderFactory, UseUncachedIO: opts.UseUncachedIO, BlockExcludeFilter: opts.BlockCompactionExcludeFunc, + EnableNativeMetadata: opts.EnableNativeMetadata, + IndexedResourceAttrs: opts.IndexedResourceAttrs, + EnableResourceAttrIndex: opts.EnableResourceAttrIndex, }) } if err != nil { @@ -1170,12 +1189,6 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn db.blockPostingsForMatchersCacheFactory = NewPostingsForMatchersCacheFactory(config) } - if opts.FsSizeFunc == nil { - db.fsSizeFunc = prom_runtime.FsSize - } else { - db.fsSizeFunc = opts.FsSizeFunc - } - var wal, wbl *wlog.WL segmentSize := wlog.DefaultSegmentSize // Wal is enabled. 
@@ -1241,6 +1254,9 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn } headOpts.EnableSTAsZeroSample = opts.EnableSTAsZeroSample headOpts.EnableMetadataWALRecords = opts.EnableMetadataWALRecords + headOpts.EnableNativeMetadata = opts.EnableNativeMetadata + headOpts.IndexedResourceAttrs = opts.IndexedResourceAttrs + headOpts.EnableResourceAttrIndex = opts.EnableResourceAttrIndex if opts.WALReplayConcurrency > 0 { headOpts.WALReplayConcurrency = opts.WALReplayConcurrency } @@ -1258,7 +1274,6 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn db.metrics = newDBMetrics(db, r) maxBytes := max(opts.MaxBytes, 0) db.metrics.maxBytes.Set(float64(maxBytes)) - db.metrics.maxPercentage.Set(float64(max(opts.MaxPercentage, 0))) db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds()) // Calling db.reload() calls db.reloadBlocks() which requires cmtx to be locked. @@ -1338,6 +1353,128 @@ func (db *DB) BlockMetas() []BlockMeta { return metas } +// cachedMetadataReader wraps a Reader and ignores Close() calls, +// since the underlying reader is shared across callers via the cache. +type cachedMetadataReader struct { + seriesmetadata.Reader +} + +func (*cachedMetadataReader) Close() error { return nil } + +// SeriesMetadata returns a layered reader combining blocks (cached) and head (live). +// Returns an empty reader when native metadata is not enabled. +// +// The blocks-only cache never expires for the same block set — it invalidates +// only on compaction/block reload. Head metadata updates are immediately +// visible without waiting for any TTL. +// +// NOTE: The returned reader's ref values are labels hashes, NOT series refs. +// The merged result spans multiple indexes so no single series ref is valid. +// Callers should use the resource/scope iteration methods. 
+func (db *DB) SeriesMetadata() (seriesmetadata.Reader, error) { + if !db.opts.EnableNativeMetadata { + return seriesmetadata.NewMemSeriesMetadata(), nil + } + + // Build fingerprint from current block set. + blocks := db.Blocks() + blocksKey := blocksFingerprint(blocks) + + // Fast path: check blocks cache atomically (no lock, no TTL). + var blocksMerged seriesmetadata.Reader + if v := db.metadataCache.Load(); v != nil { + if entry := v.(*metadataCacheEntry); entry.blocksKey == blocksKey { + blocksMerged = entry.reader + } + } + + if blocksMerged == nil { + // Cache miss — acquire build mutex to prevent thundering herd. + db.metadataBuildMtx.Lock() + + // Re-check after acquiring lock. + if v := db.metadataCache.Load(); v != nil { + if entry := v.(*metadataCacheEntry); entry.blocksKey == blocksKey { + blocksMerged = entry.reader + } + } + if blocksMerged == nil { + merged, err := db.mergeBlockMetadata(blocks) + if err != nil { + db.metadataBuildMtx.Unlock() + return nil, err + } + db.metadataCache.Store(&metadataCacheEntry{ + reader: merged, + blocksKey: blocksKey, + }) + blocksMerged = merged + } + db.metadataBuildMtx.Unlock() + } + + headReader, err := db.head.SeriesMetadata() + if err != nil { + return nil, err + } + + return seriesmetadata.NewLayeredReader(&cachedMetadataReader{blocksMerged}, headReader), nil +} + +// blocksFingerprint builds a cache key from sorted block ULIDs. +func blocksFingerprint(blocks []*Block) string { + if len(blocks) == 0 { + return "" + } + var b strings.Builder + for i, blk := range blocks { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(blk.Meta().ULID.String()) + } + return b.String() +} + +// mergeBlockMetadata merges metadata from all blocks into a single reader. +// Head metadata is not included — it is layered on top at query time. 
// mergeBlockMetadata merges metadata from all blocks into a single in-memory
// reader. Head metadata is not included — it is layered on top at query time.
func (db *DB) mergeBlockMetadata(blocks []*Block) (seriesmetadata.Reader, error) {
	merged := seriesmetadata.NewMemSeriesMetadata()

	for _, b := range blocks {
		mr, err := b.SeriesMetadata()
		if err != nil {
			return nil, fmt.Errorf("get block series metadata: %w", err)
		}

		for _, kind := range seriesmetadata.AllKinds() {
			err = mr.IterKind(context.Background(), kind.ID(), func(labelsHash uint64, versioned any) error {
				store := merged.StoreForKind(kind.ID())
				kind.SetVersioned(store, labelsHash, versioned)
				// Carry over the hash → labels mapping the first time a hash is seen.
				if _, exists := merged.LabelsForHash(labelsHash); !exists {
					if lset, ok := mr.LabelsForHash(labelsHash); ok {
						merged.SetLabels(labelsHash, lset)
					}
				}
				return nil
			})
			if err != nil {
				// Close eagerly rather than defer: a defer inside the loop
				// would hold every block's reader open until function return.
				mr.Close()
				return nil, fmt.Errorf("iterate block %s: %w", kind.ID(), err)
			}
		}
		mr.Close()
	}

	// Build inverted index for blocks. With Fix 3.3 (per-block Parquet index),
	// blocks read from new Parquet files already have the index populated and
	// BuildResourceAttrIndex skips. Only old-format blocks need runtime build.
	if db.opts.EnableResourceAttrIndex {
		merged.BuildResourceAttrIndex()
	}
	return merged, nil
}
-func (db *DB) getRetentionSettings() (int64, uint) { +// getMaxBytes returns the current max bytes setting in a thread-safe manner. +func (db *DB) getMaxBytes() int64 { db.retentionMtx.RLock() defer db.retentionMtx.RUnlock() - return db.opts.MaxBytes, db.opts.MaxPercentage + return db.opts.MaxBytes } // dbAppender wraps the DB's head appender and triggers compactions on commit @@ -2049,8 +2182,9 @@ func (db *DB) reloadBlocks() (err error) { } var ( - toLoad []*Block - blocksSize int64 + toLoad []*Block + blocksSize int64 + seriesMetadataSize int64 ) // All deletable blocks should be unloaded. // NOTE: We need to loop through loadable one more time as there might be loadable ready to be removed (replaced by compacted block). @@ -2062,8 +2196,10 @@ func (db *DB) reloadBlocks() (err error) { toLoad = append(toLoad, block) blocksSize += block.Size() + seriesMetadataSize += block.numBytesSeriesMetadata } db.metrics.blocksBytes.Set(float64(blocksSize)) + db.metrics.seriesMetadataBytes.Set(float64(seriesMetadataSize)) slices.SortFunc(toLoad, func(a, b *Block) int { switch { @@ -2207,25 +2343,9 @@ func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc // BeyondSizeRetention returns those blocks which are beyond the size retention // set in the db options. func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) { - // No blocks to work with - if len(blocks) == 0 { - return deletable - } - - maxBytes, maxPercentage := db.getRetentionSettings() - - // Max percentage prevails over max size. - if maxPercentage > 0 { - diskSize := db.fsSizeFunc(db.dir) - if diskSize <= 0 { - db.logger.Warn("Unable to retrieve filesystem size of database directory, skip percentage limitation and default to fixed size limitation", "dir", db.dir) - } else { - maxBytes = int64(uint64(maxPercentage) * diskSize / 100) - } - } - - // Size retention is disabled. - if maxBytes <= 0 { + // Size retention is disabled or no blocks to work with. 
func init() {
	// Wire up WAL encode/decode functions for the seriesmetadata kind descriptors.
	// These pluggable functions break the import cycle: seriesmetadata cannot
	// import tsdb/record, so the tsdb package sets them here.
	//
	// dec and enc are zero-value codecs captured by all four closures below;
	// NOTE(review): this is safe only if record.Decoder/Encoder carry no
	// per-call state — confirm against the record package.
	var dec record.Decoder
	var enc record.Encoder

	seriesmetadata.ResourceDecodeWAL = func(rec []byte, into any) (any, error) {
		// Reuse the caller-supplied buffer when one is provided.
		var buf []record.RefResource
		if into != nil {
			buf = into.([]record.RefResource)
		}
		return dec.Resources(rec, buf)
	}
	seriesmetadata.ResourceEncodeWAL = func(records any, buf []byte) []byte {
		return enc.Resources(records.([]record.RefResource), buf)
	}
	seriesmetadata.ScopeDecodeWAL = func(rec []byte, into any) (any, error) {
		// Reuse the caller-supplied buffer when one is provided.
		var buf []record.RefScope
		if into != nil {
			buf = into.([]record.RefScope)
		}
		return dec.Scopes(rec, buf)
	}
	seriesmetadata.ScopeEncodeWAL = func(records any, buf []byte) []byte {
		return enc.Scopes(records.([]record.RefScope), buf)
	}
}
type Head struct { chunkRange atomic.Int64 @@ -92,6 +122,8 @@ type Head struct { histogramsPool zeropool.Pool[[]record.RefHistogramSample] floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] metadataPool zeropool.Pool[[]record.RefMetadata] + resourcesPool zeropool.Pool[[]record.RefResource] + scopesPool zeropool.Pool[[]record.RefScope] seriesPool zeropool.Pool[[]*memSeries] typeMapPool zeropool.Pool[map[chunks.HeadSeriesRef]sampleType] bytesPool zeropool.Pool[[]byte] @@ -107,6 +139,8 @@ type Head struct { wlReplayFloatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] wlReplayMetadataPool zeropool.Pool[[]record.RefMetadata] wlReplayMmapMarkersPool zeropool.Pool[[]record.RefMmapMarker] + wlReplayResourcesPool zeropool.Pool[[]record.RefResource] + wlReplayScopesPool zeropool.Pool[[]record.RefScope] // All series addressable by their ID or hash. series *stripeSeries @@ -142,6 +176,15 @@ type Head struct { writeNotified wlog.WriteNotified + // seriesMeta holds the shared metadata store (MemStore[V] operations are + // internally concurrent-safe). The head does NOT populate seriesMeta.labelsMap — + // labels are resolved on-demand via stripeSeries.getByID. + seriesMeta *seriesmetadata.MemSeriesMetadata + // metaRefStripes and metaHashStripes provide sharded ref↔hash mappings, + // eliminating the single-lock bottleneck. Sharded by ref and hash respectively. + metaRefStripes []metadataRefStripe // sharded by ref & (len-1) + metaHashStripes []metadataHashStripe // sharded by hash & (len-1) + memTruncationInProcess atomic.Bool memTruncationCallBack func() // For testing purposes. @@ -219,6 +262,17 @@ type HeadOptions struct { // NOTE(bwplotka): This feature might be deprecated and removed once PROM-60 // is implemented. EnableMetadataWALRecords bool + + // EnableNativeMetadata represents 'native-metadata' feature flag. + // When enabled, OTel resource/scope attributes are persisted per time series. 
+ EnableNativeMetadata bool + + // IndexedResourceAttrs specifies additional descriptive resource attribute + // names to include in the inverted index beyond identifying attributes. + IndexedResourceAttrs map[string]struct{} + + // EnableResourceAttrIndex enables the resource attribute inverted index. + EnableResourceAttrIndex bool } const ( @@ -388,6 +442,16 @@ func (h *Head) resetInMemoryState() error { h.maxOOOTime.Store(math.MinInt64) h.lastWALTruncationTime.Store(math.MinInt64) h.lastMemoryTruncationTime.Store(math.MinInt64) + + if h.opts.EnableNativeMetadata { + h.seriesMeta = seriesmetadata.NewMemSeriesMetadata() + h.seriesMeta.SetIndexedResourceAttrs(h.opts.IndexedResourceAttrs) + if h.opts.EnableResourceAttrIndex { + h.seriesMeta.InitResourceAttrIndex() + } + h.metaRefStripes, h.metaHashStripes = newMetadataStripes() + } + return nil } @@ -400,39 +464,45 @@ func (h *Head) resetWLReplayResources() { h.wlReplayFloatHistogramsPool = zeropool.Pool[[]record.RefFloatHistogramSample]{} h.wlReplayMetadataPool = zeropool.Pool[[]record.RefMetadata]{} h.wlReplayMmapMarkersPool = zeropool.Pool[[]record.RefMmapMarker]{} + h.wlReplayResourcesPool = zeropool.Pool[[]record.RefResource]{} + h.wlReplayScopesPool = zeropool.Pool[[]record.RefScope]{} } type headMetrics struct { - activeAppenders prometheus.Gauge - series prometheus.GaugeFunc - staleSeries prometheus.GaugeFunc - seriesCreated prometheus.Counter - seriesRemoved prometheus.Counter - seriesNotFound prometheus.Counter - chunks prometheus.Gauge - chunksCreated prometheus.Counter - chunksRemoved prometheus.Counter - gcDuration prometheus.Summary - samplesAppended *prometheus.CounterVec - outOfOrderSamplesAppended *prometheus.CounterVec - outOfBoundSamples *prometheus.CounterVec - outOfOrderSamples *prometheus.CounterVec - tooOldSamples *prometheus.CounterVec - walTruncateDuration prometheus.Summary - walCorruptionsTotal prometheus.Counter - dataTotalReplayDuration prometheus.Gauge - headTruncateFail 
prometheus.Counter - headTruncateTotal prometheus.Counter - checkpointDeleteFail prometheus.Counter - checkpointDeleteTotal prometheus.Counter - checkpointCreationFail prometheus.Counter - checkpointCreationTotal prometheus.Counter - mmapChunkCorruptionTotal prometheus.Counter - snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1. - oooHistogram prometheus.Histogram - mmapChunksTotal prometheus.Counter - walReplayUnknownRefsTotal *prometheus.CounterVec - wblReplayUnknownRefsTotal *prometheus.CounterVec + activeAppenders prometheus.Gauge + series prometheus.GaugeFunc + staleSeries prometheus.GaugeFunc + seriesCreated prometheus.Counter + seriesRemoved prometheus.Counter + seriesNotFound prometheus.Counter + chunks prometheus.Gauge + chunksCreated prometheus.Counter + chunksRemoved prometheus.Counter + gcDuration prometheus.Summary + samplesAppended *prometheus.CounterVec + outOfOrderSamplesAppended *prometheus.CounterVec + outOfBoundSamples *prometheus.CounterVec + outOfOrderSamples *prometheus.CounterVec + tooOldSamples *prometheus.CounterVec + walTruncateDuration prometheus.Summary + walCorruptionsTotal prometheus.Counter + dataTotalReplayDuration prometheus.Gauge + headTruncateFail prometheus.Counter + headTruncateTotal prometheus.Counter + checkpointDeleteFail prometheus.Counter + checkpointDeleteTotal prometheus.Counter + checkpointCreationFail prometheus.Counter + checkpointCreationTotal prometheus.Counter + mmapChunkCorruptionTotal prometheus.Counter + snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1. 
+ oooHistogram prometheus.Histogram + mmapChunksTotal prometheus.Counter + resourceUpdatesCommitted prometheus.Counter + scopeUpdatesCommitted prometheus.Counter + resourceUpdatesWALFiltered prometheus.Counter + scopeUpdatesWALFiltered prometheus.Counter + walReplayUnknownRefsTotal *prometheus.CounterVec + wblReplayUnknownRefsTotal *prometheus.CounterVec } const ( @@ -570,6 +640,22 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_mmap_chunks_total", Help: "Total number of chunks that were memory-mapped.", }), + resourceUpdatesCommitted: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_head_resource_updates_committed_total", + Help: "Total number of resource attribute updates committed to the head block.", + }), + scopeUpdatesCommitted: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_head_scope_updates_committed_total", + Help: "Total number of scope updates committed to the head block.", + }), + resourceUpdatesWALFiltered: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_head_resource_updates_wal_filtered_total", + Help: "Total number of resource attribute updates skipped from WAL write due to unchanged content.", + }), + scopeUpdatesWALFiltered: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_head_scope_updates_wal_filtered_total", + Help: "Total number of scope updates skipped from WAL write due to unchanged content.", + }), walReplayUnknownRefsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_replay_unknown_refs_total", Help: "Total number of unknown series references encountered during WAL replay.", @@ -608,6 +694,10 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { m.checkpointCreationTotal, m.oooHistogram, m.mmapChunksTotal, + m.resourceUpdatesCommitted, + m.scopeUpdatesCommitted, + m.resourceUpdatesWALFiltered, + m.scopeUpdatesWALFiltered, m.mmapChunkCorruptionTotal, 
m.snapshotReplayErrorTotal, // Metrics bound to functions and not needed in tests @@ -1581,6 +1671,12 @@ func (h *RangeHead) Tombstones() (tombstones.Reader, error) { return h.head.tombstones, nil } +// SeriesMetadata returns series metadata for the head. +// Delegates to the underlying head to extract metadata from memSeries. +func (h *RangeHead) SeriesMetadata() (seriesmetadata.Reader, error) { + return h.head.SeriesMetadata() +} + func (h *RangeHead) MinTime() int64 { return h.mint } @@ -1754,6 +1850,9 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { h.tombstones.DeleteTombstones(deleted) h.tombstones.TruncateBefore(mint) + // Clean up shared metadata for deleted series. + h.cleanupSharedMetadata(deleted) + if h.wal != nil { h.walExpiriesMtx.Lock() // Samples for deleted series are likely still in the WAL, so flag that the deleted series records should be kept during @@ -1774,6 +1873,267 @@ func (h *Head) Tombstones() (tombstones.Reader, error) { return h.tombstones, nil } +// updateSharedMetadata updates the head's shared metadata store after a +// series' kind metadata has been modified. The series lock must be held. +func (h *Head) updateSharedMetadata(s *memSeries, kind seriesmetadata.KindDescriptor) { + if h.seriesMeta == nil { + return + } + v, ok := kind.CollectFromSeries(s) + if !ok { + return + } + hash := s.getStableHash() + + mask := uint64(len(h.metaRefStripes) - 1) + refShard := &h.metaRefStripes[uint64(s.ref)&mask] + refShard.Lock() + refShard.refToHash[s.ref] = hash + refShard.Unlock() + + hashShard := &h.metaHashStripes[hash&mask] + hashShard.Lock() + hashShard.hashToRef[hash] = s.ref + hashShard.Unlock() + + // For resource kind: use SetVersionedWithDiff to get old/new in a single + // lock acquisition (avoids 3 separate MemStore locks: get, set, get). 
+ if kind.ID() == seriesmetadata.KindResource { + vr := v.(*seriesmetadata.VersionedResource) + oldVR, newVR := h.seriesMeta.ResourceStore().SetVersionedWithDiff(hash, vr) + h.seriesMeta.UpdateResourceAttrIndex(hash, oldVR, newVR) + return + } + + // Other kinds: standard path. + store := h.seriesMeta.StoreForKind(kind.ID()) + kind.SetVersioned(store, hash, v) +} + +// updateSharedResourceMetadata is the type-safe hot-path version of +// updateSharedMetadata for resource kind, avoiding interface{} boxing. +func (h *Head) updateSharedResourceMetadata(s *memSeries) { + if h.seriesMeta == nil { + return + } + vr, ok := seriesmetadata.CollectResourceDirect(s) + if !ok { + return + } + hash := s.getStableHash() + + mask := uint64(len(h.metaRefStripes) - 1) + refShard := &h.metaRefStripes[uint64(s.ref)&mask] + refShard.Lock() + refShard.refToHash[s.ref] = hash + refShard.Unlock() + + hashShard := &h.metaHashStripes[hash&mask] + hashShard.Lock() + hashShard.hashToRef[hash] = s.ref + hashShard.Unlock() + + oldVR, newVR := h.seriesMeta.ResourceStore().SetVersionedWithDiff(hash, vr) + h.seriesMeta.UpdateResourceAttrIndex(hash, oldVR, newVR) +} + +// updateSharedScopeMetadata is the type-safe hot-path version of +// updateSharedMetadata for scope kind, avoiding interface{} boxing. 
+func (h *Head) updateSharedScopeMetadata(s *memSeries) { + if h.seriesMeta == nil { + return + } + vs, ok := seriesmetadata.CollectScopeDirect(s) + if !ok { + return + } + hash := s.getStableHash() + + mask := uint64(len(h.metaRefStripes) - 1) + refShard := &h.metaRefStripes[uint64(s.ref)&mask] + refShard.Lock() + refShard.refToHash[s.ref] = hash + refShard.Unlock() + + hashShard := &h.metaHashStripes[hash&mask] + hashShard.Lock() + hashShard.hashToRef[hash] = s.ref + hashShard.Unlock() + + h.seriesMeta.ScopeStore().SetVersioned(hash, vs) +} + +// internSeriesResource replaces the deep-copied maps/slices on the latest per-series +// ResourceVersion with thin copies sharing canonical pointers from the MemStore +// content table. This deduplicates memory when many series share the same resource. +func (h *Head) internSeriesResource(s *memSeries) { + if h.seriesMeta == nil { + return + } + vr, ok := seriesmetadata.CollectResourceDirect(s) + if !ok || len(vr.Versions) == 0 { + return + } + last := len(vr.Versions) - 1 + vr.Versions[last] = h.seriesMeta.ResourceStore().InternVersion(vr.Versions[last]) +} + +// internSeriesScope replaces the deep-copied maps/slices on the latest per-series +// ScopeVersion with thin copies sharing canonical pointers from the MemStore +// content table. +func (h *Head) internSeriesScope(s *memSeries) { + if h.seriesMeta == nil { + return + } + vs, ok := seriesmetadata.CollectScopeDirect(s) + if !ok || len(vs.Versions) == 0 { + return + } + last := len(vs.Versions) - 1 + vs.Versions[last] = h.seriesMeta.ScopeStore().InternVersion(vs.Versions[last]) +} + +// cleanupSharedMetadata removes metadata for deleted series from the shared store. 
+func (h *Head) cleanupSharedMetadata(deleted map[storage.SeriesRef]struct{}) { + if h.seriesMeta == nil || len(deleted) == 0 { + return + } + mask := uint64(len(h.metaRefStripes) - 1) + for ref := range deleted { + hRef := chunks.HeadSeriesRef(ref) + refShard := &h.metaRefStripes[uint64(hRef)&mask] + refShard.Lock() + hash, ok := refShard.refToHash[hRef] + delete(refShard.refToHash, hRef) + refShard.Unlock() + if !ok { + continue + } + + hashShard := &h.metaHashStripes[hash&mask] + hashShard.Lock() + delete(hashShard.hashToRef, hash) + hashShard.Unlock() + + // Remove from inverted index before deleting from store. + if oldVR, ok := h.seriesMeta.GetVersionedResource(hash); ok { + h.seriesMeta.RemoveFromResourceAttrIndex(hash, oldVR) + } + + // Delete from all kind stores (internally concurrent-safe). + // If a live series shares the hash (extremely unlikely), its next + // commit will re-add it. + h.seriesMeta.DeleteResource(hash) + h.seriesMeta.ScopeStore().Delete(hash) + } +} + +// headMetadataReader provides concurrency-safe read access to the head's +// live shared metadata store. LabelsForHash resolves labels on-demand via +// sharded hash→ref stripes + stripeSeries.getByID. All other methods +// delegate to MemStore which is internally concurrent-safe. 
+type headMetadataReader struct { + head *Head +} + +func (*headMetadataReader) Close() error { return nil } + +func (r *headMetadataReader) LabelsForHash(labelsHash uint64) (labels.Labels, bool) { + mask := uint64(len(r.head.metaHashStripes) - 1) + shard := &r.head.metaHashStripes[labelsHash&mask] + shard.RLock() + ref, ok := shard.hashToRef[labelsHash] + shard.RUnlock() + if !ok { + return labels.EmptyLabels(), false + } + s := r.head.series.getByID(ref) + if s == nil { + return labels.EmptyLabels(), false + } + return s.lset, true +} + +func (r *headMetadataReader) IterKind(ctx context.Context, id seriesmetadata.KindID, f func(labelsHash uint64, versioned any) error) error { + return r.head.seriesMeta.IterKind(ctx, id, f) +} + +func (r *headMetadataReader) KindLen(id seriesmetadata.KindID) int { + return r.head.seriesMeta.KindLen(id) +} + +func (r *headMetadataReader) GetResource(labelsHash uint64) (*seriesmetadata.ResourceVersion, bool) { + return r.head.seriesMeta.GetResource(labelsHash) +} + +func (r *headMetadataReader) GetVersionedResource(labelsHash uint64) (*seriesmetadata.VersionedResource, bool) { + return r.head.seriesMeta.GetVersionedResource(labelsHash) +} + +func (r *headMetadataReader) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + return r.head.seriesMeta.GetResourceAt(labelsHash, timestamp) +} + +func (r *headMetadataReader) IterResources(ctx context.Context, f func(labelsHash uint64, resource *seriesmetadata.ResourceVersion) error) error { + return r.head.seriesMeta.IterResources(ctx, f) +} + +func (r *headMetadataReader) IterVersionedResources(ctx context.Context, f func(labelsHash uint64, resources *seriesmetadata.VersionedResource) error) error { + return r.head.seriesMeta.IterVersionedResources(ctx, f) +} + +func (r *headMetadataReader) TotalResources() uint64 { + return r.head.seriesMeta.TotalResources() +} + +func (r *headMetadataReader) TotalResourceVersions() uint64 { + return 
r.head.seriesMeta.TotalResourceVersions() +} + +func (r *headMetadataReader) GetVersionedScope(labelsHash uint64) (*seriesmetadata.VersionedScope, bool) { + return r.head.seriesMeta.GetVersionedScope(labelsHash) +} + +func (r *headMetadataReader) IterVersionedScopes(ctx context.Context, f func(labelsHash uint64, scopes *seriesmetadata.VersionedScope) error) error { + return r.head.seriesMeta.IterVersionedScopes(ctx, f) +} + +func (r *headMetadataReader) TotalScopes() uint64 { + return r.head.seriesMeta.TotalScopes() +} + +func (r *headMetadataReader) TotalScopeVersions() uint64 { + return r.head.seriesMeta.TotalScopeVersions() +} + +func (r *headMetadataReader) LookupResourceAttr(key, value string) []uint64 { + return r.head.seriesMeta.LookupResourceAttr(key, value) +} + +func (r *headMetadataReader) UniqueResourceAttrNames() map[string]struct{} { + return r.head.seriesMeta.UniqueResourceAttrNames() +} + +// SeriesMetadata returns a reader over the head's series metadata. +// When native metadata is enabled, this is O(1) — it returns a wrapper +// around the incrementally-maintained shared store instead of scanning all series. +func (h *Head) SeriesMetadata() (seriesmetadata.Reader, error) { + if h.seriesMeta == nil { + return seriesmetadata.NewMemSeriesMetadata(), nil + } + return &headMetadataReader{head: h}, nil +} + +// SetIndexedResourceAttrs reconfigures which descriptive resource attributes +// are included in the inverted index at runtime. This enables per-tenant +// overrides in Mimir ingesters. Note: changing the indexed set does NOT +// retroactively rebuild the index — it only affects future updates. +func (h *Head) SetIndexedResourceAttrs(attrs map[string]struct{}) { + if h.seriesMeta != nil { + h.seriesMeta.SetIndexedResourceAttrs(attrs) + } +} + // NumSeries returns the number of series tracked in the head. 
func (h *Head) NumSeries() uint64 { return h.numSeries.Load() @@ -2060,6 +2420,34 @@ type stripeLock struct { _ [40]byte } +const metadataStripeSize = 1 << 8 // 256-way sharding for metadata ref/hash maps + +// metadataRefStripe holds ref→hash mappings for one shard, keyed by ref. +type metadataRefStripe struct { + sync.RWMutex + _ [40]byte // cache line padding + refToHash map[chunks.HeadSeriesRef]uint64 +} + +// metadataHashStripe holds hash→ref mappings for one shard, keyed by hash. +type metadataHashStripe struct { + sync.RWMutex + _ [40]byte // cache line padding + hashToRef map[uint64]chunks.HeadSeriesRef +} + +func newMetadataStripes() ([]metadataRefStripe, []metadataHashStripe) { + refs := make([]metadataRefStripe, metadataStripeSize) + hashes := make([]metadataHashStripe, metadataStripeSize) + for i := range refs { + refs[i].refToHash = make(map[chunks.HeadSeriesRef]uint64) + } + for i := range hashes { + hashes[i].hashToRef = make(map[uint64]chunks.HeadSeriesRef) + } + return refs, hashes +} + func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *stripeSeries { s := &stripeSeries{ size: stripeSize, @@ -2189,6 +2577,9 @@ func (h *Head) gcStaleSeries(seriesRefs []storage.SeriesRef, maxt int64) map[sto // Remove tombstones referring to the deleted series. h.tombstones.DeleteTombstones(deleted) + // Clean up shared metadata for deleted series. + h.cleanupSharedMetadata(deleted) + if h.wal != nil { _, last, _ := wlog.Segments(h.wal.Dir()) h.walExpiriesMtx.Lock() @@ -2448,12 +2839,28 @@ func (s sample) Copy() chunks.Sample { return c } +// kindMetaEntry stores a single kind's metadata on a memSeries. +type kindMetaEntry struct { + kind seriesmetadata.KindID + data any // *Versioned[V] for the appropriate V +} + +// nativeMeta holds OTel native metadata state for a memSeries. 
+// Allocated lazily on first SetKindMeta call; nil when native metadata +// is not in use, saving 24 bytes per series (slice header + stableHash) +// compared to storing the fields inline on memSeries. +type nativeMeta struct { + // stableHash caches labels.StableHash(lset). Computed lazily on first + // metadata commit (lset is immutable so the hash never changes). + stableHash uint64 + kindMeta []kindMetaEntry +} + // memSeries is the in-memory representation of a series. None of its methods // are goroutine safe and it is the caller's responsibility to lock it. type memSeries struct { // Members up to the Mutex are not changed after construction, so can be accessed without a lock. - ref chunks.HeadSeriesRef - meta *metadata.Metadata + ref chunks.HeadSeriesRef // Series labels hash to use for sharding purposes. The value is always 0 when sharding has not // been explicitly enabled in TSDB. @@ -2465,6 +2872,13 @@ type memSeries struct { // Everything after here should only be accessed with the lock held. sync.Mutex + meta *metadata.Metadata + + // nativeMeta holds OTel native metadata (stableHash cache + per-kind versioned data). + // nil when native metadata is not in use, saving 24 bytes per series. + // Allocated lazily on first SetKindMeta call. + nativeMeta *nativeMeta + lset labels.Labels // Locking required with -tags dedupelabels, not otherwise. // Immutable chunks on disk that have not yet gone into a block, in order of ascending time stamps. @@ -2535,6 +2949,42 @@ func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, return s } +// GetKindMeta returns the metadata for a kind, or nil/false if not set. +func (s *memSeries) GetKindMeta(id seriesmetadata.KindID) (any, bool) { + if s.nativeMeta == nil { + return nil, false + } + for _, e := range s.nativeMeta.kindMeta { + if e.kind == id { + return e.data, true + } + } + return nil, false +} + +// SetKindMeta sets the metadata for a kind. 
+func (s *memSeries) SetKindMeta(id seriesmetadata.KindID, v any) { + if s.nativeMeta == nil { + s.nativeMeta = &nativeMeta{} + } + for i, e := range s.nativeMeta.kindMeta { + if e.kind == id { + s.nativeMeta.kindMeta[i].data = v + return + } + } + s.nativeMeta.kindMeta = append(s.nativeMeta.kindMeta, kindMetaEntry{kind: id, data: v}) +} + +// getStableHash returns the cached labels.StableHash, computing it on first call. +// Caller must hold s.Lock() and s.nativeMeta must be non-nil (guaranteed after SetKindMeta). +func (s *memSeries) getStableHash() uint64 { + if s.nativeMeta.stableHash == 0 { + s.nativeMeta.stableHash = labels.StableHash(s.lset) + } + return s.nativeMeta.stableHash +} + func (s *memSeries) minTime() int64 { if len(s.mmappedChunks) > 0 { return s.mmappedChunks[0].minTime diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index ebf74549afd..fb73caaaa71 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "log/slog" + "maps" "math" "runtime" "time" @@ -31,6 +32,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" ) // initAppender is a helper to initialize the time bounds of the head @@ -104,6 +106,15 @@ func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m return a.app.UpdateMetadata(ref, l, m) } +func (a *initAppender) UpdateResource(ref storage.SeriesRef, l labels.Labels, identifying, descriptive map[string]string, entities []storage.EntityData, t int64) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.UpdateResource(ref, l, identifying, descriptive, entities, t) + } + + a.app = a.head.appender() + return a.app.UpdateResource(ref, l, 
identifying, descriptive, entities, t) +} + func (a *initAppender) AppendSTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, st int64) (storage.SeriesRef, error) { if a.app != nil { return a.app.AppendSTZeroSample(ref, lset, t, st) @@ -304,6 +315,32 @@ func (h *Head) putMetadataBuffer(b []record.RefMetadata) { h.metadataPool.Put(b[:0]) } +func (h *Head) getResourceBuffer() []record.RefResource { + b := h.resourcesPool.Get() + if b == nil { + return make([]record.RefResource, 0, 512) + } + return b +} + +func (h *Head) putResourceBuffer(b []record.RefResource) { + clear(b) + h.resourcesPool.Put(b[:0]) +} + +func (h *Head) getScopeBuffer() []record.RefScope { + b := h.scopesPool.Get() + if b == nil { + return make([]record.RefScope, 0, 512) + } + return b +} + +func (h *Head) putScopeBuffer(b []record.RefScope) { + clear(b) + h.scopesPool.Put(b[:0]) +} + func (h *Head) getSeriesBuffer() []*memSeries { b := h.seriesPool.Get() if b == nil { @@ -380,6 +417,10 @@ type appendBatch struct { metadata []record.RefMetadata // New metadata held by this appender. metadataSeries []*memSeries // Series corresponding to the metadata held by this appender. exemplars []exemplarWithSeriesRef // New exemplars held by this appender. + resources []record.RefResource // Pending resource updates held by this appender. + resourceSeries []*memSeries // Series corresponding to the resource updates. + scopes []record.RefScope // Pending scope updates held by this appender. + scopeSeries []*memSeries // Series corresponding to the scope updates. } // close returns all the slices to the pools in Head and nil's them. 
@@ -402,6 +443,14 @@ func (b *appendBatch) close(h *Head) { b.metadataSeries = nil h.putExemplarBuffer(b.exemplars) b.exemplars = nil + h.putResourceBuffer(b.resources) + b.resources = nil + h.putSeriesBuffer(b.resourceSeries) + b.resourceSeries = nil + h.putScopeBuffer(b.scopes) + b.scopes = nil + h.putSeriesBuffer(b.scopeSeries) + b.scopeSeries = nil } type headAppenderBase struct { @@ -416,6 +465,14 @@ type headAppenderBase struct { typesInBatch map[chunks.HeadSeriesRef]sampleType // Which (one) sample type each series holds in the most recent batch. + // resourceRefs tracks which series have already had their resource updated + // in this append batch to avoid redundant work. + resourceRefs map[chunks.HeadSeriesRef]struct{} + + // scopeRefs tracks which series have already had their scope updated + // in this append batch to avoid redundant work. + scopeRefs map[chunks.HeadSeriesRef]struct{} + appendID, cleanupAppendIDsBelow uint64 closed bool } @@ -577,6 +634,10 @@ func (a *headAppenderBase) getCurrentBatch(st sampleType, s chunks.HeadSeriesRef floatHistogramSeries: h.getSeriesBuffer(), metadata: h.getMetadataBuffer(), metadataSeries: h.getSeriesBuffer(), + resources: h.getResourceBuffer(), + resourceSeries: h.getSeriesBuffer(), + scopes: h.getScopeBuffer(), + scopeSeries: h.getSeriesBuffer(), } // Allocate the exemplars buffer only if exemplars are enabled. @@ -1048,6 +1109,61 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, return ref, nil } +// UpdateResource stores unified OTel resource data (attributes + entities) for the given series. +// Supports versioning: if the resource changes, a new version is created. +// If the resource is the same, the current version's time range is extended. +// The update is buffered and applied during Commit(). 
+func (a *headAppender) UpdateResource(ref storage.SeriesRef, lset labels.Labels, identifying, descriptive map[string]string, entities []storage.EntityData, t int64) (storage.SeriesRef, error) { + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + s = a.head.series.getByHash(lset.Hash(), lset) + if s != nil { + ref = storage.SeriesRef(s.ref) + } + } + if s == nil { + return 0, fmt.Errorf("unknown series when trying to add resource with HeadSeriesRef: %d and labels: %s", ref, lset) + } + + // Deduplicate: skip if this series already had a resource update in this batch. + if a.resourceRefs == nil { + a.resourceRefs = make(map[chunks.HeadSeriesRef]struct{}) + } + if _, ok := a.resourceRefs[s.ref]; ok { + return ref, nil + } + + // Convert storage.EntityData to record.RefResourceEntity for WAL encoding. + walEntities := make([]record.RefResourceEntity, len(entities)) + for i, e := range entities { + entityType := e.Type + if entityType == "" { + entityType = seriesmetadata.EntityTypeResource + } + walEntities[i] = record.RefResourceEntity{ + Type: entityType, + ID: e.ID, + Description: e.Description, + } + } + + // Buffer the resource update for commit-time application. 
+ b := a.getCurrentBatch(stNone, s.ref) + b.resources = append(b.resources, record.RefResource{ + Ref: s.ref, + MinTime: t, + MaxTime: t, + Identifying: identifying, + Descriptive: descriptive, + Entities: walEntities, + }) + b.resourceSeries = append(b.resourceSeries, s) + + a.resourceRefs[s.ref] = struct{}{} + + return ref, nil +} + var _ storage.GetRef = &headAppender{} func (a *headAppenderBase) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { @@ -1144,6 +1260,22 @@ func (a *headAppenderBase) log() error { return fmt.Errorf("log exemplars: %w", err) } } + if len(b.resources) > 0 { + rec = enc.Resources(b.resources, buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log resources: %w", err) + } + } + if len(b.scopes) > 0 { + rec = enc.Scopes(b.scopes, buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log scopes: %w", err) + } + } } return nil } @@ -1711,6 +1843,149 @@ func commitMetadata(b *appendBatch) { } } +// commitResources commits the resource updates for each series in the provided batch. +func (a *headAppenderBase) commitResources(b *appendBatch) { + for i, r := range b.resources { + s := b.resourceSeries[i] + s.Lock() + seriesmetadata.CommitResourceDirect(s, seriesmetadata.ResourceCommitData{ + Identifying: r.Identifying, + Descriptive: r.Descriptive, + Entities: refResourceEntitiesToCommitData(r.Entities), + MinTime: r.MinTime, + MaxTime: r.MaxTime, + }) + a.head.updateSharedResourceMetadata(s) + a.head.internSeriesResource(s) + s.Unlock() + } +} + +// refResourceEntitiesToCommitData converts WAL record entities to ResourceEntityData. 
+func refResourceEntitiesToCommitData(entities []record.RefResourceEntity) []seriesmetadata.ResourceEntityData { + result := make([]seriesmetadata.ResourceEntityData, len(entities)) + for i, e := range entities { + result[i] = seriesmetadata.ResourceEntityData{ + Type: e.Type, + ID: e.ID, + Description: e.Description, + } + } + return result +} + +// commitScopes commits the scope updates for each series in the provided batch. +func (a *headAppenderBase) commitScopes(b *appendBatch) { + for i, sc := range b.scopes { + s := b.scopeSeries[i] + s.Lock() + seriesmetadata.CommitScopeDirect(s, seriesmetadata.ScopeCommitData{ + Name: sc.Name, + Version: sc.Version, + SchemaURL: sc.SchemaURL, + Attrs: sc.Attrs, + MinTime: sc.MinTime, + MaxTime: sc.MaxTime, + }) + a.head.updateSharedScopeMetadata(s) + a.head.internSeriesScope(s) + s.Unlock() + } +} + +// resourceContentUnchanged checks whether the incoming WAL resource record +// has the same content as the current stored ResourceVersion, ignoring time range. +func resourceContentUnchanged(cur *seriesmetadata.ResourceVersion, r record.RefResource) bool { + if !maps.Equal(cur.Identifying, r.Identifying) || + !maps.Equal(cur.Descriptive, r.Descriptive) || + len(cur.Entities) != len(r.Entities) { + return false + } + // Stored entities are sorted by Type; incoming may not be — pairwise match. + for _, ce := range cur.Entities { + found := false + for _, ie := range r.Entities { + if ce.Type == ie.Type && maps.Equal(ce.ID, ie.ID) && maps.Equal(ce.Description, ie.Description) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// scopeContentUnchanged checks whether the incoming WAL scope record +// has the same content as the current stored ScopeVersion, ignoring time range. 
+func scopeContentUnchanged(cur *seriesmetadata.ScopeVersion, sc record.RefScope) bool { + return cur.Name == sc.Name && cur.Version == sc.Version && + cur.SchemaURL == sc.SchemaURL && maps.Equal(cur.Attrs, sc.Attrs) +} + +// filterUnchangedResources removes entries from b.resources where the content +// is identical to what's already stored on the series. For unchanged entries, +// the time range is extended in-place and the shared metadata store is updated. +// Returns the number of entries filtered out. +func (a *headAppenderBase) filterUnchangedResources(b *appendBatch) int { + n := 0 + for i, r := range b.resources { + s := b.resourceSeries[i] + s.Lock() + changed := true + if vr, ok := seriesmetadata.CollectResourceDirect(s); ok && len(vr.Versions) > 0 { + cur := vr.Versions[len(vr.Versions)-1] + if resourceContentUnchanged(cur, r) { + cur.UpdateTimeRange(r.MinTime, r.MaxTime) + a.head.updateSharedResourceMetadata(s) + changed = false + } + } + s.Unlock() + if changed { + b.resources[n] = b.resources[i] + b.resourceSeries[n] = b.resourceSeries[i] + n++ + } + } + filtered := len(b.resources) - n + b.resources = b.resources[:n] + b.resourceSeries = b.resourceSeries[:n] + return filtered +} + +// filterUnchangedScopes removes entries from b.scopes where the content +// is identical to what's already stored on the series. For unchanged entries, +// the time range is extended in-place and the shared metadata store is updated. +// Returns the number of entries filtered out. 
+func (a *headAppenderBase) filterUnchangedScopes(b *appendBatch) int { + n := 0 + for i, sc := range b.scopes { + s := b.scopeSeries[i] + s.Lock() + changed := true + if vs, ok := seriesmetadata.CollectScopeDirect(s); ok && len(vs.Versions) > 0 { + cur := vs.Versions[len(vs.Versions)-1] + if scopeContentUnchanged(cur, sc) { + cur.UpdateTimeRange(sc.MinTime, sc.MaxTime) + a.head.updateSharedScopeMetadata(s) + changed = false + } + } + s.Unlock() + if changed { + b.scopes[n] = b.scopes[i] + b.scopeSeries[n] = b.scopeSeries[i] + n++ + } + } + filtered := len(b.scopes) - n + b.scopes = b.scopes[:n] + b.scopeSeries = b.scopeSeries[:n] + return filtered +} + func (a *headAppenderBase) unmarkCreatedSeriesAsPendingCommit() { for _, s := range a.series { s.Lock() @@ -1736,9 +2011,23 @@ func (a *headAppenderBase) Commit() (err error) { h.putRefSeriesBuffer(a.seriesRefs) h.putSeriesBuffer(a.series) h.putTypeMap(a.typesInBatch) + a.resourceRefs = nil + a.scopeRefs = nil a.closed = true }() + // Count total resource/scope updates before filtering, then filter + // unchanged entries to avoid unnecessary WAL writes. Unchanged entries + // have their time range extended in-place under the series lock. + var resourcesTotal, scopesTotal int + var resourcesFiltered, scopesFiltered int + for _, b := range a.batches { + resourcesTotal += len(b.resources) + scopesTotal += len(b.scopes) + resourcesFiltered += a.filterUnchangedResources(b) + scopesFiltered += a.filterUnchangedScopes(b) + } + if err := a.log(); err != nil { _ = a.Rollback() // Most likely the same error will happen again. return fmt.Errorf("write to WAL: %w", err) @@ -1784,6 +2073,8 @@ func (a *headAppenderBase) Commit() (err error) { a.commitHistograms(b, acc) a.commitFloatHistograms(b, acc) commitMetadata(b) + a.commitResources(b) + a.commitScopes(b) } // Unmark all series as pending commit after all samples have been committed. 
a.unmarkCreatedSeriesAsPendingCommit() @@ -1796,6 +2087,10 @@ func (a *headAppenderBase) Commit() (err error) { h.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended)) h.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted)) h.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted)) + h.metrics.resourceUpdatesCommitted.Add(float64(resourcesTotal)) + h.metrics.scopeUpdatesCommitted.Add(float64(scopesTotal)) + h.metrics.resourceUpdatesWALFiltered.Add(float64(resourcesFiltered)) + h.metrics.scopeUpdatesWALFiltered.Add(float64(scopesFiltered)) h.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt) h.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT) @@ -2298,6 +2593,8 @@ func (a *headAppenderBase) Rollback() (err error) { h.putRefSeriesBuffer(a.seriesRefs) h.putSeriesBuffer(a.series) h.putTypeMap(a.typesInBatch) + a.resourceRefs = nil + a.scopeRefs = nil }() var series *memSeries diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append_v2.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append_v2.go index 87b62df536e..63675e12efa 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append_v2.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append_v2.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" ) // initAppenderV2 is a helper to initialize the time bounds of the head @@ -101,6 +102,12 @@ func (h *Head) appenderV2() *headAppenderV2 { type headAppenderV2 struct { headAppenderBase + + // Cached resource conversion to avoid redundant work when the same + // ResourceContext pointer is used for many series in a batch (e.g. + // histogram buckets and summary quantiles from the same OTLP resource). 
+ cachedResourceCtx *storage.ResourceContext + cachedWALResource *record.RefResource } func (a *headAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) { @@ -207,14 +214,24 @@ func (a *headAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t i if metaChanged { b := a.getCurrentBatch(stNone, s.ref) b.metadata = append(b.metadata, record.RefMetadata{ - Ref: s.ref, - Type: record.GetMetricType(opts.Metadata.Type), - Unit: opts.Metadata.Unit, - Help: opts.Metadata.Help, + Ref: s.ref, + Type: record.GetMetricType(opts.Metadata.Type), + Unit: opts.Metadata.Unit, + Help: opts.Metadata.Help, + MinTime: t, + MaxTime: t, }) b.metadataSeries = append(b.metadataSeries, s) } } + if a.head.opts.EnableNativeMetadata { + if opts.Resource != nil { + a.updateResource(s, t, opts.Resource) + } + if opts.Scope != nil { + a.updateScope(s, t, opts.Scope) + } + } return storage.SeriesRef(s.ref), partialErr } @@ -379,4 +396,78 @@ func (a *headAppenderV2) bestEffortAppendSTZeroSample(s *memSeries, ls labels.La } } +// updateResource buffers a resource update for a series if not already done in this batch. +// The actual mutation is deferred to Commit() time. +func (a *headAppenderV2) updateResource(s *memSeries, t int64, rc *storage.ResourceContext) { + if a.resourceRefs == nil { + a.resourceRefs = make(map[chunks.HeadSeriesRef]struct{}) + } + if _, ok := a.resourceRefs[s.ref]; ok { + return + } + + // Cache the converted WAL record per ResourceContext pointer to avoid + // redundant entity conversion and allocations when the same resource is + // applied to many series (e.g. histogram sub-series share one ResourceContext). 
+ if rc != a.cachedResourceCtx { + walEntities := make([]record.RefResourceEntity, len(rc.Entities)) + for i, e := range rc.Entities { + entityType := e.Type + if entityType == "" { + entityType = seriesmetadata.EntityTypeResource + } + walEntities[i] = record.RefResourceEntity{ + Type: entityType, + ID: e.ID, + Description: e.Description, + } + } + a.cachedWALResource = &record.RefResource{ + MinTime: t, + MaxTime: t, + Identifying: rc.Identifying, + Descriptive: rc.Descriptive, + Entities: walEntities, + } + a.cachedResourceCtx = rc + } + + // Buffer the resource update in the current batch. + b := a.getCurrentBatch(stNone, s.ref) + rr := *a.cachedWALResource + rr.Ref = s.ref + rr.MinTime = t + rr.MaxTime = t + b.resources = append(b.resources, rr) + b.resourceSeries = append(b.resourceSeries, s) + + a.resourceRefs[s.ref] = struct{}{} +} + +// updateScope buffers a scope update for a series if not already done in this batch. +// The actual mutation is deferred to Commit() time. +func (a *headAppenderV2) updateScope(s *memSeries, t int64, sc *storage.ScopeContext) { + if a.scopeRefs == nil { + a.scopeRefs = make(map[chunks.HeadSeriesRef]struct{}) + } + if _, ok := a.scopeRefs[s.ref]; ok { + return + } + + // Buffer the scope update in the current batch. 
+ b := a.getCurrentBatch(stNone, s.ref) + b.scopes = append(b.scopes, record.RefScope{ + Ref: s.ref, + MinTime: t, + MaxTime: t, + Name: sc.Name, + Version: sc.Version, + SchemaURL: sc.SchemaURL, + Attrs: sc.Attrs, + }) + b.scopeSeries = append(b.scopeSeries, s) + + a.scopeRefs[s.ref] = struct{}{} +} + var _ storage.GetRef = &headAppenderV2{} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index ab55aed14e0..5fabc3161eb 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/wlog" ) @@ -85,6 +86,8 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch var unknownHistogramRefs atomic.Uint64 var unknownMetadataRefs atomic.Uint64 var unknownTombstoneRefs atomic.Uint64 + var unknownResourceRefs atomic.Uint64 + var unknownScopeRefs atomic.Uint64 // Track number of series records that had overlapping m-map chunks. 
var mmapOverlappingChunks atomic.Uint64 @@ -241,6 +244,30 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- meta + case record.ResourceUpdate: + resources := h.wlReplayResourcesPool.Get()[:0] + resources, err = dec.Resources(r.Record(), resources) + if err != nil { + decodeErr = &wlog.CorruptionErr{ + Err: fmt.Errorf("decode resources: %w", err), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- resources + case record.ScopeUpdate: + scopes := h.wlReplayScopesPool.Get()[:0] + scopes, err = dec.Scopes(r.Record(), scopes) + if err != nil { + decodeErr = &wlog.CorruptionErr{ + Err: fmt.Errorf("decode scopes: %w", err), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- scopes default: // Noop. } @@ -442,13 +469,66 @@ Outer: missingSeries[m.Ref] = struct{}{} continue } + s.Lock() s.meta = &metadata.Metadata{ Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help, } + s.Unlock() } h.wlReplayMetadataPool.Put(v) + case []record.RefResource: + resKind, _ := seriesmetadata.KindByID(seriesmetadata.KindResource) + for _, r := range v { + if ref, ok := multiRef[r.Ref]; ok { + r.Ref = ref + } + s := h.series.getByID(r.Ref) + if s == nil { + unknownResourceRefs.Inc() + missingSeries[r.Ref] = struct{}{} + continue + } + s.Lock() + resKind.CommitToSeries(s, seriesmetadata.ResourceCommitData{ + Identifying: r.Identifying, + Descriptive: r.Descriptive, + Entities: refResourceEntitiesToCommitData(r.Entities), + MinTime: r.MinTime, + MaxTime: r.MaxTime, + }) + h.updateSharedMetadata(s, resKind) + h.internSeriesResource(s) + s.Unlock() + } + h.wlReplayResourcesPool.Put(v) + case []record.RefScope: + scopeKind, _ := seriesmetadata.KindByID(seriesmetadata.KindScope) + for _, sc := range v { + if ref, ok := multiRef[sc.Ref]; ok { + sc.Ref = ref + } + s := h.series.getByID(sc.Ref) + if s == nil { + unknownScopeRefs.Inc() + missingSeries[sc.Ref] = struct{}{} + continue + } + 
s.Lock() + scopeKind.CommitToSeries(s, seriesmetadata.ScopeCommitData{ + Name: sc.Name, + Version: sc.Version, + SchemaURL: sc.SchemaURL, + Attrs: sc.Attrs, + MinTime: sc.MinTime, + MaxTime: sc.MaxTime, + }) + h.updateSharedMetadata(s, scopeKind) + h.internSeriesScope(s) + s.Unlock() + } + h.wlReplayScopesPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) } @@ -505,7 +585,7 @@ Outer: h.logger.Warn("Series reported as missing in prior segments but were found in this WAL segment", "count", foundSeriesForPriorSegments, "segment", r.Segment()) } - if unknownSampleRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load()+unknownTombstoneRefs.Load() > 0 { + if unknownSampleRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load()+unknownTombstoneRefs.Load()+unknownResourceRefs.Load()+unknownScopeRefs.Load() > 0 { h.logger.Warn( "Unknown series references", "series", unknownSeriesRefs.count(), @@ -514,6 +594,8 @@ Outer: "histograms", unknownHistogramRefs.Load(), "metadata", unknownMetadataRefs.Load(), "tombstones", unknownTombstoneRefs.Load(), + "resources", unknownResourceRefs.Load(), + "scopes", unknownScopeRefs.Load(), "segment", r.Segment(), ) @@ -552,6 +634,8 @@ Outer: counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownHistogramRefs.Load()), "histograms") counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownMetadataRefs.Load()), "metadata") counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownTombstoneRefs.Load()), "tombstones") + counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownResourceRefs.Load()), "resources") + counterAddNonZero(h.metrics.walReplayUnknownRefsTotal, float64(unknownScopeRefs.Load()), "scopes") } if count := mmapOverlappingChunks.Load(); count > 0 { h.logger.Info("Overlapping m-map chunks on duplicate series records", "count", count) diff --git 
a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go index 3b04d1070cd..921491d8ee0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go @@ -27,12 +27,18 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/util/annotations" ) var _ IndexReader = &HeadAndOOOIndexReader{} +var ( + _ storage.ResourceQuerier = &HeadAndOOOQuerier{} + _ storage.ResourceQuerier = &HeadAndOOOChunkQuerier{} +) + type HeadAndOOOIndexReader struct { *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. inoMint int64 @@ -401,6 +407,12 @@ func (*OOOCompactionHead) Tombstones() (tombstones.Reader, error) { return tombstones.NewMemTombstones(), nil } +// SeriesMetadata returns series metadata for the OOO compaction head. +// Delegates to the underlying head so OOO-compacted blocks also get metadata. +func (ch *OOOCompactionHead) SeriesMetadata() (seriesmetadata.Reader, error) { + return ch.head.SeriesMetadata() +} + var oooCompactionHeadULID = ulid.MustParse("0000000000XX000COMPACTHEAD") func (ch *OOOCompactionHead) Meta() BlockMeta { @@ -586,6 +598,60 @@ func (q *HeadAndOOOQuerier) Select(ctx context.Context, sortSeries bool, hints * return selectSeriesSet(ctx, sortSeries, hints, matchers, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt) } +// GetResourceAt implements storage.ResourceQuerier. 
+func (q *HeadAndOOOQuerier) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + reader, err := q.head.SeriesMetadata() + if err != nil { + return nil, false + } + // Note: we don't close the reader here as it's the head's reader + // which is managed by the head itself. + return reader.GetResourceAt(labelsHash, timestamp) +} + +// IterUniqueAttributeNames implements storage.ResourceQuerier. +// Uses the cached UniqueResourceAttrNames when available (O(1)), +// falling back to a full scan otherwise. +func (q *HeadAndOOOQuerier) IterUniqueAttributeNames(fn func(name string)) error { + reader, err := q.head.SeriesMetadata() + if err != nil { + return err + } + // Note: we don't close the reader here as it's the head's reader + // which is managed by the head itself. + + // Fast path: use cached unique names if the reader supports it. + if r, ok := reader.(seriesmetadata.UniqueAttrNameReader); ok { + if names := r.UniqueResourceAttrNames(); names != nil { + for name := range names { + fn(name) + } + return nil + } + } + + // Slow path: full scan (only reached when cache not available). + seen := make(map[string]struct{}) + return reader.IterResources(context.Background(), func(_ uint64, resource *seriesmetadata.ResourceVersion) error { + if resource == nil { + return nil + } + for name := range resource.Identifying { + if _, ok := seen[name]; !ok { + seen[name] = struct{}{} + fn(name) + } + } + for name := range resource.Descriptive { + if _, ok := seen[name]; !ok { + seen[name] = struct{}{} + fn(name) + } + } + return nil + }) +} + // HeadAndOOOChunkQuerier queries both the head and the out-of-order head. 
type HeadAndOOOChunkQuerier struct { mint, maxt int64 @@ -637,3 +703,51 @@ func (q *HeadAndOOOChunkQuerier) Close() error { func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt) } + +// GetResourceAt implements storage.ResourceQuerier. +func (q *HeadAndOOOChunkQuerier) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + reader, err := q.head.SeriesMetadata() + if err != nil { + return nil, false + } + return reader.GetResourceAt(labelsHash, timestamp) +} + +// IterUniqueAttributeNames implements storage.ResourceQuerier. +func (q *HeadAndOOOChunkQuerier) IterUniqueAttributeNames(fn func(name string)) error { + reader, err := q.head.SeriesMetadata() + if err != nil { + return err + } + + // Fast path: use cached unique names if the reader supports it. + if r, ok := reader.(seriesmetadata.UniqueAttrNameReader); ok { + if names := r.UniqueResourceAttrNames(); names != nil { + for name := range names { + fn(name) + } + return nil + } + } + + // Slow path: full scan. 
+ seen := make(map[string]struct{}) + return reader.IterResources(context.Background(), func(_ uint64, resource *seriesmetadata.ResourceVersion) error { + if resource == nil { + return nil + } + for name := range resource.Identifying { + if _, ok := seen[name]; !ok { + seen[name] = struct{}{} + fn(name) + } + } + for name := range resource.Descriptive { + if _, ok := seen[name]; !ok { + seen[name] = struct{}{} + fn(name) + } + } + return nil + }) +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 681496027dd..f38a93e1469 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -19,6 +19,7 @@ import ( "fmt" "math" "slices" + "sync" "github.com/oklog/ulid/v2" @@ -28,10 +29,13 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/util/annotations" ) +var _ storage.ResourceQuerier = &blockQuerier{} + // checkContextEveryNIterations is used in some tight loops to check if the context is done. const checkContextEveryNIterations = 100 @@ -102,6 +106,10 @@ func (q *blockBaseQuerier) Close() error { type blockQuerier struct { *blockBaseQuerier + block BlockReader // stored for lazy metadata reader access + metadataReader seriesmetadata.Reader // for resource attribute lookups (lazily loaded) + metadataErr error // error from loading metadata reader (if any) + metadataOnce sync.Once // ensures metadata reader is loaded only once } // NewBlockQuerier returns a querier against the block reader and requested min and max time range. 
@@ -110,13 +118,79 @@ func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) { if err != nil { return nil, err } - return &blockQuerier{blockBaseQuerier: q}, nil + // Store block reference for lazy metadata reader loading + return &blockQuerier{blockBaseQuerier: q, block: b}, nil +} + +// getMetadataReader lazily loads the metadata reader on first access. +func (q *blockQuerier) getMetadataReader() (seriesmetadata.Reader, error) { + q.metadataOnce.Do(func() { + if q.block != nil { + q.metadataReader, q.metadataErr = q.block.SeriesMetadata() + } + }) + return q.metadataReader, q.metadataErr } func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet { return selectSeriesSet(ctx, sortSeries, hints, ms, q.index, q.chunks, q.tombstones, q.mint, q.maxt) } +// GetResourceAt implements storage.ResourceQuerier. +func (q *blockQuerier) GetResourceAt(labelsHash uint64, timestamp int64) (*seriesmetadata.ResourceVersion, bool) { + reader, err := q.getMetadataReader() + if reader == nil || err != nil { + return nil, false + } + return reader.GetResourceAt(labelsHash, timestamp) +} + +// IterUniqueAttributeNames implements storage.ResourceQuerier. +func (q *blockQuerier) IterUniqueAttributeNames(fn func(name string)) error { + reader, err := q.getMetadataReader() + if err != nil { + return err + } + if reader == nil { + return nil + } + seen := make(map[string]struct{}) + return reader.IterResources(context.Background(), func(_ uint64, resource *seriesmetadata.ResourceVersion) error { + if resource == nil { + return nil + } + for name := range resource.Identifying { + if _, ok := seen[name]; !ok { + seen[name] = struct{}{} + fn(name) + } + } + for name := range resource.Descriptive { + if _, ok := seen[name]; !ok { + seen[name] = struct{}{} + fn(name) + } + } + return nil + }) +} + +// Close closes the querier and releases resources. 
+func (q *blockQuerier) Close() error { + // Check if already closed to avoid double-closing the metadata reader + // which would cause a negative WaitGroup counter. + if q.closed { + return errors.New("block querier already closed") + } + err := q.blockBaseQuerier.Close() + if q.metadataReader != nil { + if closeErr := q.metadataReader.Close(); closeErr != nil && err == nil { + err = closeErr + } + } + return err +} + func selectSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher, ix IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64, ) storage.SeriesSet { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 106b8e51bcb..58065b5bdda 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -20,6 +20,8 @@ import ( "fmt" "log/slog" "math" + "slices" + "strconv" "unsafe" "github.com/prometheus/common/model" @@ -58,6 +60,10 @@ const ( CustomBucketsHistogramSamples Type = 9 // CustomBucketsFloatHistogramSamples is used to match WAL records of type Float Histogram with custom buckets. CustomBucketsFloatHistogramSamples Type = 10 + // ResourceUpdate is used to match WAL records of type ResourceUpdate. + ResourceUpdate Type = 11 + // ScopeUpdate is used to match WAL records of type ScopeUpdate. 
+ ScopeUpdate Type = 12 ) func (rt Type) String() string { @@ -82,6 +88,10 @@ func (rt Type) String() string { return "mmapmarkers" case Metadata: return "metadata" + case ResourceUpdate: + return "resource_update" + case ScopeUpdate: + return "scope_update" default: return "unknown" } @@ -144,8 +154,10 @@ func ToMetricType(m uint8) model.MetricType { } const ( - unitMetaName = "UNIT" - helpMetaName = "HELP" + unitMetaName = "UNIT" + helpMetaName = "HELP" + minTimeMetaName = "MINT" + maxTimeMetaName = "MAXT" ) // ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. @@ -167,10 +179,12 @@ type RefSample struct { // RefMetadata is the metadata associated with a series ID. type RefMetadata struct { - Ref chunks.HeadSeriesRef - Type uint8 - Unit string - Help string + Ref chunks.HeadSeriesRef + Type uint8 + Unit string + Help string + MinTime int64 // Timestamp of the earliest sample in the batch that contributed this metadata. + MaxTime int64 // Timestamp of the latest sample in the batch that contributed this metadata. } // RefExemplar is an exemplar with the labels, timestamp, value the exemplar was collected/observed with, and a reference to a series. @@ -201,6 +215,34 @@ type RefMmapMarker struct { MmapRef chunks.ChunkDiskMapperRef } +// RefResourceEntity represents a single entity in a WAL resource record. +type RefResourceEntity struct { + Type string + ID map[string]string + Description map[string]string +} + +// RefResource is a resource update associated with a series ref. +type RefResource struct { + Ref chunks.HeadSeriesRef + MinTime int64 + MaxTime int64 + Identifying map[string]string + Descriptive map[string]string + Entities []RefResourceEntity +} + +// RefScope is a scope update associated with a series ref. 
+type RefScope struct { + Ref chunks.HeadSeriesRef + MinTime int64 + MaxTime int64 + Name string + Version string + SchemaURL string + Attrs map[string]string +} + // Decoder decodes series, sample, metadata and tombstone records. type Decoder struct { builder labels.ScratchBuilder @@ -220,7 +262,7 @@ func (*Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketsHistogramSamples, CustomBucketsFloatHistogramSamples: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketsHistogramSamples, CustomBucketsFloatHistogramSamples, ResourceUpdate, ScopeUpdate: return t } return Unknown @@ -263,10 +305,10 @@ func (*Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, err typ := dec.Byte() numFields := dec.Uvarint() - // We're currently aware of two more metadata fields other than TYPE; that is UNIT and HELP. - // We can skip the rest of the fields (if we encounter any), but we must decode them anyway - // so we can correctly align with the start with the next metadata record. + // Decode all named fields. Known fields: UNIT, HELP, MINT, MAXT. + // Unknown fields are skipped for forward compatibility. 
var unit, help string + var minTime, maxTime int64 for range numFields { fieldName := dec.UvarintStr() fieldValue := dec.UvarintStr() @@ -275,14 +317,20 @@ func (*Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, err unit = fieldValue case helpMetaName: help = fieldValue + case minTimeMetaName: + minTime, _ = strconv.ParseInt(fieldValue, 10, 64) + case maxTimeMetaName: + maxTime, _ = strconv.ParseInt(fieldValue, 10, 64) } } metadata = append(metadata, RefMetadata{ - Ref: chunks.HeadSeriesRef(ref), - Type: typ, - Unit: unit, - Help: help, + Ref: chunks.HeadSeriesRef(ref), + Type: typ, + Unit: unit, + Help: help, + MinTime: minTime, + MaxTime: maxTime, }) } if dec.Err() != nil { @@ -294,6 +342,95 @@ func (*Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, err return metadata, nil } +// decodeMap reads a varint-length-prefixed map of string pairs from the decoder. +func decodeMap(dec *encoding.Decbuf) map[string]string { + n := dec.Uvarint() + if n == 0 { + return nil + } + m := make(map[string]string, n) + for range n { + k := dec.UvarintStr() + v := dec.UvarintStr() + m[k] = v + } + return m +} + +// Resources decodes resource updates from the WAL record. 
+func (*Decoder) Resources(rec []byte, resources []RefResource) ([]RefResource, error) { + dec := encoding.Decbuf{B: rec} + + if Type(dec.Byte()) != ResourceUpdate { + return nil, errors.New("invalid record type") + } + for len(dec.B) > 0 && dec.Err() == nil { + ref := dec.Uvarint64() + minTime := dec.Varint64() + maxTime := dec.Varint64() + identifying := decodeMap(&dec) + descriptive := decodeMap(&dec) + numEntities := dec.Uvarint() + entities := make([]RefResourceEntity, numEntities) + for i := range numEntities { + entities[i] = RefResourceEntity{ + Type: dec.UvarintStr(), + ID: decodeMap(&dec), + Description: decodeMap(&dec), + } + } + resources = append(resources, RefResource{ + Ref: chunks.HeadSeriesRef(ref), + MinTime: minTime, + MaxTime: maxTime, + Identifying: identifying, + Descriptive: descriptive, + Entities: entities, + }) + } + if dec.Err() != nil { + return nil, dec.Err() + } + if len(dec.B) > 0 { + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return resources, nil +} + +// Scopes decodes scope updates from the WAL record. 
+func (*Decoder) Scopes(rec []byte, scopes []RefScope) ([]RefScope, error) { + dec := encoding.Decbuf{B: rec} + + if Type(dec.Byte()) != ScopeUpdate { + return nil, errors.New("invalid record type") + } + for len(dec.B) > 0 && dec.Err() == nil { + ref := dec.Uvarint64() + minTime := dec.Varint64() + maxTime := dec.Varint64() + name := dec.UvarintStr() + version := dec.UvarintStr() + schemaURL := dec.UvarintStr() + attrs := decodeMap(&dec) + scopes = append(scopes, RefScope{ + Ref: chunks.HeadSeriesRef(ref), + MinTime: minTime, + MaxTime: maxTime, + Name: name, + Version: version, + SchemaURL: schemaURL, + Attrs: attrs, + }) + } + if dec.Err() != nil { + return nil, dec.Err() + } + if len(dec.B) > 0 { + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return scopes, nil +} + func yoloString(b []byte) string { return unsafe.String(unsafe.SliceData(b), len(b)) } @@ -680,11 +817,74 @@ func (*Encoder) Metadata(metadata []RefMetadata, b []byte) []byte { buf.PutByte(m.Type) - buf.PutUvarint(2) // num_fields: We currently have two more metadata fields, UNIT and HELP. + buf.PutUvarint(4) // num_fields: UNIT, HELP, MINT, MAXT. buf.PutUvarintStr(unitMetaName) buf.PutUvarintStr(m.Unit) buf.PutUvarintStr(helpMetaName) buf.PutUvarintStr(m.Help) + buf.PutUvarintStr(minTimeMetaName) + buf.PutUvarintStr(strconv.FormatInt(m.MinTime, 10)) + buf.PutUvarintStr(maxTimeMetaName) + buf.PutUvarintStr(strconv.FormatInt(m.MaxTime, 10)) + } + + return buf.Get() +} + +// encodeMap writes a varint-length-prefixed map of string pairs to the encoder. +// Keys are sorted for deterministic encoding. The keys buffer is reused across +// calls to avoid allocation per map encode. 
+func encodeMap(buf *encoding.Encbuf, m map[string]string, keys []string) []string { + buf.PutUvarint(len(m)) + keys = keys[:0] + for k := range m { + keys = append(keys, k) + } + slices.Sort(keys) + for _, k := range keys { + buf.PutUvarintStr(k) + buf.PutUvarintStr(m[k]) + } + return keys +} + +// Resources appends the encoded resource updates to b and returns the resulting slice. +func (*Encoder) Resources(resources []RefResource, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(ResourceUpdate)) + + var keysBuf []string + for _, r := range resources { + buf.PutUvarint64(uint64(r.Ref)) + buf.PutVarint64(r.MinTime) + buf.PutVarint64(r.MaxTime) + keysBuf = encodeMap(&buf, r.Identifying, keysBuf) + keysBuf = encodeMap(&buf, r.Descriptive, keysBuf) + buf.PutUvarint(len(r.Entities)) + for _, e := range r.Entities { + buf.PutUvarintStr(e.Type) + keysBuf = encodeMap(&buf, e.ID, keysBuf) + keysBuf = encodeMap(&buf, e.Description, keysBuf) + } + } + + return buf.Get() +} + +// Scopes appends the encoded scope updates to b and returns the resulting slice. +func (*Encoder) Scopes(scopes []RefScope, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(ScopeUpdate)) + + var keysBuf []string + for _, s := range scopes { + buf.PutUvarint64(uint64(s.Ref)) + buf.PutVarint64(s.MinTime) + buf.PutVarint64(s.MaxTime) + buf.PutUvarintStr(s.Name) + buf.PutUvarintStr(s.Version) + buf.PutUvarintStr(s.SchemaURL) + keysBuf = encodeMap(&buf, s.Attrs, keysBuf) } return buf.Get() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/README.md b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/README.md new file mode 100644 index 00000000000..1277b2269d5 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/README.md @@ -0,0 +1,377 @@ +# tsdb/seriesmetadata + +This package persists OTel (OpenTelemetry) resource attributes, instrumentation scopes, and entities alongside Prometheus TSDB blocks. 
Data is stored in a Parquet sidecar file (`series_metadata.parquet`) within each block directory. + +Enabled via `--enable-feature=native-metadata`. + +## Overview + +Each time series in Prometheus can have associated OTel metadata: + +- **Resource attributes**: identifying (e.g. `service.name`) and descriptive (e.g. `host.name`) attributes from the OTel Resource +- **Entities**: typed OTel entities (service, host, container, etc.) with their own ID and description attributes, embedded within resource versions +- **Scopes**: OTel InstrumentationScope data (library name, version, schema URL, custom attributes) + +All metadata is **versioned over time** per series. When a descriptive attribute changes (e.g. a service migrates to a new host), a new version is created with its own time range. Identifying attributes remain constant across versions. + +## Architecture + +``` + ┌─────────────────────┐ + │ OTLP Ingestion │ + │ (CombinedAppender) │ + └─────────┬───────────┘ + │ UpdateResource() / UpdateScope() + ▼ + ┌─────────────────────┐ + │ TSDB Head Block │ + │ (memSeries) │ + │ *nativeMeta │ + └─────────┬───────────┘ + │ Compaction + ▼ +┌──────────────────────────────────────────────────────────┐ +│ Parquet Sidecar File │ +│ series_metadata.parquet │ +│ │ +│ ┌──────────────┐ ┌──────────────────┐ │ +│ │resource_table│ │resource_mapping │ │ +│ │ (deduplicated│ │ (series_ref → │ │ +│ │ content) │ │ content_hash + │ │ +│ │ │ │ time range) │ │ +│ └──────────────┘ └──────────────────┘ │ +│ ┌──────────────┐ ┌──────────────────┐ │ +│ │ scope_table │ │ scope_mapping │ │ +│ └──────────────┘ └──────────────────┘ │ +│ ┌────────────────────────────────────┐ │ +│ │ resource_attr_index (optional) │ │ +│ │ (inverted index: attr → series) │ │ +│ └────────────────────────────────────┘ │ +└──────────────────────────────────────────────────────────┘ + │ + ▼ + ┌──────────────────────────────┐ + │ Query / API │ + │ info(), /resources, │ + │ /resources/series │ + 
└──────────────────────────────┘ +``` + +The `/resources/series` reverse-lookup endpoint only supports filtering by resource attributes (`resource.attr=key:value`). Scope and entity filters are intentionally excluded as an architectural choice for simplicity — matched series include their scope versions in the response as supplementary data but scopes are not filterable. + +## Kind Framework + +The metadata subsystem uses a **kind framework** to handle different metadata types (resources, scopes) generically. This avoids duplicating nearly identical code at every layer (WAL, Parquet, head commit/replay, compaction, DB merge). + +### Architecture + +- **Go generics** for type-safe hot paths: `Versioned[V]` (versioned container), `MemStore[V]` (in-memory store), `KindOps[V]` (equality/copy operations) +- **`KindDescriptor` interface** for runtime dispatch at serialization boundaries: WAL encode/decode, Parquet conversion, head commit, store operations. Methods use `any` for type erasure since the registry is not generic. +- **Kind registry** with lookup by `KindID`, WAL record type, table namespace, and mapping namespace. Each kind registers itself in an `init()` function. + +Adding a new metadata kind requires: (1) define the version struct, (2) implement `KindOps` and `KindDescriptor`, (3) register in `init()`. The framework layers (stores, Parquet normalization, WAL dispatch, head commit/replay, compaction merge, DB merge) all work automatically via the registry. + +### Import Cycle Avoidance + +`seriesmetadata` cannot import `tsdb/record` (which would create a cycle). The registry defines `WALRecordType = uint8` and each kind descriptor uses pluggable function variables (`ResourceDecodeWAL`, `ScopeDecodeWAL`, etc.) that are set by the `tsdb` package during init. + +## In-Memory Model + +Data is stored per-series, keyed by `labels.StableHash` (a 64-bit hash of the series' label set). 
+ +### Generic Containers + +`Versioned[V]` is a generic container holding `[]V` ordered by MinTime ascending. Type aliases provide backward compatibility: + +- `VersionedResource = Versioned[*ResourceVersion]` → `[]*ResourceVersion` + - Each `ResourceVersion` has: `Identifying` attrs, `Descriptive` attrs, `[]*Entity`, `MinTime`, `MaxTime` +- `VersionedScope = Versioned[*ScopeVersion]` → `[]*ScopeVersion` + - Each `ScopeVersion` has: `Name`, `Version`, `SchemaURL`, `Attrs`, `MinTime`, `MaxTime` + +### Generic Stores + +`MemStore[V]` is a 256-way sharded generic in-memory store (matching the `stripeSeries` pattern in `tsdb/head.go`). Each shard has its own `sync.RWMutex` and map with 40-byte cache-line padding to prevent false sharing. Sharding is by `labelsHash & 0xFF`. When the `KindOps` also implements `ContentDedupOps[V]` (both `resourceOps` and `scopeOps` do), MemStore maintains a content-addressed dedup table so that versions with identical content share map/slice pointers from a single canonical entry (see Content-Addressed Dedup below). Type aliases provide backward compatibility: + +| Alias | Underlying Type | Key | Value | +|-------|-----------------|-----|-------| +| `MemResourceStore` | `MemStore[*ResourceVersion]` | labelsHash | `*Versioned[*ResourceVersion]` | +| `MemScopeStore` | `MemStore[*ScopeVersion]` | labelsHash | `*Versioned[*ScopeVersion]` | + +`MemSeriesMetadata` wraps a `map[KindID]any` (each value is a `*MemStore[V]` for the appropriate V) and implements the `Reader` interface. It provides `StoreForKind(id)` for generic access and type-safe accessors `ResourceStore()` / `ScopeStore()`. + +### Resource Attribute Inverted Index + +`MemSeriesMetadata` supports an optional 256-way sharded inverted index (`shardedAttrIndex`) for O(1) reverse lookup by resource attribute key:value pairs. Each stripe has its own `sync.RWMutex` and map, with keys routed by `xxhash.Sum64String("key\x00value") & 0xFF`. 
Within each stripe, values are sorted `[]uint64` of labelsHashes, using copy-on-write sorted slices (~4x memory reduction vs maps) that enable zero-copy reads. Lock ordering: `indexedResourceAttrsMu.RLock()` (for config) → per-stripe `mtx.Lock()` (never hold two stripe locks simultaneously). + +**Enable/disable**: The entire inverted index can be disabled via `tsdb.Options.EnableResourceAttrIndex` (default `true`). When disabled: `Head.Init()` skips `InitResourceAttrIndex()`, `DB.mergeBlockMetadata()` skips `BuildResourceAttrIndex()`, and compaction writes no `resource_attr_index` rows. The `UniqueResourceAttrNames()` cache still works — `UpdateResourceAttrIndex()` tracks attribute names before the nil guard, so autocomplete is available even without the index. Mimir uses this to disable the index per-tenant when Parquet-native filtering on `attr_key`/`attr_value` columns is preferred. + +**Selective indexing**: The index uses selective attribute indexing to control its size: +- **Identifying attributes** (from `ResourceVersion.Identifying`, e.g. `service.name`, `service.namespace`, `service.instance.id`) are **always** indexed +- **Descriptive attributes** (from `ResourceVersion.Descriptive`) are only indexed if their key is in `indexedResourceAttrs` — a configurable set passed via `SetIndexedResourceAttrs()`, sourced from `tsdb.Options.IndexedResourceAttrs` +- Default (nil `indexedResourceAttrs`) means only identifying attributes are indexed, reducing index size by ~10x at scale + +This filtering applies consistently across all index operations: `BuildResourceAttrIndex()`, `UpdateResourceAttrIndex()`, `RemoveFromResourceAttrIndex()`, and Parquet `buildResourceAttrIndexRows()`. + +- **`BuildResourceAttrIndex()`**: Iterates all resource versions once and populates the index. Called by `DB.buildSeriesMetadata()` after merging blocks + head. Since the merged reader is cached for 30 seconds, the index build cost is amortized across many API requests. 
+- **`LookupResourceAttr(key, value)`**: Returns a sorted `[]uint64` of labelsHashes matching the given attribute, or nil if the index has not been built. The returned slice must not be modified (COW guarantees safety for concurrent readers). The `/resources/series` handler intersects candidate slices from multiple filters using sorted two-pointer intersection, then verifies each candidate with `GetVersionedResource` + time range + attribute checks. Falls back to a full `IterVersionedResources` scan if the index is nil. + +The index is **time-unaware** — it includes all labelsHashes that have *any* version with the attribute. Time-range filtering happens during the verification step after index lookup. This is a deliberate trade-off: the index stays simple and the handler already performs per-version time filtering. + +### Head Storage + +On `memSeries`, OTel metadata is stored behind a `*nativeMeta` pointer (nil when native metadata is not in use, saving 24 bytes per series vs inline fields). The `nativeMeta` struct holds `stableHash uint64` (cached `labels.StableHash`, computed lazily on first metadata commit) and `kindMeta []kindMetaEntry` where each entry is `{kind KindID, data any}`. Linear scan of 0-2 entries is faster than map lookup. The `kindMetaAccessor` interface (`GetKindMeta`/`SetKindMeta`) provides kind-generic access; `SetKindMeta` lazy-allocates `nativeMeta`. + +The `Head` also maintains a shared `*MemSeriesMetadata` (`seriesMeta`) that is incrementally updated during `commitResources()`/`commitScopes()` and WAL replay via `updateSharedMetadata()`. This avoids an O(ALL_SERIES) scan that would otherwise be required to collect metadata across all shards. `Head.SeriesMetadata()` returns an O(1) `headMetadataReader` wrapper around this live store instead of scanning. + +The head does **not** populate `seriesMeta.labelsMap` — labels are resolved on-demand via `stripeSeries.getByID` in `headMetadataReader.LabelsForHash`. 
This saves ~3GB at 10M series by avoiding label set duplication. + +Two sharded stripe arrays (`metaRefStripes` and `metaHashStripes`, 256-way) track `HeadSeriesRef ↔ labelsHash` mappings for GC cleanup and label resolution. Lock ordering: series lock → stripe lock. `MemStore[V]` operations are internally concurrent-safe via their own mutexes. + +### Versioning + +`AddOrExtend(ops, version)` on `Versioned[V]` handles ingestion: +- If the latest version's content matches (via `KindOps.Equal`), extends `MaxTime` (same content, later timestamp) +- If content differs, appends a new version (attributes changed) + +### Content-Addressed Dedup + +When many series share the same OTel resource or scope (common at scale — e.g. 30M series sharing ~1K unique resources), the denormalized per-series storage wastes memory: each series holds a deep-copied version, and the shared `MemStore` holds another. Content-addressed dedup eliminates this by sharing map/slice pointers from a single canonical entry. + +**Interface**: `ContentDedupOps[V]` is an optional extension of `KindOps[V]`, detected via type assertion. Both `resourceOps` and `scopeOps` implement it. + +```go +type ContentDedupOps[V VersionConstraint] interface { + ContentHash(v V) uint64 // deterministic xxhash of immutable content fields + ThinCopy(canonical, v V) V // new V sharing canonical's maps, v's time range +} +``` + +**Content table**: A separate 256-way sharded map (`contentHash → V`) stores one canonical (deep-copied) version per unique content. `getOrCreateCanonical` uses double-checked locking (RLock → Lock on miss). Created lazily only when ops implements `ContentDedupOps`. + +**Interning**: All `MemStore` methods that store deep copies call `internVersions` (or `internLastVersion`) after the copy/merge, replacing each version with a thin copy sharing the canonical's maps. The `SetVersionedWithDiff` fast path (time-range extension only) skips interning since no new version is created. 
`InternVersion(v)` is public for per-series interning from outside `MemStore`.
+
+**Per-series interning**: After `CommitResourceDirect`/`CommitScopeDirect` creates a deep-copied version on the series, `Head.internSeriesResource()`/`internSeriesScope()` replaces its maps with canonical pointers. Same in WAL replay after `CommitToSeries`.
+
+**Memory impact** (30M series, 1K unique resources): per-version drops from ~1500B (deep copy with maps) to ~72B (thin copy struct with shared pointers). Canonicals are never deleted — at 1K unique resources × 1500B = 1.5MB, negligible.
+
+## Parquet File Format
+
+### Content-Addressed Normalization
+
+The file eliminates cross-series duplication using a **table + mapping** pattern. Many series sharing the same OTel resource produce **1 table row + N mapping rows** instead of N full copies.
+
+Content is keyed by `xxhash.Sum64` of sorted attributes (deterministic regardless of map iteration order). The hash covers all content fields but excludes time ranges.
+
+### Namespace Types
+
+Each row has a `namespace` discriminator field:
+
+| Namespace | Purpose | Key Fields Used |
+|-----------|---------|-----------------|
+| `resource_table` | Unique resource content | `content_hash`, `identifying_attrs`, `descriptive_attrs`, `entities` |
+| `resource_mapping` | Series → resource + time range | `series_ref`, `content_hash`, `mint`, `maxt` |
+| `scope_table` | Unique scope content | `content_hash`, `scope_name`, `scope_version_str`, `schema_url`, `scope_attrs` |
+| `scope_mapping` | Series → scope + time range | `series_ref`, `content_hash`, `mint`, `maxt` |
+| `resource_attr_index` | Inverted index: attr → series | `series_ref`, `content_hash`, `identifying_attrs[0]`, `attr_key`, `attr_value` |
+
+### Parquet Schema
+
+A single `metadataRow` struct covers all five namespace types. Fields unused by a namespace are zero-valued. 
+ +``` +metadataRow +├── namespace string (row type discriminator) +├── series_ref uint64 (series identifier for mapping rows) +├── mint int64? (minimum timestamp, milliseconds) +├── maxt int64? (maximum timestamp, milliseconds) +├── content_hash uint64? (xxhash content key) +├── identifying_attrs list? (resource identifying attributes) +│ └── element +│ ├── key string +│ └── value string +├── descriptive_attrs list? (resource descriptive attributes) +│ └── element +│ ├── key string +│ └── value string +├── entities list? (typed OTel entities) +│ └── element +│ ├── type string +│ ├── id list (entity identifying attributes) +│ │ └── element +│ │ ├── key string +│ │ └── value string +│ └── description list (entity descriptive attributes) +│ └── element +│ ├── key string +│ └── value string +├── scope_name string? (InstrumentationScope name) +├── scope_version_str string? (InstrumentationScope version) +├── schema_url string? (InstrumentationScope schema URL) +├── scope_attrs list? (InstrumentationScope attributes) +│ └── element +│ ├── key string +│ └── value string +├── attr_key string? (resource_attr_index: attribute name) +└── attr_value string? (resource_attr_index: attribute value) +``` + +### SeriesRef and the Resolver Pattern + +Mapping rows store `series_ref` — the block-level series reference (a small integer from the block's index). The in-memory model uses `labelsHash` (a stable 64-bit hash of the series' labels). Conversion between the two happens at the Parquet boundary: + +- **Write path**: `WriterOptions.RefResolver` converts `labelsHash → seriesRef`. During compaction, this resolver is built by scanning the new block's postings. When no resolver is provided (head/test writes), `labelsHash` is written directly as `series_ref`. +- **Read path**: `WithRefResolver` reader option converts `seriesRef → labelsHash`. During block open, this resolver calls the block's index reader to look up the series' labels and compute `labels.StableHash()`. 
When no resolver is provided, `series_ref` is used as-is. + +### Row Groups and Physical Layout + +Each namespace is written as a separate row group (or multiple row groups if `MaxRowsPerRowGroup` is set). This enables selective reads — a reader can skip entire row groups based on namespace column statistics. + +Write order (alphabetical by namespace value): `resource_mapping`, `resource_table`, `scope_mapping`, `scope_table`, then `resource_attr_index` (when enabled). + +Within each namespace, rows are sorted by `(series_ref, content_hash, mint)` for better zstd compression. + +### Footer Metadata + +Parquet footer key-value pairs: + +| Key | Description | +|-----|-------------| +| `schema_version` | Currently `"1"` | +| `resource_table_count` | Number of unique resource content rows | +| `resource_mapping_count` | Number of series→resource mapping rows | +| `scope_table_count` | Number of unique scope content rows | +| `scope_mapping_count` | Number of series→scope mapping rows | +| `resource_attr_index_count` | Number of inverted index rows (when enabled) | +| `row_group_layout` | `"namespace_partitioned"` | + +### Compression + +All data is zstd-compressed at `SpeedBetterCompression` level. Typical file sizes are kilobytes to low megabytes. + +## API + +### Writing + +```go +// Simple write (no resolver, labelsHash stored as series_ref) +size, err := WriteFile(logger, blockDir, reader) + +// With options (compaction uses this) +stats := &seriesmetadata.WriteStats{} +size, err := WriteFileWithOptions(logger, blockDir, reader, WriterOptions{ + RefResolver: func(labelsHash uint64) (seriesRef uint64, ok bool) { ... 
}, + MaxRowsPerRowGroup: 10000, + BloomFilterFormat: BloomFilterParquetNative, + EnableInvertedIndex: true, + IndexedResourceAttrs: map[string]struct{}{"k8s.namespace.name": {}}, + WriteStats: stats, +}) +// stats.NamespaceRowCounts now contains per-namespace row counts +``` + +### Reading + +```go +// From file path (returns empty reader if file doesn't exist) +reader, size, err := ReadSeriesMetadata(logger, blockDir, + WithRefResolver(func(seriesRef uint64) (labelsHash uint64, ok bool) { ... }), +) +defer reader.Close() + +// From io.ReaderAt (for object storage / distributed systems) +reader, err := ReadSeriesMetadataFromReaderAt(logger, readerAt, size, + WithNamespaceFilter("resource_table", "resource_mapping"), + WithRefResolver(resolver), +) +``` + +### Querying + +```go +// Get latest resource for a series +rv, ok := reader.GetResource(labelsHash) + +// Get resource active at a specific timestamp +rv, ok := reader.GetResourceAt(labelsHash, timestampMs) + +// Get all versions +vr, ok := reader.GetVersionedResource(labelsHash) +for _, version := range vr.Versions { + fmt.Println(version.MinTime, version.Identifying, version.Descriptive) +} + +// Iterate all series (type-safe) +reader.IterVersionedResources(ctx, func(labelsHash uint64, vr *VersionedResource) error { + // ... + return nil +}) + +// Iterate via kind framework (generic) +reader.IterKind(ctx, KindResource, func(labelsHash uint64, versioned any) error { + vr := versioned.(*VersionedResource) + // ... + return nil +}) + +// Reverse lookup: find series by resource attribute (O(1) with index) +hashes := reader.LookupResourceAttr("service.name", "payment-service") +for _, hash := range hashes { + vr, _ := reader.GetVersionedResource(hash) + // verify time range / additional filters... +} +``` + +## Entities + +Entities represent typed OTel resources within a `ResourceVersion`. 
Seven predefined types: + +| Entity Type | Example Identifying Attributes | Example Descriptive Attributes | +|-------------|-------------------------------|-------------------------------| +| `service` | `service.name`, `service.namespace`, `service.instance.id` | `deployment.environment` | +| `host` | `host.name` | `cloud.region`, `cloud.provider` | +| `container` | `container.id` | `container.image.name` | +| `k8s.pod` | `k8s.pod.uid` | `k8s.pod.name` | +| `k8s.node` | `k8s.node.uid` | `k8s.node.name` | +| `process` | `process.pid` | `process.command` | +| `resource` | (default type) | (all non-identifying attributes) | + +Entities are derived from OTel resource attributes using `entity.ResourceEntityRefs()` from the xpdata package during OTLP ingestion. + +## Distributed-Scale Features + +Several features are designed for object-storage access patterns in clustered implementations (e.g. Grafana Mimir store-gateway and ingesters): + +- **`io.ReaderAt` API**: `ReadSeriesMetadataFromReaderAt()` decouples from `*os.File`, enabling `objstore.Bucket`-backed readers +- **Namespace filtering**: `WithNamespaceFilter()` skips non-matching row groups using Parquet column index min/max bounds +- **Bloom filters**: `WriterOptions.BloomFilterFormat` controls bloom filter generation. `BloomFilterParquetNative` embeds split-block bloom filters on `series_ref`, `content_hash`, `attr_key`, and `attr_value` columns. Write-only in this package; querying happens in the consumer. The `attr_key`/`attr_value` bloom filters enable store-gateways to find matching `resource_attr_index` rows via Parquet-native filtering. `BloomFilterSidecar` is reserved for future use (separate file for independent store-gateway caching) +- **Configurable inverted index**: `tsdb.Options.EnableResourceAttrIndex` (default `true`) controls whether the inverted index is built at all. 
When `false`, no index is built in memory (head skips `InitResourceAttrIndex()`), no `resource_attr_index` rows are written to Parquet (compactor sets `WriterOptions.EnableInvertedIndex` from this flag), and `DB.mergeBlockMetadata()` skips `BuildResourceAttrIndex()`. `UniqueResourceAttrNames()` still works — attr name tracking is decoupled from the index. Mimir uses this to disable the index per-tenant (~12 GB savings per block at 1B series) when Parquet-native filtering on `attr_key`/`attr_value` bloom filters is preferred +- **Selective resource attribute indexing**: `WriterOptions.IndexedResourceAttrs` controls which descriptive attributes appear in the inverted index. Identifying attributes are always indexed. This reduces index size by ~10x at scale +- **Row group size limits**: `WriterOptions.MaxRowsPerRowGroup` bounds memory usage when reading large row groups +- **`BlockSeriesMetadata` in `meta.json`**: After compaction, `BlockMeta.SeriesMetadata` records namespace row counts and indexed resource attribute names, enabling Mimir store-gateway to pre-plan queries without opening the Parquet file +- **`WriteStats`**: `WriterOptions.WriteStats` is populated after a successful write with per-namespace row counts, allowing the caller to capture stats (e.g. for `BlockMeta`) without parsing the Parquet footer +- **Per-tenant `IndexedResourceAttrs`**: `Head.SetIndexedResourceAttrs()` allows runtime reconfiguration of which descriptive attributes are indexed, enabling Mimir ingesters to apply per-tenant overrides +- **Direct commit functions**: `CommitResourceDirect()` and `CommitScopeDirect()` are called directly on the hot ingestion path with typed arguments (bypassing `interface{}` boxing). They use single-copy ownership: `maps.Clone` once from caller buffers, construct the version struct, and inline `AddOrExtend` logic — no redundant deep copies via `NewResourceVersion`/`copyResourceVersion`. 
`KindDescriptor.CommitToSeries()` (WAL replay) delegates to the same Direct functions after type-asserting `any` arguments. `CollectResourceDirect()` and `CollectScopeDirect()` provide the same boxing-avoidance for the collect path +- **Unique attribute name cache**: `UniqueAttrNameReader` interface (via type assertion) provides O(1) access to the set of all resource attribute names, avoiding O(N_series) full scans for attribute name discovery +- **`SetVersionedWithDiff` fast-path**: When the incoming `Versioned` has a single version that equals the existing current version (~90% of steady-state commits), `UpdateTimeRange` extends in-place with zero allocations, skipping `MergeVersioned` (~12 allocs) +- **In-memory content-addressed dedup**: `MemStore[V]` uses `ContentDedupOps[V]` to share map/slice pointers across versions with identical content. Per-version memory drops from ~1500B to ~72B. `InternVersion()` is public for per-series interning from head commit and WAL replay paths +- **WAL checkpoint content dedup**: Both resources and scopes use content-addressed dedup (`contentMapping` type) in WAL checkpoints, reducing memory from O(N_series × content_size) to O(N_unique_content + N_series × 24B) +- **Chunked checkpoint flushing**: WAL checkpoint resource and scope records are flushed in 10K-record chunks instead of materializing all records into a single slice, bounding peak allocation to ~2 MB per chunk + +## File Organization + +| File | Contents | +|------|----------| +| `seriesmetadata.go` | Core types (`Reader`, `MemSeriesMetadata`, `parquetReader`), write/read paths, denormalization, resource attribute inverted index | +| `versioned.go` | Generic `Versioned[V]` container, `VersionConstraint` interface, `KindOps[V]`, `ContentDedupOps[V]`, `MergeVersioned()` | +| `mem_store.go` | Generic `MemStore[V]` 256-way sharded in-memory store with content-addressed dedup | +| `registry.go` | `KindDescriptor` interface, `KindID`, global kind registry, 
`kindMetaAccessor` | +| `resource_kind.go` | `resourceKindDescriptor` (implements `KindDescriptor` for resources), `ResourceOps` (implements `KindOps` + `ContentDedupOps`), `ResourceCommitData`, `CommitResourceDirect()`, `CollectResourceDirect()`, content hashing | +| `scope_kind.go` | `scopeKindDescriptor` (implements `KindDescriptor` for scopes), `ScopeOps` (implements `KindOps` + `ContentDedupOps`), `ScopeCommitData`, `CommitScopeDirect()`, `CollectScopeDirect()`, content hashing | +| `parquet_schema.go` | Parquet schema (`metadataRow`, `EntityRow`, `EntityAttributeEntry`), namespace constants | +| `entity.go` | `Entity`, `ResourceVersion`, type aliases (`VersionedResource`, `MemResourceStore`, `VersionedResourceReader`) | +| `scope.go` | `ScopeVersion`, type aliases (`VersionedScope`, `MemScopeStore`, `VersionedScopeReader`) | +| `content_hash.go` | Shared `hashAttrs()` utility for deterministic xxhash of attribute maps | +| `resource_attributes.go` | `SplitAttributes()`, `IsIdentifyingAttribute()`, `AttributesEqual()` | +| `writer_options.go` | `WriterOptions` (`BloomFilterFormat`, row group limits, `IndexedResourceAttrs`, `RefResolver`, `WriteStats`) | +| `reader_options.go` | `ReaderOption`, `WithNamespaceFilter()`, `WithRefResolver()`, `ReadSeriesMetadataFromReaderAt()` | +| `layered_reader.go` | `layeredMetadataReader` (combines base block reader + head reader with dedup), `MergeSortedUnique()` | diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/vmlimits_openbsd.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/content_hash.go similarity index 54% rename from vendor/github.com/prometheus/prometheus/util/runtime/vmlimits_openbsd.go rename to vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/content_hash.go index ce9aa181e63..8933693214d 100644 --- a/vendor/github.com/prometheus/prometheus/util/runtime/vmlimits_openbsd.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/content_hash.go @@ -11,15 
+11,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build openbsd - -package runtime +package seriesmetadata import ( - "syscall" + "slices" + + "github.com/cespare/xxhash/v2" ) -// VMLimits returns the soft and hard limits for virtual memory. -func VMLimits() string { - return getLimits(syscall.RLIMIT_DATA, "b") +// hashAttrs writes a deterministic representation of a string map into a hash. +// Keys are sorted before hashing for determinism. +func hashAttrs(h *xxhash.Digest, attrs map[string]string) { + keys := make([]string, 0, len(attrs)) + for k := range attrs { + keys = append(keys, k) + } + slices.Sort(keys) + for _, k := range keys { + _, _ = h.WriteString(k) + _, _ = h.Write([]byte{0}) + _, _ = h.WriteString(attrs[k]) + _, _ = h.Write([]byte{0}) + } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/entity.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/entity.go new file mode 100644 index 00000000000..c7e268399aa --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/entity.go @@ -0,0 +1,335 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +import ( + "context" + "errors" + "maps" + "slices" + + "github.com/grafana/regexp" +) + +// Entity type constants aligned with OTel semantic conventions. 
+const ( + // EntityTypeResource is the default entity type for generic OTel resources. + EntityTypeResource = "resource" + // EntityTypeService represents a service entity. + EntityTypeService = "service" + // EntityTypeHost represents a host entity. + EntityTypeHost = "host" + // EntityTypeContainer represents a container entity. + EntityTypeContainer = "container" + // EntityTypeK8sPod represents a Kubernetes pod entity. + EntityTypeK8sPod = "k8s.pod" + // EntityTypeK8sNode represents a Kubernetes node entity. + EntityTypeK8sNode = "k8s.node" + // EntityTypeProcess represents a process entity. + EntityTypeProcess = "process" +) + +// entityTypePattern validates entity type strings. +// Must start with a letter and contain only letters, numbers, dots, underscores, and hyphens. +var entityTypePattern = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9._-]*$`) + +// Entity represents an OTel entity with explicit type and structured attributes. +// Conforms to the OTel Entity Data Model specification. +// An Entity is part of a ResourceVersion and does not have its own time range. +type Entity struct { + // Type defines the entity type (e.g., "service", "host", "container", "resource"). + // MUST NOT be empty. Default is "resource" for backward compatibility. + Type string + + // ID contains identifying attributes that uniquely identify the entity. + // These attributes MUST NOT change during the entity's lifetime. + ID map[string]string + + // Description contains descriptive (non-identifying) attributes. + // These attributes MAY change over the entity's lifetime. + Description map[string]string +} + +// NewEntity creates a new Entity with the given type, identifying and descriptive attributes. 
+func NewEntity(entityType string, id, description map[string]string) *Entity { + if entityType == "" { + entityType = EntityTypeResource + } + + // Deep copy the maps + idCopy := make(map[string]string, len(id)) + maps.Copy(idCopy, id) + + descCopy := make(map[string]string, len(description)) + maps.Copy(descCopy, description) + + return &Entity{ + Type: entityType, + ID: idCopy, + Description: descCopy, + } +} + +// NewDefaultEntity creates a new Entity with the default "resource" type. +// It uses the hardcoded identifying attributes (service.name, service.namespace, service.instance.id) +// to separate identifying from descriptive attributes. +func NewDefaultEntity(attrs map[string]string) *Entity { + id := make(map[string]string) + description := make(map[string]string) + + for k, v := range attrs { + if IsIdentifyingAttribute(k) { + id[k] = v + } else { + description[k] = v + } + } + + return &Entity{ + Type: EntityTypeResource, + ID: id, + Description: description, + } +} + +// Validate checks if the entity has valid structure. +func (e *Entity) Validate() error { + if e.Type == "" { + return errors.New("entity type must not be empty") + } + if !entityTypePattern.MatchString(e.Type) { + return errors.New("entity type must match pattern [a-zA-Z][a-zA-Z0-9._-]*") + } + return nil +} + +// AllAttributes returns all attributes (both identifying and descriptive) combined. +func (e *Entity) AllAttributes() map[string]string { + result := make(map[string]string, len(e.ID)+len(e.Description)) + maps.Copy(result, e.ID) + maps.Copy(result, e.Description) + return result +} + +// copyEntity creates a deep copy of an Entity. 
+func copyEntity(e *Entity) *Entity { + var idCopy map[string]string + if e.ID != nil { + idCopy = make(map[string]string, len(e.ID)) + maps.Copy(idCopy, e.ID) + } + + var descCopy map[string]string + if e.Description != nil { + descCopy = make(map[string]string, len(e.Description)) + maps.Copy(descCopy, e.Description) + } + + return &Entity{ + Type: e.Type, + ID: idCopy, + Description: descCopy, + } +} + +// EntitiesEqual compares two entities for equality (type, id, and description). +func EntitiesEqual(a, b *Entity) bool { + if a.Type != b.Type { + return false + } + if !AttributesEqual(a.ID, b.ID) { + return false + } + return AttributesEqual(a.Description, b.Description) +} + +// ResourceVersion represents a snapshot of resource data at a point in time. +// Contains both resource-level attributes and typed entities. +type ResourceVersion struct { + // Identifying contains resource-level identifying attributes. + // These uniquely identify the resource (e.g., service.name, service.namespace). + Identifying map[string]string + + // Descriptive contains resource-level descriptive attributes. + // These provide additional context but don't identify the resource. + Descriptive map[string]string + + // Entities contains typed entities for this resource version. + // Each entity has a unique Type within the version. + Entities []*Entity + + // MinTime is the minimum timestamp (in milliseconds) when this version was observed. + MinTime int64 + // MaxTime is the maximum timestamp (in milliseconds) when this version was observed. + MaxTime int64 +} + +// NewResourceVersion creates a new ResourceVersion with the given attributes, entities and time range. 
+func NewResourceVersion(identifying, descriptive map[string]string, entities []*Entity, minTime, maxTime int64) *ResourceVersion { + // Deep copy identifying + idCopy := make(map[string]string, len(identifying)) + maps.Copy(idCopy, identifying) + + // Deep copy descriptive + descCopy := make(map[string]string, len(descriptive)) + maps.Copy(descCopy, descriptive) + + // Deep copy the entities + copiedEntities := make([]*Entity, len(entities)) + for i, e := range entities { + copiedEntities[i] = copyEntity(e) + } + // Sort by Type for consistent comparison + slices.SortFunc(copiedEntities, func(a, b *Entity) int { + if a.Type < b.Type { + return -1 + } + if a.Type > b.Type { + return 1 + } + return 0 + }) + return &ResourceVersion{ + Identifying: idCopy, + Descriptive: descCopy, + Entities: copiedEntities, + MinTime: minTime, + MaxTime: maxTime, + } +} + +// GetEntity returns the entity with the given type, or nil if not found. +func (rv *ResourceVersion) GetEntity(entityType string) *Entity { + for _, e := range rv.Entities { + if e.Type == entityType { + return e + } + } + return nil +} + +// GetMinTime returns the minimum timestamp. +func (rv *ResourceVersion) GetMinTime() int64 { return rv.MinTime } + +// GetMaxTime returns the maximum timestamp. +func (rv *ResourceVersion) GetMaxTime() int64 { return rv.MaxTime } + +// SetMinTime sets the minimum timestamp. +func (rv *ResourceVersion) SetMinTime(t int64) { rv.MinTime = t } + +// SetMaxTime sets the maximum timestamp. +func (rv *ResourceVersion) SetMaxTime(t int64) { rv.MaxTime = t } + +// UpdateTimeRange extends the time range to include the given timestamps. +func (rv *ResourceVersion) UpdateTimeRange(minTime, maxTime int64) { + if minTime < rv.MinTime { + rv.MinTime = minTime + } + if maxTime > rv.MaxTime { + rv.MaxTime = maxTime + } +} + +// ResourceVersionsEqual compares two resource versions for equality. +// Compares identifying, descriptive attributes and all entities. 
+func ResourceVersionsEqual(a, b *ResourceVersion) bool { + if !AttributesEqual(a.Identifying, b.Identifying) { + return false + } + if !AttributesEqual(a.Descriptive, b.Descriptive) { + return false + } + if len(a.Entities) != len(b.Entities) { + return false + } + // Entities are sorted by Type, so we can compare in order + for i := range a.Entities { + if !EntitiesEqual(a.Entities[i], b.Entities[i]) { + return false + } + } + return true +} + +// copyResourceVersion creates a deep copy of a ResourceVersion. +func copyResourceVersion(rv *ResourceVersion) *ResourceVersion { + var idCopy map[string]string + if rv.Identifying != nil { + idCopy = make(map[string]string, len(rv.Identifying)) + maps.Copy(idCopy, rv.Identifying) + } + + var descCopy map[string]string + if rv.Descriptive != nil { + descCopy = make(map[string]string, len(rv.Descriptive)) + maps.Copy(descCopy, rv.Descriptive) + } + + var copiedEntities []*Entity + if rv.Entities != nil { + copiedEntities = make([]*Entity, len(rv.Entities)) + for i, e := range rv.Entities { + copiedEntities[i] = copyEntity(e) + } + } + return &ResourceVersion{ + Identifying: idCopy, + Descriptive: descCopy, + Entities: copiedEntities, + MinTime: rv.MinTime, + MaxTime: rv.MaxTime, + } +} + +// VersionedResource is a type alias for the generic Versioned container for resources. +type VersionedResource = Versioned[*ResourceVersion] + +// NewVersionedResource creates a new VersionedResource with a single version. +func NewVersionedResource(version *ResourceVersion) *VersionedResource { + return &VersionedResource{ + Versions: []*ResourceVersion{copyResourceVersion(version)}, + } +} + +// MergeVersionedResources merges two VersionedResource instances for the same series. +func MergeVersionedResources(a, b *VersionedResource) *VersionedResource { + return MergeVersioned(ResourceOps, a, b) +} + +// VersionedResourceReader provides read access to versioned resources. 
+type VersionedResourceReader interface { + GetResource(labelsHash uint64) (*ResourceVersion, bool) + GetVersionedResource(labelsHash uint64) (*VersionedResource, bool) + GetResourceAt(labelsHash uint64, timestamp int64) (*ResourceVersion, bool) + IterResources(ctx context.Context, f func(labelsHash uint64, resource *ResourceVersion) error) error + IterVersionedResources(ctx context.Context, f func(labelsHash uint64, resources *VersionedResource) error) error + TotalResources() uint64 + TotalResourceVersions() uint64 +} + +// MemResourceStore is a type alias for the generic MemStore for resources. +type MemResourceStore = MemStore[*ResourceVersion] + +// NewMemResourceStore creates a new in-memory resource store. +func NewMemResourceStore() *MemResourceStore { + return NewMemStore[*ResourceVersion](ResourceOps) +} + +// EntitiesFromResourceVersion extracts all entities from a resource version. +func EntitiesFromResourceVersion(rv *ResourceVersion) []*Entity { + if rv == nil { + return nil + } + return slices.Clone(rv.Entities) +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/layered_reader.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/layered_reader.go new file mode 100644 index 00000000000..f428a99744d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/layered_reader.go @@ -0,0 +1,268 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package seriesmetadata + +import ( + "context" + + "github.com/prometheus/prometheus/model/labels" +) + +// layeredMetadataReader combines a base reader (blocks-merged, cached +// indefinitely) with a top reader (head, always live). Lookups check +// top first, then base. Iteration deduplicates by labelsHash. +type layeredMetadataReader struct { + base Reader // blocks-merged (immutable, cached) + top Reader // head (live, always current) +} + +// NewLayeredReader creates a Reader that layers top on base. +// Close only closes base; the caller manages top's lifecycle. +func NewLayeredReader(base, top Reader) Reader { + return &layeredMetadataReader{base: base, top: top} +} + +func (r *layeredMetadataReader) Close() error { + // Only close base (blocks). Head reader's Close is a no-op and + // the head manages its own lifecycle. + return r.base.Close() +} + +// --- VersionedResourceReader --- + +func (r *layeredMetadataReader) GetResource(labelsHash uint64) (*ResourceVersion, bool) { + if v, ok := r.top.GetResource(labelsHash); ok { + return v, true + } + return r.base.GetResource(labelsHash) +} + +func (r *layeredMetadataReader) GetVersionedResource(labelsHash uint64) (*VersionedResource, bool) { + topVR, topOK := r.top.GetVersionedResource(labelsHash) + baseVR, baseOK := r.base.GetVersionedResource(labelsHash) + switch { + case topOK && baseOK: + return MergeVersionedResources(baseVR, topVR), true + case topOK: + return topVR, true + case baseOK: + return baseVR, true + default: + return nil, false + } +} + +func (r *layeredMetadataReader) GetResourceAt(labelsHash uint64, timestamp int64) (*ResourceVersion, bool) { + if v, ok := r.top.GetResourceAt(labelsHash, timestamp); ok { + return v, true + } + return r.base.GetResourceAt(labelsHash, timestamp) +} + +func (r *layeredMetadataReader) IterResources(ctx context.Context, f func(labelsHash uint64, resource *ResourceVersion) error) error { + seen := make(map[uint64]struct{}) + // Top (head) first — has the most 
current data. + if err := r.top.IterResources(ctx, func(labelsHash uint64, resource *ResourceVersion) error { + seen[labelsHash] = struct{}{} + return f(labelsHash, resource) + }); err != nil { + return err + } + // Base — skip anything already seen. + return r.base.IterResources(ctx, func(labelsHash uint64, resource *ResourceVersion) error { + if _, ok := seen[labelsHash]; ok { + return nil + } + return f(labelsHash, resource) + }) +} + +func (r *layeredMetadataReader) IterVersionedResources(ctx context.Context, f func(labelsHash uint64, resources *VersionedResource) error) error { + // Phase 1: Collect only the hash set from top (head). This uses + // map[uint64]struct{} (~16 bytes/entry) instead of storing full + // *VersionedResource pointers (~56+ bytes/entry), saving ~40% memory. + topHashes := make(map[uint64]struct{}) + if err := r.top.IterVersionedResources(ctx, func(labelsHash uint64, _ *VersionedResource) error { + topHashes[labelsHash] = struct{}{} + return nil + }); err != nil { + return err + } + + // Phase 2: Iterate base. For shared hashes, merge via point lookup on top. + if err := r.base.IterVersionedResources(ctx, func(labelsHash uint64, baseVR *VersionedResource) error { + if _, shared := topHashes[labelsHash]; shared { + delete(topHashes, labelsHash) + topVR, _ := r.top.GetVersionedResource(labelsHash) + return f(labelsHash, MergeVersionedResources(baseVR, topVR)) + } + return f(labelsHash, baseVR) + }); err != nil { + return err + } + + // Phase 3: Emit remaining top-only entries via point lookups. + for labelsHash := range topHashes { + if err := ctx.Err(); err != nil { + return err + } + topVR, _ := r.top.GetVersionedResource(labelsHash) + if topVR != nil { + if err := f(labelsHash, topVR); err != nil { + return err + } + } + } + return nil +} + +func (r *layeredMetadataReader) TotalResources() uint64 { + // Approximate: sum may overcount shared hashes (acceptable). 
+ return r.base.TotalResources() + r.top.TotalResources() +} + +func (r *layeredMetadataReader) TotalResourceVersions() uint64 { + return r.base.TotalResourceVersions() + r.top.TotalResourceVersions() +} + +// --- VersionedScopeReader --- + +func (r *layeredMetadataReader) GetVersionedScope(labelsHash uint64) (*VersionedScope, bool) { + topVS, topOK := r.top.GetVersionedScope(labelsHash) + baseVS, baseOK := r.base.GetVersionedScope(labelsHash) + switch { + case topOK && baseOK: + return MergeVersionedScopes(baseVS, topVS), true + case topOK: + return topVS, true + case baseOK: + return baseVS, true + default: + return nil, false + } +} + +func (r *layeredMetadataReader) IterVersionedScopes(ctx context.Context, f func(labelsHash uint64, scopes *VersionedScope) error) error { + topHashes := make(map[uint64]struct{}) + if err := r.top.IterVersionedScopes(ctx, func(labelsHash uint64, _ *VersionedScope) error { + topHashes[labelsHash] = struct{}{} + return nil + }); err != nil { + return err + } + + if err := r.base.IterVersionedScopes(ctx, func(labelsHash uint64, baseVS *VersionedScope) error { + if _, shared := topHashes[labelsHash]; shared { + delete(topHashes, labelsHash) + topVS, _ := r.top.GetVersionedScope(labelsHash) + return f(labelsHash, MergeVersionedScopes(baseVS, topVS)) + } + return f(labelsHash, baseVS) + }); err != nil { + return err + } + + for labelsHash := range topHashes { + if err := ctx.Err(); err != nil { + return err + } + topVS, _ := r.top.GetVersionedScope(labelsHash) + if topVS != nil { + if err := f(labelsHash, topVS); err != nil { + return err + } + } + } + return nil +} + +func (r *layeredMetadataReader) TotalScopes() uint64 { + return r.base.TotalScopes() + r.top.TotalScopes() +} + +func (r *layeredMetadataReader) TotalScopeVersions() uint64 { + return r.base.TotalScopeVersions() + r.top.TotalScopeVersions() +} + +// --- Generic Reader methods --- + +func (r *layeredMetadataReader) IterKind(ctx context.Context, id KindID, f func(labelsHash 
uint64, versioned any) error) error { + seen := make(map[uint64]struct{}) + if err := r.top.IterKind(ctx, id, func(labelsHash uint64, versioned any) error { + seen[labelsHash] = struct{}{} + return f(labelsHash, versioned) + }); err != nil { + return err + } + return r.base.IterKind(ctx, id, func(labelsHash uint64, versioned any) error { + if _, ok := seen[labelsHash]; ok { + return nil + } + return f(labelsHash, versioned) + }) +} + +func (r *layeredMetadataReader) KindLen(id KindID) int { + return r.base.KindLen(id) + r.top.KindLen(id) +} + +func (r *layeredMetadataReader) LabelsForHash(labelsHash uint64) (labels.Labels, bool) { + // Top (head) has live series — check first. + if lset, ok := r.top.LabelsForHash(labelsHash); ok { + return lset, true + } + return r.base.LabelsForHash(labelsHash) +} + +func (r *layeredMetadataReader) LookupResourceAttr(key, value string) []uint64 { + baseSet := r.base.LookupResourceAttr(key, value) + topSet := r.top.LookupResourceAttr(key, value) + + if baseSet == nil && topSet == nil { + return nil + } + if baseSet == nil { + return topSet + } + if topSet == nil { + return baseSet + } + + return MergeSortedUnique(baseSet, topSet) +} + +// MergeSortedUnique merges two sorted uint64 slices into a new sorted slice +// containing all unique values from both inputs. Both inputs must be sorted +// in ascending order. +func MergeSortedUnique(a, b []uint64) []uint64 { + result := make([]uint64, 0, len(a)+len(b)) + i, j := 0, 0 + for i < len(a) && j < len(b) { + switch { + case a[i] < b[j]: + result = append(result, a[i]) + i++ + case a[i] > b[j]: + result = append(result, b[j]) + j++ + default: + result = append(result, a[i]) + i++ + j++ + } + } + result = append(result, a[i:]...) + result = append(result, b[j:]...) 
+ return result +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/mem_store.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/mem_store.go new file mode 100644 index 00000000000..e62c8fdc718 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/mem_store.go @@ -0,0 +1,395 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +import ( + "context" + "sync" +) + +// numMemStoreStripes is the number of shards in a MemStore. +// Must be a power of two for fast modulo via bitmask. +const numMemStoreStripes = 256 + +// versionedEntry stores versioned metadata with a labels hash for indexing. +type versionedEntry[V VersionConstraint] struct { + labelsHash uint64 + versioned *Versioned[V] +} + +// memStoreStripe is a single shard of a MemStore. Each stripe has its own +// mutex and map to reduce lock contention across concurrent goroutines. +type memStoreStripe[V VersionConstraint] struct { + mtx sync.RWMutex + byHash map[uint64]*versionedEntry[V] + _ [40]byte // cache-line padding to prevent false sharing +} + +// contentStripe is a single shard of the content-addressed dedup table. +type contentStripe[V VersionConstraint] struct { + mtx sync.RWMutex + byHash map[uint64]V + _ [40]byte // cache-line padding to prevent false sharing +} + +// MemStore is a generic in-memory store for versioned metadata keyed by labels hash. 
+// It uses 256-way sharding (matching the stripeSeries pattern in tsdb/head.go) +// to reduce lock contention under high concurrency. +// +// When the KindOps also implements ContentDedupOps, MemStore maintains a +// content-addressed table so that versions with identical content share +// map/slice pointers from a single canonical entry, reducing per-version +// memory from ~1500B to ~72B. +// +// It is safe for concurrent use. +type MemStore[V VersionConstraint] struct { + stripes [numMemStoreStripes]memStoreStripe[V] + contentStripes [numMemStoreStripes]contentStripe[V] + ops KindOps[V] + dedupOps ContentDedupOps[V] // nil when ops doesn't implement ContentDedupOps +} + +// NewMemStore creates a new generic in-memory store. +func NewMemStore[V VersionConstraint](ops KindOps[V]) *MemStore[V] { + m := &MemStore[V]{ops: ops} + for i := range m.stripes { + m.stripes[i].byHash = make(map[uint64]*versionedEntry[V]) + } + if dedupOps, ok := any(ops).(ContentDedupOps[V]); ok { + m.dedupOps = dedupOps + for i := range m.contentStripes { + m.contentStripes[i].byHash = make(map[uint64]V) + } + } + return m +} + +func (m *MemStore[V]) stripe(labelsHash uint64) *memStoreStripe[V] { + return &m.stripes[labelsHash&uint64(numMemStoreStripes-1)] +} + +// getOrCreateCanonical returns the canonical version for the given content hash. +// If no canonical exists yet, it deep-copies v via ops.Copy and stores it. +// Uses double-checked locking: RLock first, then Lock only on miss. 
+func (m *MemStore[V]) getOrCreateCanonical(hash uint64, v V) V {
+	cs := &m.contentStripes[hash&uint64(numMemStoreStripes-1)]
+
+	// Optimistic read path: most content hashes already have a canonical entry.
+	cs.mtx.RLock()
+	canonical, ok := cs.byHash[hash]
+	cs.mtx.RUnlock()
+	if ok {
+		return canonical
+	}
+
+	// Slow path: re-check under the write lock, since another goroutine
+	// may have inserted between the RUnlock above and the Lock here.
+	cs.mtx.Lock()
+	defer cs.mtx.Unlock()
+	if canonical, ok = cs.byHash[hash]; ok {
+		return canonical
+	}
+	canonical = m.ops.Copy(v)
+	cs.byHash[hash] = canonical
+	return canonical
+}
+
+// internVersions replaces deep-copied versions with thin copies sharing canonical
+// map/slice pointers. No-op when dedupOps is nil.
+func (m *MemStore[V]) internVersions(vs *Versioned[V]) {
+	if m.dedupOps == nil {
+		return
+	}
+	for i := range vs.Versions {
+		v := vs.Versions[i]
+		canonical := m.getOrCreateCanonical(m.dedupOps.ContentHash(v), v)
+		vs.Versions[i] = m.dedupOps.ThinCopy(canonical, v)
+	}
+}
+
+// internLastVersion replaces only the last version with a thin copy.
+// Used after AddOrExtend appends a new version.
+func (m *MemStore[V]) internLastVersion(vs *Versioned[V]) {
+	if m.dedupOps == nil || len(vs.Versions) == 0 {
+		return
+	}
+	idx := len(vs.Versions) - 1
+	v := vs.Versions[idx]
+	canonical := m.getOrCreateCanonical(m.dedupOps.ContentHash(v), v)
+	vs.Versions[idx] = m.dedupOps.ThinCopy(canonical, v)
+}
+
+// InternVersion returns a thin copy of v sharing map/slice pointers from the
+// canonical entry in the content table. Used for per-series interning from
+// the head commit and WAL replay paths.
+// Returns v unchanged when dedup is not enabled.
+func (m *MemStore[V]) InternVersion(v V) V {
+	if m.dedupOps == nil {
+		return v
+	}
+	canonical := m.getOrCreateCanonical(m.dedupOps.ContentHash(v), v)
+	return m.dedupOps.ThinCopy(canonical, v)
+}
+
+// TotalCanonical returns the number of unique canonical entries in the content table.
+func (m *MemStore[V]) TotalCanonical() int { + if m.dedupOps == nil { + return 0 + } + var total int + for i := range m.contentStripes { + cs := &m.contentStripes[i] + cs.mtx.RLock() + total += len(cs.byHash) + cs.mtx.RUnlock() + } + return total +} + +// Len returns the number of unique series with metadata. +func (m *MemStore[V]) Len() int { + var total int + for i := range m.stripes { + s := &m.stripes[i] + s.mtx.RLock() + total += len(s.byHash) + s.mtx.RUnlock() + } + return total +} + +// Get returns the current (latest) version for the series. +func (m *MemStore[V]) Get(labelsHash uint64) (V, bool) { + s := m.stripe(labelsHash) + s.mtx.RLock() + defer s.mtx.RUnlock() + entry, ok := s.byHash[labelsHash] + if !ok || len(entry.versioned.Versions) == 0 { + var zero V + return zero, false + } + return entry.versioned.CurrentVersion() +} + +// GetVersioned returns all versions for the series. +func (m *MemStore[V]) GetVersioned(labelsHash uint64) (*Versioned[V], bool) { + s := m.stripe(labelsHash) + s.mtx.RLock() + defer s.mtx.RUnlock() + entry, ok := s.byHash[labelsHash] + if !ok { + return nil, false + } + return entry.versioned, true +} + +// GetAt returns the version active at the given timestamp. +func (m *MemStore[V]) GetAt(labelsHash uint64, timestamp int64) (V, bool) { + s := m.stripe(labelsHash) + s.mtx.RLock() + defer s.mtx.RUnlock() + entry, ok := s.byHash[labelsHash] + if !ok { + var zero V + return zero, false + } + return entry.versioned.VersionAt(timestamp) +} + +// Set stores a single version for the series. +// If data already exists, a new version is created if it differs, +// or the existing version's time range is extended if identical. 
+func (m *MemStore[V]) Set(labelsHash uint64, version V) {
+	s := m.stripe(labelsHash)
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
+	if existing, ok := s.byHash[labelsHash]; ok {
+		prevLen := len(existing.versioned.Versions)
+		existing.versioned.AddOrExtend(m.ops, version)
+		// Only intern when AddOrExtend actually appended a new version;
+		// a pure time-range extension leaves existing versions untouched.
+		if len(existing.versioned.Versions) > prevLen {
+			m.internLastVersion(existing.versioned)
+		}
+		return
+	}
+
+	// First version for this series: deep-copy before storing so the
+	// caller's value cannot mutate store state, then intern for dedup.
+	entry := &versionedEntry[V]{
+		labelsHash: labelsHash,
+		versioned:  &Versioned[V]{Versions: []V{m.ops.Copy(version)}},
+	}
+	m.internVersions(entry.versioned)
+	s.byHash[labelsHash] = entry
+}
+
+// SetVersioned stores versioned data for the series.
+// Used during compaction and loading from Parquet.
+func (m *MemStore[V]) SetVersioned(labelsHash uint64, versioned *Versioned[V]) {
+	s := m.stripe(labelsHash)
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
+	if existing, ok := s.byHash[labelsHash]; ok {
+		existing.versioned = MergeVersioned(m.ops, existing.versioned, versioned)
+		m.internVersions(existing.versioned)
+		return
+	}
+
+	entry := &versionedEntry[V]{
+		labelsHash: labelsHash,
+		versioned:  versioned.Copy(m.ops),
+	}
+	m.internVersions(entry.versioned)
+	s.byHash[labelsHash] = entry
+}
+
+// SetVersionedWithDiff atomically stores versioned data and returns the
+// versioned state before and after the operation in a single lock acquisition.
+// Returns (nil, new) for first insert, (old, merged) for merge.
+//
+// NOTE(review): in the fast path below, old aliases the live entry and the
+// time-range extension mutates it in place, so old is NOT a pre-update
+// snapshot — on the fast path old and cur reference the same object.
+// Callers diffing old vs cur should compare version count/content rather
+// than time ranges. TODO confirm this aliasing is intended.
+func (m *MemStore[V]) SetVersionedWithDiff(labelsHash uint64, versioned *Versioned[V]) (old, cur *Versioned[V]) {
+	s := m.stripe(labelsHash)
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
+	if existing, ok := s.byHash[labelsHash]; ok {
+		old = existing.versioned
+
+		// Fast path: single incoming version that matches current — just extend time range.
+		// This is the common case (~90% of commits) where resource/scope content hasn't changed.
+		// Mirrors AddOrExtend (versioned.go) — zero allocations vs ~12 from MergeVersioned.
+ if len(versioned.Versions) == 1 && len(existing.versioned.Versions) > 0 { + current := existing.versioned.Versions[len(existing.versioned.Versions)-1] + incoming := versioned.Versions[0] + if m.ops.Equal(current, incoming) { + current.UpdateTimeRange(incoming.GetMinTime(), incoming.GetMaxTime()) + return old, existing.versioned + } + } + + existing.versioned = MergeVersioned(m.ops, existing.versioned, versioned) + m.internVersions(existing.versioned) + return old, existing.versioned + } + + entry := &versionedEntry[V]{ + labelsHash: labelsHash, + versioned: versioned.Copy(m.ops), + } + m.internVersions(entry.versioned) + s.byHash[labelsHash] = entry + return nil, entry.versioned +} + +// Delete removes all metadata for the series. +func (m *MemStore[V]) Delete(labelsHash uint64) { + s := m.stripe(labelsHash) + s.mtx.Lock() + defer s.mtx.Unlock() + delete(s.byHash, labelsHash) +} + +// checkContextEveryNIterations controls how often ctx.Err() is checked during iteration. +const checkContextEveryNIterations = 100 + +// snapshotEntries returns a shallow copy of all entries across all stripes. +// Each stripe's read lock is held briefly while copying its entries, then released +// before moving to the next stripe. This avoids holding all locks simultaneously +// and prevents writer starvation on large stores. +// The *versionedEntry pointers themselves are stable (not deleted, only +// replaced), and the Versioned inside is append-only, so a shallow +// snapshot is safe for read-only iteration. +func (m *MemStore[V]) snapshotEntries() []*versionedEntry[V] { + // Pre-count total entries to allocate once. 
+ var total int + for i := range m.stripes { + s := &m.stripes[i] + s.mtx.RLock() + total += len(s.byHash) + s.mtx.RUnlock() + } + + entries := make([]*versionedEntry[V], 0, total) + for i := range m.stripes { + s := &m.stripes[i] + s.mtx.RLock() + for _, entry := range s.byHash { + entries = append(entries, entry) + } + s.mtx.RUnlock() + } + return entries +} + +// Iter calls the function for each series' current version. +// A snapshot of entries is taken under the lock, then iteration proceeds +// without holding the lock to avoid blocking writers for large stores. +func (m *MemStore[V]) Iter(ctx context.Context, f func(labelsHash uint64, version V) error) error { + snapshot := m.snapshotEntries() + for i, entry := range snapshot { + if i%checkContextEveryNIterations == 0 { + if err := ctx.Err(); err != nil { + return err + } + } + current, ok := entry.versioned.CurrentVersion() + if ok { + if err := f(entry.labelsHash, current); err != nil { + return err + } + } + } + return nil +} + +// IterVersioned calls the function for each series' versioned data. +// A snapshot of entries is taken under the lock, then iteration proceeds +// without holding the lock to avoid blocking writers for large stores. +func (m *MemStore[V]) IterVersioned(ctx context.Context, f func(labelsHash uint64, versioned *Versioned[V]) error) error { + snapshot := m.snapshotEntries() + for i, entry := range snapshot { + if i%checkContextEveryNIterations == 0 { + if err := ctx.Err(); err != nil { + return err + } + } + if err := f(entry.labelsHash, entry.versioned); err != nil { + return err + } + } + return nil +} + +// TotalEntries returns the count of series with metadata. +func (m *MemStore[V]) TotalEntries() uint64 { + var total uint64 + for i := range m.stripes { + s := &m.stripes[i] + s.mtx.RLock() + total += uint64(len(s.byHash)) + s.mtx.RUnlock() + } + return total +} + +// TotalVersions returns the total count of all versions across all series. 
+func (m *MemStore[V]) TotalVersions() uint64 { + var total uint64 + for i := range m.stripes { + s := &m.stripes[i] + s.mtx.RLock() + for _, entry := range s.byHash { + total += uint64(len(entry.versioned.Versions)) + } + s.mtx.RUnlock() + } + return total +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/parquet_schema.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/parquet_schema.go new file mode 100644 index 00000000000..6bec463058d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/parquet_schema.go @@ -0,0 +1,141 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +// Namespace constants for discriminating row types in the Parquet file. +const ( + // NamespaceResource indicates a row contains a unified resource with attributes and entities. + NamespaceResource = "resource" + // NamespaceScope indicates a row contains OTel InstrumentationScope data. + NamespaceScope = "scope" + + // NamespaceResourceTable indicates a content-addressed resource table row. + // Contains the full resource content (identifying/descriptive attrs, entities) + // keyed by ContentHash. No SeriesRef or time range. + NamespaceResourceTable = "resource_table" + // NamespaceResourceMapping maps a series (SeriesRef) to a resource (ContentHash) + // at a specific time range (MinTime/MaxTime). 
+ NamespaceResourceMapping = "resource_mapping" + // NamespaceScopeTable indicates a content-addressed scope table row. + // Contains the full scope content keyed by ContentHash. + NamespaceScopeTable = "scope_table" + // NamespaceScopeMapping maps a series (SeriesRef) to a scope (ContentHash) + // at a specific time range (MinTime/MaxTime). + NamespaceScopeMapping = "scope_mapping" + + // NamespaceResourceAttrIndex stores inverted index entries mapping + // resource attribute key:value pairs to series refs. Each row represents + // one (key, value, seriesRef) tuple. The key and value are stored in + // IdentifyingAttrs[0]; ContentHash is xxhash("key\x00value") for bloom + // filter skipability. + NamespaceResourceAttrIndex = "resource_attr_index" +) + +// Identifying attribute keys per OTel semantic conventions. +const ( + AttrServiceName = "service.name" + AttrServiceNamespace = "service.namespace" + AttrServiceInstanceID = "service.instance.id" +) + +// AttributeEntry represents a single resource attribute in the Parquet schema. +// Used as a nested list within metadataRow for backward compatibility. +type AttributeEntry struct { + // Key is the attribute name (e.g., "deployment.environment"). + Key string `parquet:"key"` + // Value is the attribute value as a string. + Value string `parquet:"value"` + // IsIdentifying indicates if this attribute is an identifying attribute + // (service.name, service.namespace, service.instance.id). + IsIdentifying bool `parquet:"is_identifying"` +} + +// EntityAttributeEntry represents a single entity attribute in the Parquet schema. +// Used for the entity namespace with separate identifying and descriptive attribute lists. +type EntityAttributeEntry struct { + // Key is the attribute name. + Key string `parquet:"key"` + // Value is the attribute value as a string. + Value string `parquet:"value"` +} + +// EntityRow represents a single entity within a resource version in the Parquet schema. 
+type EntityRow struct { + // Type is the entity type (e.g., "service", "host", "container", "resource"). + Type string `parquet:"type"` + // ID contains identifying attributes that uniquely identify the entity. + ID []EntityAttributeEntry `parquet:"id,list"` + // Description contains descriptive (non-identifying) attributes. + Description []EntityAttributeEntry `parquet:"description,list"` +} + +// metadataRow is the unified Parquet schema for OTel resources and scopes. +// The Namespace field discriminates the logical row type. +type metadataRow struct { + // Namespace discriminates the row type: "resource_table", "resource_mapping", + // "scope_table", or "scope_mapping". + Namespace string `parquet:"namespace"` + + // SeriesRef identifies the series for mapping rows. In block Parquet files + // this is the block-level series reference; the read path uses a RefResolver + // to convert back to labelsHash for in-memory lookups. When no RefResolver + // is provided (e.g. head/test writes), labelsHash is stored directly. + SeriesRef uint64 `parquet:"series_ref"` + + // MinTime is the minimum timestamp (in milliseconds) when this data was active. + MinTime int64 `parquet:"mint,optional"` + + // MaxTime is the maximum timestamp (in milliseconds) when this data was active. + MaxTime int64 `parquet:"maxt,optional"` + + // ContentHash is the xxhash of content for content-addressed rows. + // Used for resource_table/resource_mapping and scope_table/scope_mapping rows. + ContentHash uint64 `parquet:"content_hash,optional"` + + // --- Resource fields (namespace="resource") --- + + // IdentifyingAttrs contains the resource-level identifying attributes. + IdentifyingAttrs []EntityAttributeEntry `parquet:"identifying_attrs,list,optional"` + + // DescriptiveAttrs contains the resource-level descriptive attributes. + DescriptiveAttrs []EntityAttributeEntry `parquet:"descriptive_attrs,list,optional"` + + // Entities contains typed entities associated with this resource version. 
+ Entities []EntityRow `parquet:"entities,list,optional"` + + // --- Scope fields (namespace="scope") --- + + // ScopeName is the InstrumentationScope name. + ScopeName string `parquet:"scope_name,optional"` + + // ScopeVersionStr is the InstrumentationScope version. + // Named "scope_version_str" to avoid clash with Parquet reserved words. + ScopeVersionStr string `parquet:"scope_version_str,optional"` + + // SchemaURL is the InstrumentationScope schema URL. + SchemaURL string `parquet:"schema_url,optional"` + + // ScopeAttrs contains InstrumentationScope attributes. + ScopeAttrs []EntityAttributeEntry `parquet:"scope_attrs,list,optional"` + + // --- Resource attribute index fields (namespace="resource_attr_index") --- + + // AttrKey is the attribute name for resource_attr_index rows. + // Top-level column enables Parquet-native filtering (column stats, bloom filters). + AttrKey string `parquet:"attr_key,optional"` + + // AttrValue is the attribute value for resource_attr_index rows. + // Top-level column enables Parquet-native filtering (column stats, bloom filters). + AttrValue string `parquet:"attr_value,optional"` +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/reader_options.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/reader_options.go new file mode 100644 index 00000000000..078fa479d90 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/reader_options.go @@ -0,0 +1,82 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +import ( + "io" + "log/slog" +) + +// ReaderOption configures Parquet read behavior. +type ReaderOption func(*readerOptions) + +type readerOptions struct { + namespaceFilter map[string]struct{} + refResolver func(seriesRef uint64) (labelsHash uint64, ok bool) +} + +// WithNamespaceFilter restricts loading to rows matching the given namespaces. +// When set, row groups for non-matching namespaces are skipped entirely (for +// files written with namespace-partitioned row groups). For single-row-group +// files, all rows are read but only matching namespaces are processed. +// +// Note: resource_mapping rows require resource_table rows to resolve; +// include both when filtering for resources. +func WithNamespaceFilter(namespaces ...string) ReaderOption { + return func(o *readerOptions) { + o.namespaceFilter = make(map[string]struct{}, len(namespaces)) + for _, ns := range namespaces { + o.namespaceFilter[ns] = struct{}{} + } + } +} + +// WithRefResolver provides a function that converts a block-level seriesRef +// (stored in Parquet mapping rows) back to a labelsHash for in-memory lookups. +// If not set, SeriesRef values are used as-is (handles head-written files +// where seriesRef == labelsHash). +func WithRefResolver(fn func(seriesRef uint64) (labelsHash uint64, ok bool)) ReaderOption { + return func(o *readerOptions) { + o.refResolver = fn + } +} + +// WithResourceAttrIndexOnly loads only the resource attribute inverted index. +// Useful for Mimir store-gateway reverse-lookup queries that only need the index. +func WithResourceAttrIndexOnly() ReaderOption { + return WithNamespaceFilter(NamespaceResourceAttrIndex) +} + +// WithResourceData loads resource tables and mappings (no inverted index). 
+func WithResourceData() ReaderOption { + return WithNamespaceFilter(NamespaceResourceTable, NamespaceResourceMapping) +} + +// WithScopeData loads scope tables and mappings. +func WithScopeData() ReaderOption { + return WithNamespaceFilter(NamespaceScopeTable, NamespaceScopeMapping) +} + +// WithFullResourceData loads resource tables, mappings, and inverted index. +func WithFullResourceData() ReaderOption { + return WithNamespaceFilter(NamespaceResourceTable, NamespaceResourceMapping, NamespaceResourceAttrIndex) +} + +// ReadSeriesMetadataFromReaderAt reads series metadata from an io.ReaderAt. +// This is the API for distributed systems like Mimir that provide +// objstore.Bucket-backed readers. The caller is responsible for closing +// the underlying reader. +func ReadSeriesMetadataFromReaderAt(logger *slog.Logger, r io.ReaderAt, size int64, opts ...ReaderOption) (Reader, error) { + return newParquetReaderFromReaderAt(logger, r, size, opts...) +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/registry.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/registry.go new file mode 100644 index 00000000000..d48626848ea --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/registry.go @@ -0,0 +1,177 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +import ( + "context" + "log/slog" +) + +// KindID uniquely identifies a metadata kind. 
// KindID uniquely identifies a metadata kind.
type KindID string

const (
	KindResource KindID = "resource"
	KindScope    KindID = "scope"
)

// WALRecordType mirrors record.Type (uint8) to avoid an import cycle
// between seriesmetadata and tsdb/record.
type WALRecordType = uint8

// WAL record type constants matching record.ResourceUpdate and record.ScopeUpdate.
// NOTE(review): these literals must stay in lockstep with tsdb/record; a
// mismatch would silently misroute WAL records — confirm a cross-package test
// pins both sides.
const (
	WALResourceUpdate WALRecordType = 11
	WALScopeUpdate    WALRecordType = 12
)

// KindDescriptor provides runtime dispatch for a metadata kind at
// serialization boundaries (WAL, Parquet, head commit/replay).
// Methods use `any` for type erasure since the registry is not generic.
type KindDescriptor interface {
	// ID returns the unique identifier for this kind.
	ID() KindID

	// WALRecordType returns the WAL record type constant for this kind.
	WALRecordType() WALRecordType

	// TableNamespace returns the Parquet namespace for content-addressed table rows.
	TableNamespace() string

	// MappingNamespace returns the Parquet namespace for series mapping rows.
	MappingNamespace() string

	// --- WAL encode/decode (type-erased at boundary) ---

	// DecodeWAL decodes a WAL record. `into` is a pooled slice to reuse.
	// Returns the decoded slice (type-erased).
	DecodeWAL(rec []byte, into any) (any, error)

	// EncodeWAL encodes records into a WAL record. Returns the encoded bytes.
	EncodeWAL(records any, buf []byte) []byte

	// --- Parquet conversion ---

	// ParseTableRow converts a Parquet table row into a version value (type-erased).
	ParseTableRow(logger *slog.Logger, row *metadataRow) any

	// BuildTableRow builds a Parquet table row from a content hash and version.
	BuildTableRow(contentHash uint64, version any) metadataRow

	// ContentHash computes the content hash for a version value.
	ContentHash(version any) uint64

	// --- Head integration ---

	// CommitToSeries applies a single WAL record entry to a memSeries.
	// `series` is a kindMetaAccessor, `walRecord` is a single WAL record entry.
	CommitToSeries(series, walRecord any)

	// CollectFromSeries extracts the *Versioned[V] from a memSeries (type-erased).
	// Returns nil, false if the series has no data for this kind.
	CollectFromSeries(series any) (any, bool)

	// CopyVersioned deep-copies a *Versioned[V] (type-erased).
	CopyVersioned(v any) any

	// SetOnSeries sets the *Versioned[V] on a memSeries (type-erased).
	SetOnSeries(series, versioned any)

	// --- Store operations (type-erased wrappers around MemStore) ---

	// NewStore creates a new *MemStore[V] (type-erased).
	NewStore() any

	// SetVersioned merges versioned data into the store.
	SetVersioned(store any, labelsHash uint64, versioned any)

	// IterVersioned iterates all entries in the store.
	IterVersioned(ctx context.Context, store any, f func(labelsHash uint64, versioned any) error) error

	// StoreLen returns the number of entries in the store.
	StoreLen(store any) int

	// --- Parquet denormalization ---

	// DenormalizeIntoStore copies versions from mapping rows, sets their time ranges,
	// sorts by MinTime, and sets the resulting *Versioned[V] on the store.
	DenormalizeIntoStore(store any, labelsHash uint64, versions []VersionWithTime)

	// IterateVersions iterates each version in a *Versioned[V] (type-erased),
	// calling f with the version and its MinTime/MaxTime.
	IterateVersions(versioned any, f func(version any, minTime, maxTime int64))

	// VersionsEqual compares two version values for content equality.
	VersionsEqual(a, b any) bool
}

// kindMetaAccessor is implemented by memSeries to provide kind-generic access
// to per-series metadata. Defined here (in seriesmetadata) so that KindDescriptor
// implementations can use it without importing tsdb.
type kindMetaAccessor interface {
	GetKindMeta(id KindID) (any, bool)
	SetKindMeta(id KindID, v any)
}

// Global kind registry. The maps are written only by RegisterKind and read
// thereafter without locking, so all registration must happen during package
// init (before any concurrent reads).
var (
	kindsByID          = make(map[KindID]KindDescriptor)
	kindsByWALType     = make(map[WALRecordType]KindDescriptor)
	kindsByTableNS     = make(map[string]KindDescriptor)
	kindsByMappingNS   = make(map[string]KindDescriptor)
	allKindsRegistered []KindDescriptor
)

// RegisterKind registers a KindDescriptor in the global registry.
// Must be called from init() functions; it is not safe for concurrent use
// and panics on a duplicate KindID.
func RegisterKind(desc KindDescriptor) {
	id := desc.ID()
	if _, exists := kindsByID[id]; exists {
		panic("duplicate kind registration: " + string(id))
	}
	kindsByID[id] = desc
	kindsByWALType[desc.WALRecordType()] = desc
	kindsByTableNS[desc.TableNamespace()] = desc
	kindsByMappingNS[desc.MappingNamespace()] = desc
	allKindsRegistered = append(allKindsRegistered, desc)
}

// KindByID looks up a kind by its unique ID.
func KindByID(id KindID) (KindDescriptor, bool) {
	d, ok := kindsByID[id]
	return d, ok
}

// KindByWALType looks up a kind by its WAL record type.
func KindByWALType(t WALRecordType) (KindDescriptor, bool) {
	d, ok := kindsByWALType[t]
	return d, ok
}

// KindByTableNS looks up a kind by its Parquet table namespace.
func KindByTableNS(ns string) (KindDescriptor, bool) {
	d, ok := kindsByTableNS[ns]
	return d, ok
}

// KindByMappingNS looks up a kind by its Parquet mapping namespace.
func KindByMappingNS(ns string) (KindDescriptor, bool) {
	d, ok := kindsByMappingNS[ns]
	return d, ok
}
+func AllKinds() []KindDescriptor { + return allKindsRegistered +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/resource_attributes.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/resource_attributes.go new file mode 100644 index 00000000000..111881a032a --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/resource_attributes.go @@ -0,0 +1,55 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +// IsIdentifyingAttribute returns true if the given key is an identifying attribute. +// Identifying attributes are used to uniquely identify a resource. +func IsIdentifyingAttribute(key string) bool { + switch key { + case AttrServiceName, AttrServiceNamespace, AttrServiceInstanceID: + return true + default: + return false + } +} + +// AttributesEqual compares two attribute maps for equality. +func AttributesEqual(a, b map[string]string) bool { + if len(a) != len(b) { + return false + } + for k, v := range a { + if bv, ok := b[k]; !ok || bv != v { + return false + } + } + return true +} + +// SplitAttributes splits a flat attribute map into identifying and descriptive maps +// based on the default identifying attribute keys. 
+func SplitAttributes(attrs map[string]string) (identifying, descriptive map[string]string) { + identifying = make(map[string]string) + descriptive = make(map[string]string, len(attrs)) + + for k, v := range attrs { + if IsIdentifyingAttribute(k) { + identifying[k] = v + } else { + descriptive[k] = v + } + } + + return identifying, descriptive +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/resource_kind.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/resource_kind.go new file mode 100644 index 00000000000..28cbf47a3a0 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/resource_kind.go @@ -0,0 +1,243 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +import ( + "cmp" + "context" + "log/slog" + "maps" + "slices" + + "github.com/cespare/xxhash/v2" +) + +func init() { + RegisterKind(&resourceKindDescriptor{}) +} + +// resourceOps implements KindOps for *ResourceVersion. 
+type resourceOps struct{} + +func (resourceOps) Equal(a, b *ResourceVersion) bool { return ResourceVersionsEqual(a, b) } +func (resourceOps) Copy(v *ResourceVersion) *ResourceVersion { return copyResourceVersion(v) } + +func (resourceOps) ContentHash(v *ResourceVersion) uint64 { return hashResourceContent(v) } +func (resourceOps) ThinCopy(canonical, v *ResourceVersion) *ResourceVersion { + return &ResourceVersion{ + Identifying: canonical.Identifying, + Descriptive: canonical.Descriptive, + Entities: canonical.Entities, + MinTime: v.MinTime, + MaxTime: v.MaxTime, + } +} + +// ResourceOps is the shared KindOps instance for resources. +var ResourceOps KindOps[*ResourceVersion] = resourceOps{} + +// hashResourceContent computes a deterministic xxhash for a ResourceVersion's content. +// The hash covers identifying attrs, descriptive attrs, and all entities. +// It does NOT include MinTime/MaxTime since those are per-mapping, not per-content. +func hashResourceContent(rv *ResourceVersion) uint64 { + h := xxhash.New() + + hashAttrs(h, rv.Identifying) + _, _ = h.Write([]byte{1}) // section separator + hashAttrs(h, rv.Descriptive) + _, _ = h.Write([]byte{1}) + + // Entities must be sorted by Type (enforced by NewResourceVersion and parseResourceContent). + for _, e := range rv.Entities { + _, _ = h.WriteString(e.Type) + _, _ = h.Write([]byte{0}) + hashAttrs(h, e.ID) + _, _ = h.Write([]byte{1}) + hashAttrs(h, e.Description) + _, _ = h.Write([]byte{1}) + } + + return h.Sum64() +} + +// resourceKindDescriptor implements KindDescriptor for OTel resources. 
type resourceKindDescriptor struct{}

func (*resourceKindDescriptor) ID() KindID                   { return KindResource }
func (*resourceKindDescriptor) WALRecordType() WALRecordType { return WALResourceUpdate }
func (*resourceKindDescriptor) TableNamespace() string       { return NamespaceResourceTable }
func (*resourceKindDescriptor) MappingNamespace() string     { return NamespaceResourceMapping }

// DecodeWAL and EncodeWAL are implemented in tsdb/head_wal_kind.go to avoid
// importing tsdb/record from this package (which would create an import cycle).
// The descriptor delegates to pluggable functions set during init.
func (*resourceKindDescriptor) DecodeWAL(rec []byte, into any) (any, error) {
	return ResourceDecodeWAL(rec, into)
}

func (*resourceKindDescriptor) EncodeWAL(records any, buf []byte) []byte {
	return ResourceEncodeWAL(records, buf)
}

// ResourceDecodeWAL is set by the tsdb package to break the import cycle.
// NOTE(review): calling DecodeWAL before tsdb's init assigns this hook
// nil-panics; presumably guaranteed by package init order — verify.
var ResourceDecodeWAL func(rec []byte, into any) (any, error)

// ResourceEncodeWAL is set by the tsdb package to break the import cycle.
var ResourceEncodeWAL func(records any, buf []byte) []byte

// ParseTableRow converts a Parquet resource table row into a *ResourceVersion.
func (*resourceKindDescriptor) ParseTableRow(logger *slog.Logger, row *metadataRow) any {
	return parseResourceContent(logger, row)
}

// BuildTableRow builds a content-addressed Parquet table row for a resource version.
func (*resourceKindDescriptor) BuildTableRow(contentHash uint64, version any) metadataRow {
	return buildResourceTableRow(contentHash, version.(*ResourceVersion))
}

// ContentHash computes the content hash for a *ResourceVersion.
func (*resourceKindDescriptor) ContentHash(version any) uint64 {
	return hashResourceContent(version.(*ResourceVersion))
}

// CommitToSeries is the cold-path (WAL replay) entry; it delegates to the
// shared hot-path implementation.
func (*resourceKindDescriptor) CommitToSeries(series, walRecord any) {
	CommitResourceDirect(series.(kindMetaAccessor), walRecord.(ResourceCommitData))
}

// ResourceEntityData is a lightweight struct for passing entity data
// from WAL records without importing tsdb/record.
type ResourceEntityData struct {
	Type        string
	ID          map[string]string
	Description map[string]string
}

// ResourceCommitData carries resource WAL record data without importing tsdb/record.
type ResourceCommitData struct {
	Identifying map[string]string
	Descriptive map[string]string
	Entities    []ResourceEntityData
	MinTime     int64
	MaxTime     int64
}

// CommitResourceDirect is the hot-path commit for resources.
// It constructs the ResourceVersion with exactly one deep copy of each map
// (from the caller's buffers into stored metadata) and takes ownership of
// the result — no further copies via AddOrExtend or copyResourceVersion.
// Called directly from headAppenderBase.commitResources and from
// CommitToSeries (cold path, WAL replay).
func CommitResourceDirect(accessor kindMetaAccessor, rcd ResourceCommitData) {
	entities := make([]*Entity, len(rcd.Entities))
	for j, e := range rcd.Entities {
		// Empty type defaults to the generic "resource" entity type.
		entityType := e.Type
		if entityType == "" {
			entityType = EntityTypeResource
		}
		entities[j] = &Entity{
			Type:        entityType,
			ID:          maps.Clone(e.ID),
			Description: maps.Clone(e.Description),
		}
	}
	// Sorting by Type keeps the entity order deterministic for content hashing.
	// NOTE(review): SortFunc is unstable; duplicate Types would make the order
	// (and the content hash) input-order dependent — confirm Types are unique.
	slices.SortFunc(entities, func(a, b *Entity) int {
		return cmp.Compare(a.Type, b.Type)
	})

	rv := &ResourceVersion{
		Identifying: maps.Clone(rcd.Identifying),
		Descriptive: maps.Clone(rcd.Descriptive),
		Entities:    entities,
		MinTime:     rcd.MinTime,
		MaxTime:     rcd.MaxTime,
	}

	existing, _ := accessor.GetKindMeta(KindResource)
	if existing == nil {
		accessor.SetKindMeta(KindResource, &Versioned[*ResourceVersion]{
			Versions: []*ResourceVersion{rv},
		})
	} else {
		// If the content is unchanged vs. the latest version, only extend its
		// time range; otherwise append a new version.
		vr := existing.(*Versioned[*ResourceVersion])
		if len(vr.Versions) > 0 && ResourceOps.Equal(vr.Versions[len(vr.Versions)-1], rv) {
			vr.Versions[len(vr.Versions)-1].UpdateTimeRange(rv.MinTime, rv.MaxTime)
		} else {
			vr.Versions = append(vr.Versions, rv)
		}
	}
}
boxing on the return path. +func CollectResourceDirect(accessor kindMetaAccessor) (*VersionedResource, bool) { + v, ok := accessor.GetKindMeta(KindResource) + if !ok || v == nil { + return nil, false + } + return v.(*Versioned[*ResourceVersion]), true +} + +func (*resourceKindDescriptor) CollectFromSeries(series any) (any, bool) { + accessor := series.(kindMetaAccessor) + return accessor.GetKindMeta(KindResource) +} + +func (*resourceKindDescriptor) CopyVersioned(v any) any { + return v.(*Versioned[*ResourceVersion]).Copy(ResourceOps) +} + +func (*resourceKindDescriptor) SetOnSeries(series, versioned any) { + accessor := series.(kindMetaAccessor) + accessor.SetKindMeta(KindResource, versioned) +} + +func (*resourceKindDescriptor) NewStore() any { + return NewMemStore[*ResourceVersion](ResourceOps) +} + +func (*resourceKindDescriptor) SetVersioned(store any, labelsHash uint64, versioned any) { + store.(*MemStore[*ResourceVersion]).SetVersioned(labelsHash, versioned.(*Versioned[*ResourceVersion])) +} + +func (*resourceKindDescriptor) IterVersioned(ctx context.Context, store any, f func(labelsHash uint64, versioned any) error) error { + return store.(*MemStore[*ResourceVersion]).IterVersioned(ctx, func(labelsHash uint64, v *Versioned[*ResourceVersion]) error { + return f(labelsHash, v) + }) +} + +func (*resourceKindDescriptor) StoreLen(store any) int { + return store.(*MemStore[*ResourceVersion]).Len() +} + +func (*resourceKindDescriptor) DenormalizeIntoStore(store any, labelsHash uint64, versions []VersionWithTime) { + typed := make([]*ResourceVersion, len(versions)) + for i, vt := range versions { + cp := copyResourceVersion(vt.Version.(*ResourceVersion)) + cp.MinTime = vt.MinTime + cp.MaxTime = vt.MaxTime + typed[i] = cp + } + slices.SortFunc(typed, func(a, b *ResourceVersion) int { + return cmp.Compare(a.MinTime, b.MinTime) + }) + store.(*MemStore[*ResourceVersion]).SetVersioned(labelsHash, &Versioned[*ResourceVersion]{Versions: typed}) +} + +func 
(*resourceKindDescriptor) IterateVersions(versioned any, f func(version any, minTime, maxTime int64)) { + vr := versioned.(*Versioned[*ResourceVersion]) + for _, rv := range vr.Versions { + f(rv, rv.MinTime, rv.MaxTime) + } +} + +func (*resourceKindDescriptor) VersionsEqual(a, b any) bool { + return ResourceVersionsEqual(a.(*ResourceVersion), b.(*ResourceVersion)) +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/scope.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/scope.go new file mode 100644 index 00000000000..c14f811cebf --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/scope.go @@ -0,0 +1,127 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +import ( + "context" + "maps" +) + +// ScopeVersion represents a snapshot of OTel InstrumentationScope data at a point in time. +type ScopeVersion struct { + Name string + Version string + SchemaURL string + Attrs map[string]string + MinTime int64 + MaxTime int64 +} + +// GetMinTime returns the minimum timestamp. +func (sv *ScopeVersion) GetMinTime() int64 { return sv.MinTime } + +// GetMaxTime returns the maximum timestamp. +func (sv *ScopeVersion) GetMaxTime() int64 { return sv.MaxTime } + +// SetMinTime sets the minimum timestamp. +func (sv *ScopeVersion) SetMinTime(t int64) { sv.MinTime = t } + +// SetMaxTime sets the maximum timestamp. 
+func (sv *ScopeVersion) SetMaxTime(t int64) { sv.MaxTime = t } + +// UpdateTimeRange extends the time range to include the given timestamps. +func (sv *ScopeVersion) UpdateTimeRange(minTime, maxTime int64) { + if minTime < sv.MinTime { + sv.MinTime = minTime + } + if maxTime > sv.MaxTime { + sv.MaxTime = maxTime + } +} + +// NewScopeVersion creates a new ScopeVersion with deep-copied attributes. +func NewScopeVersion(name, version, schemaURL string, attrs map[string]string, minTime, maxTime int64) *ScopeVersion { + attrsCopy := make(map[string]string, len(attrs)) + maps.Copy(attrsCopy, attrs) + return &ScopeVersion{ + Name: name, + Version: version, + SchemaURL: schemaURL, + Attrs: attrsCopy, + MinTime: minTime, + MaxTime: maxTime, + } +} + +// CopyScopeVersion creates a deep copy of a ScopeVersion. +func CopyScopeVersion(sv *ScopeVersion) *ScopeVersion { + var attrsCopy map[string]string + if sv.Attrs != nil { + attrsCopy = make(map[string]string, len(sv.Attrs)) + maps.Copy(attrsCopy, sv.Attrs) + } + return &ScopeVersion{ + Name: sv.Name, + Version: sv.Version, + SchemaURL: sv.SchemaURL, + Attrs: attrsCopy, + MinTime: sv.MinTime, + MaxTime: sv.MaxTime, + } +} + +// ScopeVersionsEqual compares two ScopeVersions for equality (ignoring time range). +func ScopeVersionsEqual(a, b *ScopeVersion) bool { + if a.Name != b.Name { + return false + } + if a.Version != b.Version { + return false + } + if a.SchemaURL != b.SchemaURL { + return false + } + return AttributesEqual(a.Attrs, b.Attrs) +} + +// VersionedScope is a type alias for the generic Versioned container for scopes. +type VersionedScope = Versioned[*ScopeVersion] + +// NewVersionedScope creates a new VersionedScope with a single version. +func NewVersionedScope(version *ScopeVersion) *VersionedScope { + return &VersionedScope{ + Versions: []*ScopeVersion{CopyScopeVersion(version)}, + } +} + +// MergeVersionedScopes merges two VersionedScope instances for the same series. 
+func MergeVersionedScopes(a, b *VersionedScope) *VersionedScope { + return MergeVersioned(ScopeOps, a, b) +} + +// VersionedScopeReader provides read access to versioned scopes. +type VersionedScopeReader interface { + GetVersionedScope(labelsHash uint64) (*VersionedScope, bool) + IterVersionedScopes(ctx context.Context, f func(labelsHash uint64, scopes *VersionedScope) error) error + TotalScopes() uint64 + TotalScopeVersions() uint64 +} + +// MemScopeStore is a type alias for the generic MemStore for scopes. +type MemScopeStore = MemStore[*ScopeVersion] + +// NewMemScopeStore creates a new in-memory scope store. +func NewMemScopeStore() *MemScopeStore { + return NewMemStore[*ScopeVersion](ScopeOps) +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/scope_kind.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/scope_kind.go new file mode 100644 index 00000000000..68f4a6afbe0 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/scope_kind.go @@ -0,0 +1,225 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +import ( + "cmp" + "context" + "log/slog" + "maps" + "slices" + + "github.com/cespare/xxhash/v2" +) + +func init() { + RegisterKind(&scopeKindDescriptor{}) +} + +// scopeOps implements KindOps for *ScopeVersion. 
// scopeOps implements KindOps for *ScopeVersion.
type scopeOps struct{}

// Equal reports content equality (time range excluded, per ScopeVersionsEqual).
func (scopeOps) Equal(a, b *ScopeVersion) bool      { return ScopeVersionsEqual(a, b) }

// Copy returns a deep copy of the version.
func (scopeOps) Copy(v *ScopeVersion) *ScopeVersion { return CopyScopeVersion(v) }

// ContentHash returns the content-addressed hash (time range excluded).
func (scopeOps) ContentHash(v *ScopeVersion) uint64 { return hashScopeContent(v) }

// ThinCopy returns a version that shares (does not copy) the canonical Attrs
// map and takes only the time range from v. Callers must treat the shared
// content as immutable.
func (scopeOps) ThinCopy(canonical, v *ScopeVersion) *ScopeVersion {
	return &ScopeVersion{
		Name:      canonical.Name,
		Version:   canonical.Version,
		SchemaURL: canonical.SchemaURL,
		Attrs:     canonical.Attrs,
		MinTime:   v.MinTime,
		MaxTime:   v.MaxTime,
	}
}

// ScopeOps is the shared KindOps instance for scopes.
var ScopeOps KindOps[*ScopeVersion] = scopeOps{}

// hashScopeContent computes a deterministic xxhash for a ScopeVersion's content.
// The hash covers name, version, schema URL, and attributes.
// It does NOT include MinTime/MaxTime. A 0x00 byte separates each field so
// adjacent fields cannot collide by concatenation.
func hashScopeContent(sv *ScopeVersion) uint64 {
	h := xxhash.New()

	_, _ = h.WriteString(sv.Name)
	_, _ = h.Write([]byte{0})
	_, _ = h.WriteString(sv.Version)
	_, _ = h.Write([]byte{0})
	_, _ = h.WriteString(sv.SchemaURL)
	_, _ = h.Write([]byte{0})
	hashAttrs(h, sv.Attrs)

	return h.Sum64()
}

// scopeKindDescriptor implements KindDescriptor for OTel scopes.
type scopeKindDescriptor struct{}

func (*scopeKindDescriptor) ID() KindID                   { return KindScope }
func (*scopeKindDescriptor) WALRecordType() WALRecordType { return WALScopeUpdate }
func (*scopeKindDescriptor) TableNamespace() string       { return NamespaceScopeTable }
func (*scopeKindDescriptor) MappingNamespace() string     { return NamespaceScopeMapping }

// DecodeWAL and EncodeWAL delegate to pluggable functions to avoid import cycle.
func (*scopeKindDescriptor) DecodeWAL(rec []byte, into any) (any, error) {
	return ScopeDecodeWAL(rec, into)
}

func (*scopeKindDescriptor) EncodeWAL(records any, buf []byte) []byte {
	return ScopeEncodeWAL(records, buf)
}

// ScopeDecodeWAL is set by the tsdb package to break the import cycle.
// NOTE(review): calling DecodeWAL before tsdb's init assigns this hook
// nil-panics; presumably guaranteed by package init order — verify.
var ScopeDecodeWAL func(rec []byte, into any) (any, error)

// ScopeEncodeWAL is set by the tsdb package to break the import cycle.
var ScopeEncodeWAL func(records any, buf []byte) []byte

// ParseTableRow converts a Parquet scope table row into a *ScopeVersion.
func (*scopeKindDescriptor) ParseTableRow(_ *slog.Logger, row *metadataRow) any {
	return parseScopeContent(row)
}

// BuildTableRow builds a content-addressed Parquet table row for a scope
// version. Attribute entries are sorted for deterministic output.
func (*scopeKindDescriptor) BuildTableRow(contentHash uint64, version any) metadataRow {
	sv := version.(*ScopeVersion)
	scopeAttrs := make([]EntityAttributeEntry, 0, len(sv.Attrs))
	for k, v := range sv.Attrs {
		scopeAttrs = append(scopeAttrs, EntityAttributeEntry{Key: k, Value: v})
	}
	sortAttrEntries(scopeAttrs)
	return metadataRow{
		Namespace:       NamespaceScopeTable,
		ContentHash:     contentHash,
		ScopeName:       sv.Name,
		ScopeVersionStr: sv.Version,
		SchemaURL:       sv.SchemaURL,
		ScopeAttrs:      scopeAttrs,
	}
}

// ContentHash computes the content hash for a *ScopeVersion.
func (*scopeKindDescriptor) ContentHash(version any) uint64 {
	return hashScopeContent(version.(*ScopeVersion))
}

// ScopeCommitData carries scope WAL record data without importing tsdb/record.
type ScopeCommitData struct {
	Name      string
	Version   string
	SchemaURL string
	Attrs     map[string]string
	MinTime   int64
	MaxTime   int64
}

// CommitToSeries is the cold-path (WAL replay) entry; it delegates to the
// shared hot-path implementation.
func (*scopeKindDescriptor) CommitToSeries(series, walRecord any) {
	CommitScopeDirect(series.(kindMetaAccessor), walRecord.(ScopeCommitData))
}
+func CommitScopeDirect(accessor kindMetaAccessor, scd ScopeCommitData) { + sv := &ScopeVersion{ + Name: scd.Name, + Version: scd.Version, + SchemaURL: scd.SchemaURL, + Attrs: maps.Clone(scd.Attrs), + MinTime: scd.MinTime, + MaxTime: scd.MaxTime, + } + + existing, _ := accessor.GetKindMeta(KindScope) + if existing == nil { + accessor.SetKindMeta(KindScope, &Versioned[*ScopeVersion]{ + Versions: []*ScopeVersion{sv}, + }) + } else { + vs := existing.(*Versioned[*ScopeVersion]) + if len(vs.Versions) > 0 && ScopeOps.Equal(vs.Versions[len(vs.Versions)-1], sv) { + vs.Versions[len(vs.Versions)-1].UpdateTimeRange(sv.MinTime, sv.MaxTime) + } else { + vs.Versions = append(vs.Versions, sv) + } + } +} + +// CollectScopeDirect is the hot-path equivalent of CollectFromSeries +// for scopes, avoiding interface{} boxing on the return path. +func CollectScopeDirect(accessor kindMetaAccessor) (*VersionedScope, bool) { + v, ok := accessor.GetKindMeta(KindScope) + if !ok || v == nil { + return nil, false + } + return v.(*Versioned[*ScopeVersion]), true +} + +func (*scopeKindDescriptor) CollectFromSeries(series any) (any, bool) { + accessor := series.(kindMetaAccessor) + return accessor.GetKindMeta(KindScope) +} + +func (*scopeKindDescriptor) CopyVersioned(v any) any { + return v.(*Versioned[*ScopeVersion]).Copy(ScopeOps) +} + +func (*scopeKindDescriptor) SetOnSeries(series, versioned any) { + accessor := series.(kindMetaAccessor) + accessor.SetKindMeta(KindScope, versioned) +} + +func (*scopeKindDescriptor) NewStore() any { + return NewMemStore[*ScopeVersion](ScopeOps) +} + +func (*scopeKindDescriptor) SetVersioned(store any, labelsHash uint64, versioned any) { + store.(*MemStore[*ScopeVersion]).SetVersioned(labelsHash, versioned.(*Versioned[*ScopeVersion])) +} + +func (*scopeKindDescriptor) IterVersioned(ctx context.Context, store any, f func(labelsHash uint64, versioned any) error) error { + return store.(*MemStore[*ScopeVersion]).IterVersioned(ctx, func(labelsHash uint64, v 
*Versioned[*ScopeVersion]) error { + return f(labelsHash, v) + }) +} + +func (*scopeKindDescriptor) StoreLen(store any) int { + return store.(*MemStore[*ScopeVersion]).Len() +} + +func (*scopeKindDescriptor) DenormalizeIntoStore(store any, labelsHash uint64, versions []VersionWithTime) { + typed := make([]*ScopeVersion, len(versions)) + for i, vt := range versions { + cp := CopyScopeVersion(vt.Version.(*ScopeVersion)) + cp.MinTime = vt.MinTime + cp.MaxTime = vt.MaxTime + typed[i] = cp + } + slices.SortFunc(typed, func(a, b *ScopeVersion) int { + return cmp.Compare(a.MinTime, b.MinTime) + }) + store.(*MemStore[*ScopeVersion]).SetVersioned(labelsHash, &Versioned[*ScopeVersion]{Versions: typed}) +} + +func (*scopeKindDescriptor) IterateVersions(versioned any, f func(version any, minTime, maxTime int64)) { + vs := versioned.(*Versioned[*ScopeVersion]) + for _, sv := range vs.Versions { + f(sv, sv.MinTime, sv.MaxTime) + } +} + +func (*scopeKindDescriptor) VersionsEqual(a, b any) bool { + return ScopeVersionsEqual(a.(*ScopeVersion), b.(*ScopeVersion)) +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/seriesmetadata.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/seriesmetadata.go new file mode 100644 index 00000000000..e3a49bad007 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/seriesmetadata.go @@ -0,0 +1,1270 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

// Package seriesmetadata stores per-series OTel metadata (resources and
// instrumentation scopes), both in memory and persisted as a Parquet file
// inside a TSDB block directory.
package seriesmetadata

import (
	"cmp"
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"os"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"sync"

	"github.com/cespare/xxhash/v2"
	"github.com/parquet-go/parquet-go"
	"github.com/parquet-go/parquet-go/compress/zstd"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/fileutil"
)

// SeriesMetadataFilename is the name of the series metadata file in a block directory.
const SeriesMetadataFilename = "series_metadata.parquet"

// schemaVersion is stored in the Parquet footer for future schema evolution.
// Readers warn (but still attempt to load) when the stored version differs.
const schemaVersion = "1"

// Reader provides read access to series metadata (OTel resources and scopes).
// Implementations include the in-memory MemSeriesMetadata and the
// Parquet-backed parquetReader; both key entries by series labels hash.
type Reader interface {
	// Close releases any resources associated with the reader.
	Close() error

	// VersionedResourceReader provides access to versioned OTel resources.
	VersionedResourceReader

	// VersionedScopeReader provides access to versioned OTel InstrumentationScope data.
	VersionedScopeReader

	// IterKind iterates all entries for a kind (type-erased).
	IterKind(ctx context.Context, id KindID, f func(labelsHash uint64, versioned any) error) error

	// KindLen returns the number of entries for a kind.
	KindLen(id KindID) int

	// LabelsForHash returns the labels for a given labels hash, if available.
	LabelsForHash(labelsHash uint64) (labels.Labels, bool)

	// LookupResourceAttr returns sorted labelsHashes that have a resource version
	// with the given key:value in Identifying or Descriptive attributes.
	// Returns nil if the index has not been built. The returned slice must not
	// be modified by the caller.
	LookupResourceAttr(key, value string) []uint64
}

// LabelsPopulator allows post-construction population of the labels map.
+type LabelsPopulator interface { + SetLabels(labelsHash uint64, lset labels.Labels) +} + +// UniqueAttrNameReader is optionally implemented by Reader implementations +// that maintain a cached set of unique resource attribute names. Checking +// this via type assertion avoids O(N_series) full scans. +type UniqueAttrNameReader interface { + UniqueResourceAttrNames() map[string]struct{} +} + +// numAttrIndexStripes is the number of shards in the inverted attribute index. +// Must be a power of two for fast modulo via bitmask. +const numAttrIndexStripes = 256 + +// attrIndexStripe is a single shard of the inverted attribute index. +type attrIndexStripe struct { + mtx sync.RWMutex + idx map[string][]uint64 + _ [40]byte // cache-line padding to prevent false sharing +} + +// shardedAttrIndex is a 256-way sharded inverted index mapping +// "key\x00value" → sorted []uint64 of labelsHashes. Sharding by key hash +// eliminates the single-mutex bottleneck under high ingestion concurrency. +type shardedAttrIndex struct { + stripes [numAttrIndexStripes]attrIndexStripe +} + +func newShardedAttrIndex() *shardedAttrIndex { + s := &shardedAttrIndex{} + for i := range s.stripes { + s.stripes[i].idx = make(map[string][]uint64) + } + return s +} + +func (s *shardedAttrIndex) stripe(key string) *attrIndexStripe { + h := xxhash.Sum64String(key) + return &s.stripes[h&uint64(numAttrIndexStripes-1)] +} + +// lookup returns the sorted labelsHashes for a given index key. +// The returned slice must not be modified by the caller (copy-on-write). +func (s *shardedAttrIndex) lookup(key string) []uint64 { + st := s.stripe(key) + st.mtx.RLock() + defer st.mtx.RUnlock() + v := st.idx[key] + if len(v) == 0 { + return nil + } + return v +} + +// MemSeriesMetadata is an in-memory implementation of series metadata storage. +// It wraps per-kind stores accessible both generically (via IterKind/KindLen) +// and type-safely (via ResourceStore/ScopeStore). 
type MemSeriesMetadata struct {
	// stores holds one type-erased per-kind store; each value is *MemStore[V]
	// for that kind's concrete version type V.
	stores map[KindID]any // each value is *MemStore[V] for the appropriate V
	// labelsMap resolves labelsHash → labels.Labels for readers.
	// NOTE(review): unlike the fields below, labelsMap has no internal mutex;
	// callers presumably serialize SetLabels/DeleteLabels/LabelsForHash — confirm.
	labelsMap map[uint64]labels.Labels // labelsHash → labels.Labels

	// resourceAttrIndex is a 256-way sharded inverted index mapping
	// "key\x00value" → sorted []uint64 of labelsHashes.
	// Uses copy-on-write sorted slices for ~4x memory reduction vs maps and
	// zero-copy reads (readers holding old slices are safe).
	// Covers identifying attributes (always) and descriptive attributes
	// only when the key is in indexedResourceAttrs.
	// Built lazily via BuildResourceAttrIndex() or incrementally via
	// UpdateResourceAttrIndex(). nil until first build or incremental init.
	resourceAttrIndex *shardedAttrIndex // nil until first build/init

	// indexedResourceAttrs specifies additional descriptive resource attribute
	// names to include in the inverted index beyond identifying attributes
	// (which are always indexed). nil means index only identifying attributes.
	indexedResourceAttrs   map[string]struct{}
	indexedResourceAttrsMu sync.RWMutex // protects indexedResourceAttrs

	// uniqueAttrNames is a grow-only cache of all resource attribute names
	// seen across all resource versions. Updated incrementally in addToAttrIndex
	// and BuildResourceAttrIndex. Cardinality is typically tiny (<100 names).
	uniqueAttrNames   map[string]struct{}
	uniqueAttrNamesMu sync.RWMutex
}

// NewMemSeriesMetadata creates a new in-memory series metadata store with one
// empty store per registered kind; the inverted attribute index is left nil.
func NewMemSeriesMetadata() *MemSeriesMetadata {
	m := &MemSeriesMetadata{
		stores:    make(map[KindID]any, len(allKindsRegistered)),
		labelsMap: make(map[uint64]labels.Labels),
	}
	for _, kind := range allKindsRegistered {
		m.stores[kind.ID()] = kind.NewStore()
	}
	return m
}

// ResourceStore returns the typed resource store.
+func (m *MemSeriesMetadata) ResourceStore() *MemStore[*ResourceVersion] { + return m.stores[KindResource].(*MemStore[*ResourceVersion]) +} + +// ScopeStore returns the typed scope store. +func (m *MemSeriesMetadata) ScopeStore() *MemStore[*ScopeVersion] { + return m.stores[KindScope].(*MemStore[*ScopeVersion]) +} + +// StoreForKind returns the type-erased store for a kind. +func (m *MemSeriesMetadata) StoreForKind(id KindID) any { + return m.stores[id] +} + +// ResourceCount returns the number of unique series with resource data. +func (m *MemSeriesMetadata) ResourceCount() int { return m.ResourceStore().Len() } + +// ScopeCount returns the number of unique series with scope data. +func (m *MemSeriesMetadata) ScopeCount() int { return m.ScopeStore().Len() } + +// Close is a no-op for in-memory storage. +func (*MemSeriesMetadata) Close() error { return nil } + +// SetIndexedResourceAttrs configures which additional descriptive resource +// attribute names are included in the inverted index. Identifying attributes +// are always indexed regardless of this setting. +// Thread-safe: uses the same mutex as index operations. +// Note: changing the indexed set does NOT retroactively rebuild the index — +// it only affects future updates. The caller should rebuild if needed. +func (m *MemSeriesMetadata) SetIndexedResourceAttrs(attrs map[string]struct{}) { + m.indexedResourceAttrsMu.Lock() + defer m.indexedResourceAttrsMu.Unlock() + m.indexedResourceAttrs = attrs +} + +// UniqueResourceAttrNames returns a snapshot of all resource attribute names +// that have been seen. The returned map must not be modified by the caller. +// This is O(1) — no iteration required. +func (m *MemSeriesMetadata) UniqueResourceAttrNames() map[string]struct{} { + m.uniqueAttrNamesMu.RLock() + defer m.uniqueAttrNamesMu.RUnlock() + return m.uniqueAttrNames +} + +// SetLabels associates a labels set with a labels hash for later lookup. 
+func (m *MemSeriesMetadata) SetLabels(labelsHash uint64, lset labels.Labels) { + m.labelsMap[labelsHash] = lset +} + +// DeleteLabels removes the labels mapping for a given hash. +func (m *MemSeriesMetadata) DeleteLabels(labelsHash uint64) { + delete(m.labelsMap, labelsHash) +} + +// LabelsForHash returns the labels for a given labels hash, if available. +func (m *MemSeriesMetadata) LabelsForHash(labelsHash uint64) (labels.Labels, bool) { + lset, ok := m.labelsMap[labelsHash] + return lset, ok +} + +// IterKind iterates all entries for a kind. +func (m *MemSeriesMetadata) IterKind(ctx context.Context, id KindID, f func(labelsHash uint64, versioned any) error) error { + kind, ok := KindByID(id) + if !ok { + return nil + } + store, ok := m.stores[id] + if !ok { + return nil + } + return kind.IterVersioned(ctx, store, f) +} + +// KindLen returns the number of entries for a kind. +func (m *MemSeriesMetadata) KindLen(id KindID) int { + kind, ok := KindByID(id) + if !ok { + return 0 + } + store, ok := m.stores[id] + if !ok { + return 0 + } + return kind.StoreLen(store) +} + +// --- Resource type-safe accessors (VersionedResourceReader) --- + +func (m *MemSeriesMetadata) GetResource(labelsHash uint64) (*ResourceVersion, bool) { + return m.ResourceStore().Get(labelsHash) +} + +func (m *MemSeriesMetadata) GetVersionedResource(labelsHash uint64) (*VersionedResource, bool) { + return m.ResourceStore().GetVersioned(labelsHash) +} + +func (m *MemSeriesMetadata) GetResourceAt(labelsHash uint64, timestamp int64) (*ResourceVersion, bool) { + return m.ResourceStore().GetAt(labelsHash, timestamp) +} + +func (m *MemSeriesMetadata) SetResource(labelsHash uint64, resource *ResourceVersion) { + m.ResourceStore().Set(labelsHash, resource) +} + +func (m *MemSeriesMetadata) SetVersionedResource(labelsHash uint64, resources *VersionedResource) { + m.ResourceStore().SetVersioned(labelsHash, resources) +} + +func (m *MemSeriesMetadata) DeleteResource(labelsHash uint64) { + 
m.ResourceStore().Delete(labelsHash) +} + +func (m *MemSeriesMetadata) IterResources(ctx context.Context, f func(labelsHash uint64, resource *ResourceVersion) error) error { + return m.ResourceStore().Iter(ctx, f) +} + +func (m *MemSeriesMetadata) IterVersionedResources(ctx context.Context, f func(labelsHash uint64, resources *VersionedResource) error) error { + return m.ResourceStore().IterVersioned(ctx, f) +} + +func (m *MemSeriesMetadata) TotalResources() uint64 { + return m.ResourceStore().TotalEntries() +} + +func (m *MemSeriesMetadata) TotalResourceVersions() uint64 { + return m.ResourceStore().TotalVersions() +} + +// --- Scope type-safe accessors (VersionedScopeReader) --- + +func (m *MemSeriesMetadata) GetVersionedScope(labelsHash uint64) (*VersionedScope, bool) { + return m.ScopeStore().GetVersioned(labelsHash) +} + +func (m *MemSeriesMetadata) SetVersionedScope(labelsHash uint64, scopes *VersionedScope) { + m.ScopeStore().SetVersioned(labelsHash, scopes) +} + +func (m *MemSeriesMetadata) IterVersionedScopes(ctx context.Context, f func(labelsHash uint64, scopes *VersionedScope) error) error { + return m.ScopeStore().IterVersioned(ctx, f) +} + +func (m *MemSeriesMetadata) TotalScopes() uint64 { + return m.ScopeStore().TotalEntries() +} + +func (m *MemSeriesMetadata) TotalScopeVersions() uint64 { + return m.ScopeStore().TotalVersions() +} + +// BuildResourceAttrIndex builds the inverted index from all resource versions. +// Called once after merge in mergeBlockMetadata. After this, LookupResourceAttr +// returns results in O(1) instead of requiring a full scan. +// Skips rebuilding if the index is already populated (e.g. from Parquet or +// incremental updates). 
+func (m *MemSeriesMetadata) BuildResourceAttrIndex() { + if m.resourceAttrIndex != nil { + return + } + idx := newShardedAttrIndex() + names := make(map[string]struct{}) + m.indexedResourceAttrsMu.RLock() + extra := m.indexedResourceAttrs + m.indexedResourceAttrsMu.RUnlock() + _ = m.ResourceStore().IterVersioned(context.Background(), func(labelsHash uint64, vr *VersionedResource) error { + for _, rv := range vr.Versions { + addToAttrIndex(idx, labelsHash, rv, extra) + collectAttrNames(names, rv) + } + return nil + }) + m.resourceAttrIndex = idx + + m.uniqueAttrNamesMu.Lock() + m.uniqueAttrNames = names + m.uniqueAttrNamesMu.Unlock() +} + +// InitResourceAttrIndex initializes an empty inverted index, enabling +// incremental updates via UpdateResourceAttrIndex. This must be called +// before any incremental updates (e.g. on head startup). +func (m *MemSeriesMetadata) InitResourceAttrIndex() { + if m.resourceAttrIndex == nil { + m.resourceAttrIndex = newShardedAttrIndex() + } +} + +// UpdateResourceAttrIndex incrementally updates the inverted index when a +// resource version changes. Removes stale entries from old, adds new ones. +// old may be nil if this is the first insert for this labelsHash. +func (m *MemSeriesMetadata) UpdateResourceAttrIndex( + labelsHash uint64, + old *VersionedResource, + cur *VersionedResource, +) { + // Track new attr names from the current version (grow-only). + // Always runs even without inverted index — used for autocomplete. + if cur != nil { + m.uniqueAttrNamesMu.Lock() + if m.uniqueAttrNames == nil { + m.uniqueAttrNames = make(map[string]struct{}) + } + for _, rv := range cur.Versions { + collectAttrNames(m.uniqueAttrNames, rv) + } + m.uniqueAttrNamesMu.Unlock() + } + + if m.resourceAttrIndex == nil { + return + } + m.indexedResourceAttrsMu.RLock() + extra := m.indexedResourceAttrs + m.indexedResourceAttrsMu.RUnlock() + + // Remove old entries. 
+ if old != nil { + for _, rv := range old.Versions { + removeFromAttrIndex(m.resourceAttrIndex, labelsHash, rv, extra) + } + } + // Add current entries. + if cur != nil { + for _, rv := range cur.Versions { + addToAttrIndex(m.resourceAttrIndex, labelsHash, rv, extra) + } + } +} + +// RemoveFromResourceAttrIndex removes all index entries for a labelsHash. +func (m *MemSeriesMetadata) RemoveFromResourceAttrIndex(labelsHash uint64, vr *VersionedResource) { + if vr == nil { + return + } + if m.resourceAttrIndex == nil { + return + } + m.indexedResourceAttrsMu.RLock() + extra := m.indexedResourceAttrs + m.indexedResourceAttrsMu.RUnlock() + for _, rv := range vr.Versions { + removeFromAttrIndex(m.resourceAttrIndex, labelsHash, rv, extra) + } +} + +// sortedInsert inserts val into the sorted slice s using copy-on-write semantics. +// Returns the (possibly new) slice. If val already exists, s is returned unchanged. +// The new slice does not share backing memory with s, so readers holding old +// slices are safe from concurrent mutation. +func sortedInsert(s []uint64, val uint64) []uint64 { + i, found := slices.BinarySearch(s, val) + if found { + return s + } + ns := make([]uint64, len(s)+1) + copy(ns, s[:i]) + ns[i] = val + copy(ns[i+1:], s[i:]) + return ns +} + +// sortedRemove removes val from the sorted slice s using copy-on-write semantics. +// Returns the (possibly new) slice. If val is not found, s is returned unchanged. +func sortedRemove(s []uint64, val uint64) []uint64 { + i, found := slices.BinarySearch(s, val) + if !found { + return s + } + ns := make([]uint64, len(s)-1) + copy(ns, s[:i]) + copy(ns[i:], s[i+1:]) + return ns +} + +// collectAttrNames adds all attribute names from a resource version to the name set. 
+func collectAttrNames(names map[string]struct{}, rv *ResourceVersion) { + for k := range rv.Identifying { + names[k] = struct{}{} + } + for k := range rv.Descriptive { + names[k] = struct{}{} + } +} + +// addToAttrIndex adds attribute entries for a resource version to the sharded index. +// Identifying attributes are always indexed. Descriptive attributes are only +// indexed if their key is in extraIndexed. +// Uses copy-on-write sorted slices so readers holding old slices are safe. +// Each key routes to a single stripe — no two stripe locks are held simultaneously. +func addToAttrIndex(idx *shardedAttrIndex, labelsHash uint64, rv *ResourceVersion, extraIndexed map[string]struct{}) { + for k, v := range rv.Identifying { + key := k + "\x00" + v + st := idx.stripe(key) + st.mtx.Lock() + st.idx[key] = sortedInsert(st.idx[key], labelsHash) + st.mtx.Unlock() + } + for k, v := range rv.Descriptive { + if _, ok := extraIndexed[k]; !ok { + continue + } + key := k + "\x00" + v + st := idx.stripe(key) + st.mtx.Lock() + st.idx[key] = sortedInsert(st.idx[key], labelsHash) + st.mtx.Unlock() + } +} + +// removeFromAttrIndex removes attribute entries for a resource version from the sharded index. +// Identifying attributes are always removed. Descriptive attributes are only +// removed if their key is in extraIndexed. +// Uses copy-on-write sorted slices so readers holding old slices are safe. +// Each key routes to a single stripe — no two stripe locks are held simultaneously. 
+func removeFromAttrIndex(idx *shardedAttrIndex, labelsHash uint64, rv *ResourceVersion, extraIndexed map[string]struct{}) { + for k, v := range rv.Identifying { + key := k + "\x00" + v + st := idx.stripe(key) + st.mtx.Lock() + if s, ok := st.idx[key]; ok { + ns := sortedRemove(s, labelsHash) + if len(ns) == 0 { + delete(st.idx, key) + } else { + st.idx[key] = ns + } + } + st.mtx.Unlock() + } + for k, v := range rv.Descriptive { + if _, ok := extraIndexed[k]; !ok { + continue + } + key := k + "\x00" + v + st := idx.stripe(key) + st.mtx.Lock() + if s, ok := st.idx[key]; ok { + ns := sortedRemove(s, labelsHash) + if len(ns) == 0 { + delete(st.idx, key) + } else { + st.idx[key] = ns + } + } + st.mtx.Unlock() + } +} + +// LookupResourceAttr returns sorted labelsHashes that have a resource version +// with the given key:value in Identifying or Descriptive attributes. +// Returns nil if the index has not been built. +// The returned slice is safe for concurrent use — copy-on-write ensures +// that mutations create new slices rather than modifying existing ones. +func (m *MemSeriesMetadata) LookupResourceAttr(key, value string) []uint64 { + if m.resourceAttrIndex == nil { + return nil + } + return m.resourceAttrIndex.lookup(key + "\x00" + value) +} + +// parquetReader implements Reader by reading from a Parquet file. +type parquetReader struct { + closer io.Closer // nil for ReaderAt-based readers (caller manages lifecycle) + mem *MemSeriesMetadata + + closeOnce sync.Once + closeErr error +} + +// contentMapping pairs a series (by SeriesRef) with a content hash and time range. +type contentMapping struct { + seriesRef uint64 + contentHash uint64 + minTime int64 + maxTime int64 +} + +// kindDenormState holds per-kind state during row denormalization. 
type kindDenormState struct {
	kind           KindDescriptor
	contentTable   map[uint64]any               // contentHash → version value (type-erased)
	mappings       []contentMapping             // series → content hash + time range
	versionsByHash map[uint64][]VersionWithTime // labelsHash → versions with time ranges
}

// denormalizeRows processes raw Parquet rows into in-memory lookup structures.
// It uses the kind registry to dispatch table/mapping rows generically.
// Rows referencing missing content hashes or unresolvable series refs are
// logged and dropped rather than failing the load.
func denormalizeRows(
	logger *slog.Logger,
	rows []metadataRow,
	mem *MemSeriesMetadata,
	refResolver func(seriesRef uint64) (labelsHash uint64, ok bool),
) {
	// Phase 1: Build content-addressed tables and collect mappings per kind.
	states := make(map[KindID]*kindDenormState)
	for _, kind := range AllKinds() {
		states[kind.ID()] = &kindDenormState{
			kind:           kind,
			contentTable:   make(map[uint64]any),
			versionsByHash: make(map[uint64][]VersionWithTime),
		}
	}

	for i := range rows {
		// Index rather than range value: metadataRow is large and rows are
		// dispatched by pointer.
		row := &rows[i]

		if kind, ok := KindByTableNS(row.Namespace); ok {
			state := states[kind.ID()]
			state.contentTable[row.ContentHash] = kind.ParseTableRow(logger, row)
		} else if kind, ok := KindByMappingNS(row.Namespace); ok {
			state := states[kind.ID()]
			state.mappings = append(state.mappings, contentMapping{
				seriesRef:   row.SeriesRef,
				contentHash: row.ContentHash,
				minTime:     row.MinTime,
				maxTime:     row.MaxTime,
			})
		}
		// Rows in other namespaces (e.g. the attr index) are handled in phase 4.
	}

	// Phase 2: Resolve mappings by looking up content from tables.
	for _, state := range states {
		for _, m := range state.mappings {
			template, ok := state.contentTable[m.contentHash]
			if !ok {
				logger.Warn("Mapping references missing content hash",
					"kind", string(state.kind.ID()),
					"series_ref", m.seriesRef, "content_hash", m.contentHash)
				continue
			}
			// Without a resolver, SeriesRef is taken to already be the labels hash.
			labelsHash := m.seriesRef
			if refResolver != nil {
				lh, ok := refResolver(m.seriesRef)
				if !ok {
					logger.Warn("Mapping references unresolvable series ref",
						"kind", string(state.kind.ID()),
						"series_ref", m.seriesRef, "content_hash", m.contentHash)
					continue
				}
				labelsHash = lh
			}
			// Copy the template and set time range.
			// The kind descriptor's CopyVersioned works on *Versioned[V], but here
			// we have a single version. We'll wrap and use kind.SetVersioned.
			// For now, accumulate raw versions and build Versioned in phase 3.
			state.versionsByHash[labelsHash] = append(state.versionsByHash[labelsHash], VersionWithTime{
				Version: template,
				MinTime: m.minTime,
				MaxTime: m.maxTime,
			})
		}
	}

	// Phase 3: Sort versions by MinTime and populate stores (kind-generic).
	for _, kind := range AllKinds() {
		state := states[kind.ID()]
		store := mem.StoreForKind(kind.ID())
		for labelsHash, rawVersions := range state.versionsByHash {
			kind.DenormalizeIntoStore(store, labelsHash, rawVersions)
		}
	}

	// Phase 4: Process resource attribute inverted index rows.
	// Prefer dedicated AttrKey/AttrValue columns when non-empty (new files),
	// fall back to IdentifyingAttrs[0] for backward compatibility (old files).
	// Build into a local shardedAttrIndex, then assign atomically.
	var idx *shardedAttrIndex
	for i := range rows {
		row := &rows[i]
		if row.Namespace != NamespaceResourceAttrIndex {
			continue
		}
		// Allocate lazily so files without index rows leave mem untouched.
		if idx == nil {
			idx = newShardedAttrIndex()
		}

		var attrKey, attrValue string
		switch {
		case row.AttrKey != "":
			attrKey = row.AttrKey
			attrValue = row.AttrValue
		case len(row.IdentifyingAttrs) > 0:
			attrKey = row.IdentifyingAttrs[0].Key
			attrValue = row.IdentifyingAttrs[0].Value
		default:
			continue
		}

		labelsHash := row.SeriesRef
		if refResolver != nil {
			lh, ok := refResolver(row.SeriesRef)
			if !ok {
				continue
			}
			labelsHash = lh
		}
		key := attrKey + "\x00" + attrValue
		// Single-threaded during Parquet load — no stripe locking needed,
		// but use stripe routing for correct placement.
		st := idx.stripe(key)
		st.idx[key] = sortedInsert(st.idx[key], labelsHash)
	}
	if idx != nil {
		mem.resourceAttrIndex = idx
	}
}

// VersionWithTime wraps a version value with its time range from a mapping row.
type VersionWithTime struct {
	Version any
	MinTime int64
	MaxTime int64
}

// newParquetReaderFromReaderAt creates a parquetReader from an io.ReaderAt.
// newParquetReaderFromReaderAt opens the Parquet footer from r, validates the
// stored schema version (warning only — load still proceeds), reads either
// all rows or only the row groups matching an optional namespace filter, and
// denormalizes them into an in-memory store. The caller retains ownership of
// r; the returned reader's Close does not close it.
func newParquetReaderFromReaderAt(logger *slog.Logger, r io.ReaderAt, size int64, opts ...ReaderOption) (*parquetReader, error) {
	var ropts readerOptions
	for _, o := range opts {
		o(&ropts)
	}

	pf, err := parquet.OpenFile(r, size)
	if err != nil {
		return nil, fmt.Errorf("open parquet file: %w", err)
	}
	// Schema version mismatch is tolerated: warn and attempt to load anyway.
	if v, ok := pf.Lookup("schema_version"); ok {
		if v != schemaVersion {
			logger.Warn("Parquet metadata file has unexpected schema version; data may not load correctly",
				"expected", schemaVersion, "found", v)
		}
	} else {
		logger.Warn("Parquet metadata file missing schema_version in footer metadata")
	}

	mem := NewMemSeriesMetadata()

	if len(ropts.namespaceFilter) > 0 {
		// Filtered path: skip row groups whose column index proves they hold a
		// single, non-matching namespace. Row groups with mixed or unknown
		// namespaces are conservatively read in full.
		nsColIdx := lookupColumnIndex(pf.Schema(), "namespace")
		var allRows []metadataRow
		for _, rg := range pf.RowGroups() {
			if nsColIdx >= 0 {
				if ns, ok := rowGroupSingleNamespace(rg, nsColIdx); ok {
					if _, match := ropts.namespaceFilter[ns]; !match {
						continue
					}
				}
			}

			rows, err := readRowGroup[metadataRow](rg)
			if err != nil {
				return nil, fmt.Errorf("read filtered row group: %w", err)
			}
			allRows = append(allRows, rows...)
		}
		denormalizeRows(logger, allRows, mem, ropts.refResolver)
	} else {
		// Unfiltered path: bulk-read every row in one call.
		rows, err := parquet.Read[metadataRow](r, size)
		if err != nil {
			return nil, fmt.Errorf("read parquet rows: %w", err)
		}
		denormalizeRows(logger, rows, mem, ropts.refResolver)
	}

	// closer stays nil: the caller manages r's lifecycle.
	return &parquetReader{mem: mem}, nil
}

// lookupColumnIndex returns the index of the named column in the schema, or -1.
// Only top-level (path length 1) columns are matched.
func lookupColumnIndex(schema *parquet.Schema, name string) int {
	for i, col := range schema.Columns() {
		if len(col) == 1 && col[0] == name {
			return i
		}
	}
	return -1
}

// rowGroupSingleNamespace checks whether a row group contains a single namespace.
+func rowGroupSingleNamespace(rg parquet.RowGroup, nsColIdx int) (string, bool) { + cc := rg.ColumnChunks()[nsColIdx] + idx, err := cc.ColumnIndex() + if err != nil || idx.NumPages() == 0 { + return "", false + } + minVal := string(idx.MinValue(0).ByteArray()) + maxVal := string(idx.MaxValue(0).ByteArray()) + if minVal != maxVal { + return "", false + } + for p := 1; p < idx.NumPages(); p++ { + if string(idx.MinValue(p).ByteArray()) != minVal || string(idx.MaxValue(p).ByteArray()) != minVal { + return "", false + } + } + return minVal, true +} + +// readRowGroup reads all rows from a single row group into a typed slice. +func readRowGroup[T any](rg parquet.RowGroup) ([]T, error) { + n := rg.NumRows() + rows := make([]T, n) + reader := parquet.NewGenericRowGroupReader[T](rg) + _, err := reader.Read(rows) + if err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + return rows, nil +} + +// parseResourceContent converts a resource_table row into a ResourceVersion. +func parseResourceContent(logger *slog.Logger, row *metadataRow) *ResourceVersion { + identifying := make(map[string]string, len(row.IdentifyingAttrs)) + for _, attr := range row.IdentifyingAttrs { + identifying[attr.Key] = attr.Value + } + descriptive := make(map[string]string, len(row.DescriptiveAttrs)) + for _, attr := range row.DescriptiveAttrs { + descriptive[attr.Key] = attr.Value + } + + var entities []*Entity + for _, entityRow := range row.Entities { + entityID := make(map[string]string, len(entityRow.ID)) + for _, attr := range entityRow.ID { + entityID[attr.Key] = attr.Value + } + entityDesc := make(map[string]string, len(entityRow.Description)) + for _, attr := range entityRow.Description { + entityDesc[attr.Key] = attr.Value + } + entityType := entityRow.Type + if entityType == "" { + entityType = EntityTypeResource + } + e := &Entity{ + Type: entityType, + ID: entityID, + Description: entityDesc, + } + if err := e.Validate(); err != nil { + logger.Warn("Skipping invalid entity during 
parquet read", "err", err, "type", entityRow.Type) + continue + } + entities = append(entities, e) + } + slices.SortFunc(entities, func(a, b *Entity) int { + return strings.Compare(a.Type, b.Type) + }) + + return &ResourceVersion{ + Identifying: identifying, + Descriptive: descriptive, + Entities: entities, + } +} + +// parseScopeContent converts a scope_table row into a ScopeVersion. +func parseScopeContent(row *metadataRow) *ScopeVersion { + attrs := make(map[string]string, len(row.ScopeAttrs)) + for _, attr := range row.ScopeAttrs { + attrs[attr.Key] = attr.Value + } + return &ScopeVersion{ + Name: row.ScopeName, + Version: row.ScopeVersionStr, + SchemaURL: row.SchemaURL, + Attrs: attrs, + } +} + +// --- parquetReader type-safe accessors --- + +func (r *parquetReader) GetResource(labelsHash uint64) (*ResourceVersion, bool) { + return r.mem.GetResource(labelsHash) +} + +func (r *parquetReader) GetVersionedResource(labelsHash uint64) (*VersionedResource, bool) { + return r.mem.GetVersionedResource(labelsHash) +} + +func (r *parquetReader) GetResourceAt(labelsHash uint64, timestamp int64) (*ResourceVersion, bool) { + return r.mem.GetResourceAt(labelsHash, timestamp) +} + +func (r *parquetReader) IterResources(ctx context.Context, f func(labelsHash uint64, resource *ResourceVersion) error) error { + return r.mem.IterResources(ctx, f) +} + +func (r *parquetReader) IterVersionedResources(ctx context.Context, f func(labelsHash uint64, resources *VersionedResource) error) error { + return r.mem.IterVersionedResources(ctx, f) +} + +func (r *parquetReader) TotalResources() uint64 { + return r.mem.TotalResources() +} + +func (r *parquetReader) TotalResourceVersions() uint64 { + return r.mem.TotalResourceVersions() +} + +func (r *parquetReader) GetVersionedScope(labelsHash uint64) (*VersionedScope, bool) { + return r.mem.GetVersionedScope(labelsHash) +} + +func (r *parquetReader) IterVersionedScopes(ctx context.Context, f func(labelsHash uint64, scopes *VersionedScope) 
error) error { + return r.mem.IterVersionedScopes(ctx, f) +} + +func (r *parquetReader) TotalScopes() uint64 { + return r.mem.TotalScopes() +} + +func (r *parquetReader) TotalScopeVersions() uint64 { + return r.mem.TotalScopeVersions() +} + +func (r *parquetReader) IterKind(ctx context.Context, id KindID, f func(labelsHash uint64, versioned any) error) error { + return r.mem.IterKind(ctx, id, f) +} + +func (r *parquetReader) LabelsForHash(labelsHash uint64) (labels.Labels, bool) { + return r.mem.LabelsForHash(labelsHash) +} + +func (r *parquetReader) SetLabels(labelsHash uint64, lset labels.Labels) { + r.mem.SetLabels(labelsHash, lset) +} + +func (r *parquetReader) KindLen(id KindID) int { + return r.mem.KindLen(id) +} + +func (r *parquetReader) LookupResourceAttr(key, value string) []uint64 { + return r.mem.LookupResourceAttr(key, value) +} + +// Close releases resources associated with the reader. +func (r *parquetReader) Close() error { + r.closeOnce.Do(func() { + if r.closer != nil { + r.closeErr = r.closer.Close() + } + }) + return r.closeErr +} + +// sortAttrEntries sorts attribute entries by key for deterministic Parquet output. +func sortAttrEntries(entries []EntityAttributeEntry) { + slices.SortFunc(entries, func(a, b EntityAttributeEntry) int { + return cmp.Compare(a.Key, b.Key) + }) +} + +// sortMetadataRows sorts rows for compression: group by namespace, then by +// series_ref, content_hash, MinTime. +func sortMetadataRows(rows []metadataRow) { + slices.SortFunc(rows, func(a, b metadataRow) int { + if c := strings.Compare(a.Namespace, b.Namespace); c != 0 { + return c + } + if c := cmp.Compare(a.SeriesRef, b.SeriesRef); c != 0 { + return c + } + if c := cmp.Compare(a.ContentHash, b.ContentHash); c != 0 { + return c + } + return cmp.Compare(a.MinTime, b.MinTime) + }) +} + +// WriteFile atomically writes series metadata to a Parquet file. 
+func WriteFile(logger *slog.Logger, dir string, mr Reader) (int64, error) { + return WriteFileWithOptions(logger, dir, mr, WriterOptions{}) +} + +// WriteFileWithOptions writes series metadata using the kind registry for dispatch. +func WriteFileWithOptions(logger *slog.Logger, dir string, mr Reader, opts WriterOptions) (int64, error) { + path := filepath.Join(dir, SeriesMetadataFilename) + tmp := path + ".tmp" + + // Per-kind: content table (dedup) and mapping rows. + type kindWriteState struct { + kind KindDescriptor + contentTable map[uint64]metadataRow // contentHash → table row + mappingRows []metadataRow + } + + kindStates := make(map[KindID]*kindWriteState) + for _, kind := range AllKinds() { + kindStates[kind.ID()] = &kindWriteState{ + kind: kind, + contentTable: make(map[uint64]metadataRow), + } + } + + // Iterate all kinds and build rows. + for _, kind := range AllKinds() { + state := kindStates[kind.ID()] + err := mr.IterKind(context.Background(), kind.ID(), func(labelsHash uint64, versioned any) error { + kind.IterateVersions(versioned, func(version any, minTime, maxTime int64) { + contentHash := kind.ContentHash(version) + if _, exists := state.contentTable[contentHash]; !exists { + state.contentTable[contentHash] = kind.BuildTableRow(contentHash, version) + } else { + existing := state.contentTable[contentHash] + existingVersion := kind.ParseTableRow(logger, &existing) + if !kind.VersionsEqual(existingVersion, version) { + logger.Warn("Hash collision detected in content-addressed table", + "kind", string(kind.ID()), "content_hash", contentHash, "labels_hash", labelsHash) + } + } + seriesRef := labelsHash + if opts.RefResolver != nil { + ref, ok := opts.RefResolver(labelsHash) + if !ok { + logger.Warn("Skipping unresolvable labels hash in write", + "kind", string(kind.ID()), "labels_hash", labelsHash) + return + } + seriesRef = ref + } + state.mappingRows = append(state.mappingRows, metadataRow{ + Namespace: kind.MappingNamespace(), + SeriesRef: 
seriesRef, + ContentHash: contentHash, + MinTime: minTime, + MaxTime: maxTime, + }) + }) + return nil + }) + if err != nil { + return 0, fmt.Errorf("iterate %s: %w", kind.ID(), err) + } + } + + // Build per-namespace row slices. + var allNamespaceRows [][]metadataRow + totalRows := 0 + metadataCounts := make(map[string]int) // for footer metadata + + for _, kind := range AllKinds() { + state := kindStates[kind.ID()] + + tableRows := make([]metadataRow, 0, len(state.contentTable)) + for _, row := range state.contentTable { + tableRows = append(tableRows, row) + } + sortMetadataRows(tableRows) + sortMetadataRows(state.mappingRows) + + metadataCounts[string(kind.ID())+"_table_count"] = len(tableRows) + metadataCounts[string(kind.ID())+"_mapping_count"] = len(state.mappingRows) + totalRows += len(tableRows) + len(state.mappingRows) + + allNamespaceRows = append(allNamespaceRows, tableRows, state.mappingRows) + } + + // Optionally build resource attribute inverted index rows. + if opts.EnableInvertedIndex { + indexRows := buildResourceAttrIndexRows(mr, opts.RefResolver, opts.IndexedResourceAttrs) + if len(indexRows) > 0 { + sortMetadataRows(indexRows) + metadataCounts["resource_attr_index_count"] = len(indexRows) + totalRows += len(indexRows) + allNamespaceRows = append(allNamespaceRows, indexRows) + } + } + + if totalRows == 0 { + return 0, nil + } + + // Create temp file. + f, err := os.Create(tmp) + if err != nil { + return 0, fmt.Errorf("create temp file: %w", err) + } + defer func() { + if f != nil { + if err := f.Close(); err != nil { + logger.Error("close temp file", "err", err.Error()) + } + } + if tmp != "" { + if err := os.RemoveAll(tmp); err != nil { + logger.Error("remove temp file", "err", err.Error()) + } + } + }() + + // Build writer options. 
+ writerOpts := []parquet.WriterOption{ + parquet.Compression(&zstd.Codec{Level: zstd.SpeedBetterCompression}), + parquet.KeyValueMetadata("schema_version", schemaVersion), + parquet.KeyValueMetadata("row_group_layout", "namespace_partitioned"), + } + for k, v := range metadataCounts { + writerOpts = append(writerOpts, parquet.KeyValueMetadata(k, strconv.Itoa(v))) + } + if opts.BloomFilterFormat == BloomFilterParquetNative { + writerOpts = append(writerOpts, + parquet.BloomFilters( + parquet.SplitBlockFilter(10, "series_ref"), + parquet.SplitBlockFilter(10, "content_hash"), + parquet.SplitBlockFilter(10, "attr_key"), + parquet.SplitBlockFilter(10, "attr_value"), + ), + ) + } + + writer := parquet.NewGenericWriter[metadataRow](f, writerOpts...) + + for _, nsRows := range allNamespaceRows { + if err := writeNamespaceRows(writer, nsRows, opts.MaxRowsPerRowGroup); err != nil { + return 0, fmt.Errorf("write parquet rows: %w", err) + } + } + + if err := writer.Close(); err != nil { + return 0, fmt.Errorf("close parquet writer: %w", err) + } + + if err := f.Sync(); err != nil { + return 0, fmt.Errorf("sync file: %w", err) + } + + stat, err := f.Stat() + if err != nil { + return 0, fmt.Errorf("stat file: %w", err) + } + size := stat.Size() + + if err := f.Close(); err != nil { + return 0, fmt.Errorf("close file: %w", err) + } + f = nil + + if err := fileutil.Replace(tmp, path); err != nil { + return 0, fmt.Errorf("rename temp file: %w", err) + } + tmp = "" + + logArgs := []any{ + "resource_table", metadataCounts["resource_table_count"], + "resource_mappings", metadataCounts["resource_mapping_count"], + "scope_table", metadataCounts["scope_table_count"], + "scope_mappings", metadataCounts["scope_mapping_count"], + "size", size, + } + if cnt, ok := metadataCounts["resource_attr_index_count"]; ok { + logArgs = append(logArgs, "resource_attr_index", cnt) + } + logger.Info("Series metadata written", logArgs...) + + // Populate write stats if requested. 
+ if opts.WriteStats != nil { + opts.WriteStats.NamespaceRowCounts = metadataCounts + } + + return size, nil +} + +// buildResourceTableRow converts a ResourceVersion into a content-addressed table row. +func buildResourceTableRow(contentHash uint64, rv *ResourceVersion) metadataRow { + idAttrs := make([]EntityAttributeEntry, 0, len(rv.Identifying)) + for k, v := range rv.Identifying { + idAttrs = append(idAttrs, EntityAttributeEntry{Key: k, Value: v}) + } + sortAttrEntries(idAttrs) + + descAttrs := make([]EntityAttributeEntry, 0, len(rv.Descriptive)) + for k, v := range rv.Descriptive { + descAttrs = append(descAttrs, EntityAttributeEntry{Key: k, Value: v}) + } + sortAttrEntries(descAttrs) + + entityRows := make([]EntityRow, 0, len(rv.Entities)) + for _, entity := range rv.Entities { + entityIDAttrs := make([]EntityAttributeEntry, 0, len(entity.ID)) + for k, v := range entity.ID { + entityIDAttrs = append(entityIDAttrs, EntityAttributeEntry{Key: k, Value: v}) + } + sortAttrEntries(entityIDAttrs) + + entityDescAttrs := make([]EntityAttributeEntry, 0, len(entity.Description)) + for k, v := range entity.Description { + entityDescAttrs = append(entityDescAttrs, EntityAttributeEntry{Key: k, Value: v}) + } + sortAttrEntries(entityDescAttrs) + + entityRows = append(entityRows, EntityRow{ + Type: entity.Type, + ID: entityIDAttrs, + Description: entityDescAttrs, + }) + } + + return metadataRow{ + Namespace: NamespaceResourceTable, + ContentHash: contentHash, + IdentifyingAttrs: idAttrs, + DescriptiveAttrs: descAttrs, + Entities: entityRows, + } +} + +// buildResourceAttrIndexRows builds inverted index rows for Parquet from all +// resource versions. Each unique (key, value, seriesRef) tuple produces one row. +// Identifying attributes are always indexed. Descriptive attributes are only +// indexed if their key is in indexedResourceAttrs. +// Uses a numeric hash pair for dedup instead of string keys to reduce memory. 
+func buildResourceAttrIndexRows(mr Reader, refResolver func(labelsHash uint64) (uint64, bool), indexedResourceAttrs map[string]struct{}) []metadataRow { + type dedupKey struct { + contentHash uint64 // attrKeyValueHash(k, v) + seriesRef uint64 + } + seen := make(map[dedupKey]struct{}) + var rows []metadataRow + + _ = mr.IterVersionedResources(context.Background(), func(labelsHash uint64, vr *VersionedResource) error { + seriesRef := labelsHash + if refResolver != nil { + ref, ok := refResolver(labelsHash) + if !ok { + return nil + } + seriesRef = ref + } + + addEntry := func(k, v string) { + ch := attrKeyValueHash(k, v) + dk := dedupKey{contentHash: ch, seriesRef: seriesRef} + if _, exists := seen[dk]; exists { + return + } + seen[dk] = struct{}{} + rows = append(rows, metadataRow{ + Namespace: NamespaceResourceAttrIndex, + SeriesRef: seriesRef, + ContentHash: ch, + AttrKey: k, + AttrValue: v, + IdentifyingAttrs: []EntityAttributeEntry{ + {Key: k, Value: v}, + }, + }) + } + + for _, rv := range vr.Versions { + for k, v := range rv.Identifying { + addEntry(k, v) + } + for k, v := range rv.Descriptive { + if _, ok := indexedResourceAttrs[k]; !ok { + continue + } + addEntry(k, v) + } + } + return nil + }) + + return rows +} + +// attrKeyValueHash computes xxhash("key\x00value"), enabling bloom-filter row-group skipping. +func attrKeyValueHash(key, value string) uint64 { + h := xxhash.New() + _, _ = h.WriteString(key) + _, _ = h.Write([]byte{0}) + _, _ = h.WriteString(value) + return h.Sum64() +} + +// ReadSeriesMetadata reads series metadata from a Parquet file in the given directory. 
+func ReadSeriesMetadata(logger *slog.Logger, dir string, opts ...ReaderOption) (Reader, int64, error) { + path := filepath.Join(dir, SeriesMetadataFilename) + + f, err := os.Open(path) + if os.IsNotExist(err) { + return NewMemSeriesMetadata(), 0, nil + } + if err != nil { + return nil, 0, fmt.Errorf("open metadata file: %w", err) + } + + stat, err := f.Stat() + if err != nil { + f.Close() + return nil, 0, fmt.Errorf("stat metadata file: %w", err) + } + + reader, err := newParquetReaderFromReaderAt(logger, f, stat.Size(), opts...) + if err != nil { + f.Close() + return nil, 0, fmt.Errorf("create parquet reader: %w", err) + } + reader.closer = f + + return reader, stat.Size(), nil +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/versioned.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/versioned.go new file mode 100644 index 00000000000..dea2035fa7b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/versioned.go @@ -0,0 +1,154 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +import ( + "math" +) + +// VersionConstraint defines the time-range interface that all version types must satisfy. +type VersionConstraint interface { + GetMinTime() int64 + GetMaxTime() int64 + SetMinTime(int64) + SetMaxTime(int64) + UpdateTimeRange(minTime, maxTime int64) +} + +// KindOps provides kind-specific operations for version values. 
+// Implementations are stateless and safe for concurrent use. +type KindOps[V VersionConstraint] interface { + Equal(a, b V) bool + Copy(v V) V +} + +// ContentDedupOps is an optional extension of KindOps detected via type assertion. +// Kinds that implement it get content-addressed dedup in MemStore, where versions +// with identical content share map/slice pointers from a single canonical entry. +type ContentDedupOps[V VersionConstraint] interface { + ContentHash(v V) uint64 + ThinCopy(canonical, v V) V +} + +// Versioned holds multiple versions of metadata for a single series. +// Each version represents a period when specific metadata was active. +// Versions are ordered by MinTime ascending; most recent version is last. +type Versioned[V VersionConstraint] struct { + Versions []V +} + +// Copy creates a deep copy of the Versioned container. +func (vr *Versioned[V]) Copy(ops KindOps[V]) *Versioned[V] { + versions := make([]V, len(vr.Versions)) + for i, v := range vr.Versions { + versions[i] = ops.Copy(v) + } + return &Versioned[V]{Versions: versions} +} + +// CurrentVersion returns the most recent version (last in the list). +// Returns the zero value if no versions exist. +func (vr *Versioned[V]) CurrentVersion() (V, bool) { + if len(vr.Versions) == 0 { + var zero V + return zero, false + } + return vr.Versions[len(vr.Versions)-1], true +} + +// VersionAt returns the version covering the given timestamp; if the timestamp falls after a version's MaxTime it returns the most recent preceding version, and it reports false only when the timestamp precedes all versions. +func (vr *Versioned[V]) VersionAt(timestamp int64) (V, bool) { + for i := len(vr.Versions) - 1; i >= 0; i-- { + ver := vr.Versions[i] + if timestamp >= ver.GetMinTime() && timestamp <= ver.GetMaxTime() { + return ver, true + } + if timestamp > ver.GetMaxTime() { + return ver, true + } + } + var zero V + return zero, false +} + +// AddOrExtend adds a new version if the metadata changed, or extends the current +// version's time range if it's identical. 
+func (vr *Versioned[V]) AddOrExtend(ops KindOps[V], version V) { + if len(vr.Versions) == 0 { + vr.Versions = []V{ops.Copy(version)} + return + } + + current := vr.Versions[len(vr.Versions)-1] + if ops.Equal(current, version) { + current.UpdateTimeRange(version.GetMinTime(), version.GetMaxTime()) + } else { + vr.Versions = append(vr.Versions, ops.Copy(version)) + } +} + +// Len returns the number of versions. +func (vr *Versioned[V]) Len() int { + return len(vr.Versions) +} + +// MergeVersioned merges two Versioned instances for the same series. +// Both inputs must be sorted by MinTime (the invariant maintained by all stores). +// Uses a two-pointer merge to avoid an extra allocation + sort. +func MergeVersioned[V VersionConstraint](ops KindOps[V], a, b *Versioned[V]) *Versioned[V] { + if a == nil { + return b.Copy(ops) + } + if b == nil { + return a.Copy(ops) + } + + merged := make([]V, 0, len(a.Versions)+len(b.Versions)) + + // Two-pointer merge (both slices are sorted by MinTime). + i, j := 0, 0 + for i < len(a.Versions) || j < len(b.Versions) { + var ver V + switch { + case i >= len(a.Versions): + ver = b.Versions[j] + j++ + case j >= len(b.Versions): + ver = a.Versions[i] + i++ + case a.Versions[i].GetMinTime() <= b.Versions[j].GetMinTime(): + ver = a.Versions[i] + i++ + default: + ver = b.Versions[j] + j++ + } + + if len(merged) > 0 { + last := merged[len(merged)-1] + if ops.Equal(last, ver) && (last.GetMaxTime() == math.MaxInt64 || ver.GetMinTime() <= last.GetMaxTime()+1) { + if ver.GetMaxTime() > last.GetMaxTime() { + last.SetMaxTime(ver.GetMaxTime()) + } + if ver.GetMinTime() < last.GetMinTime() { + last.SetMinTime(ver.GetMinTime()) + } + continue + } + } + merged = append(merged, ops.Copy(ver)) + } + + return &Versioned[V]{Versions: merged} +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/writer_options.go b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/writer_options.go new file mode 100644 index 
00000000000..4773c05bd6c --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/seriesmetadata/writer_options.go @@ -0,0 +1,109 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seriesmetadata + +import "github.com/parquet-go/parquet-go" + +// BloomFilterFormat controls how bloom filters are written into the Parquet file. +type BloomFilterFormat int + +const ( + // BloomFilterNone disables bloom filter generation. + BloomFilterNone BloomFilterFormat = iota + + // BloomFilterParquetNative embeds split-block bloom filters in the Parquet + // file footer for series_ref, content_hash, attr_key, and attr_value columns. + // This is the current behavior when bloom filters are enabled. + BloomFilterParquetNative + + // BloomFilterSidecar is reserved for future use: bloom filters written to a + // separate file for independent store-gateway caching. Not yet implemented. +) + +// WriterOptions configures Parquet write behavior for distributed-scale features. +type WriterOptions struct { + // MaxRowsPerRowGroup limits rows per row group within a namespace. + // 0 means no limit (one row group per namespace). + MaxRowsPerRowGroup int + + // BloomFilterFormat controls bloom filter generation. Use BloomFilterParquetNative + // to embed split-block bloom filters in the Parquet file. Default (BloomFilterNone) + // disables bloom filters. 
+ // + // Note: the read side in this package does not query bloom filters — + // it loads all matching row groups into memory. Bloom filter querying + // is expected to happen in the consumer (e.g. Mimir store-gateway) + // which knows the query-time predicates. + BloomFilterFormat BloomFilterFormat + + // EnableInvertedIndex writes resource attribute inverted index rows + // (namespace=resource_attr_index) into the Parquet file. Each row maps + // a (key, value) attribute pair to a series ref, enabling O(1) reverse + // lookup without runtime index build. + EnableInvertedIndex bool + + // IndexedResourceAttrs specifies additional descriptive resource attribute + // names to include in the inverted index beyond identifying attributes + // (which are always indexed). nil means index only identifying attributes. + IndexedResourceAttrs map[string]struct{} + + // RefResolver converts a labelsHash (the in-memory key) to a block-level + // seriesRef for Parquet mapping rows. If nil, labelsHash is written + // directly as SeriesRef (backward compat for head/test writes without + // a block index). + RefResolver func(labelsHash uint64) (seriesRef uint64, ok bool) + + // WriteStats is populated after a successful write with namespace row + // counts from the written Parquet file. This allows the caller to + // capture stats (e.g. for BlockMeta) without parsing the footer. + WriteStats *WriteStats +} + +// WriteStats contains post-write statistics about the Parquet file. +type WriteStats struct { + // NamespaceRowCounts maps namespace footer keys (e.g. "resource_table_count") + // to their row counts. + NamespaceRowCounts map[string]int +} + +// writeNamespaceRows writes rows in chunks of maxPerGroup, calling Flush after +// each chunk to create row group boundaries. If maxPerGroup <= 0 all rows are +// written as a single row group. 
+func writeNamespaceRows(writer *parquet.GenericWriter[metadataRow], rows []metadataRow, maxPerGroup int) error { + if len(rows) == 0 { + return nil + } + + if maxPerGroup <= 0 { + if _, err := writer.Write(rows); err != nil { + return err + } + return writer.Flush() + } + + for len(rows) > 0 { + chunk := rows + if len(chunk) > maxPerGroup { + chunk = rows[:maxPerGroup] + } + if _, err := writer.Write(chunk); err != nil { + return err + } + if err := writer.Flush(); err != nil { + return err + } + rows = rows[len(chunk):] + } + return nil +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go index 3a4e194fecd..ce5eb4ded52 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go @@ -27,6 +27,8 @@ import ( "strconv" "strings" + "github.com/cespare/xxhash/v2" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -35,6 +37,67 @@ import ( "github.com/prometheus/prometheus/tsdb/tsdbutil" ) +// contentMapping maps a series ref to a content hash with a time range. +// Used during checkpoint to dedup resource and scope content across series. +type contentMapping struct { + contentHash uint64 + minTime int64 + maxTime int64 +} + +// hashResourceWALContent computes a deterministic xxhash for a RefResource's +// content (identifying + descriptive attrs + entities). It does NOT include +// Ref, MinTime, or MaxTime since those are per-mapping, not per-content. 
+func hashResourceWALContent(r *record.RefResource) uint64 { + h := xxhash.New() + + hashMapInto(h, r.Identifying) + _, _ = h.Write([]byte{1}) + hashMapInto(h, r.Descriptive) + _, _ = h.Write([]byte{1}) + + for _, e := range r.Entities { + _, _ = h.WriteString(e.Type) + _, _ = h.Write([]byte{0}) + hashMapInto(h, e.ID) + _, _ = h.Write([]byte{1}) + hashMapInto(h, e.Description) + _, _ = h.Write([]byte{1}) + } + + return h.Sum64() +} + +// hashScopeWALContent computes a deterministic xxhash for a RefScope's +// content (name, version, schema URL, attrs). It does NOT include Ref, +// MinTime, or MaxTime since those are per-mapping, not per-content. +func hashScopeWALContent(s *record.RefScope) uint64 { + h := xxhash.New() + _, _ = h.WriteString(s.Name) + _, _ = h.Write([]byte{0}) + _, _ = h.WriteString(s.Version) + _, _ = h.Write([]byte{0}) + _, _ = h.WriteString(s.SchemaURL) + _, _ = h.Write([]byte{0}) + hashMapInto(h, s.Attrs) + return h.Sum64() +} + +// hashMapInto writes a deterministic representation of a string map into a hash digest. +func hashMapInto(h *xxhash.Digest, m map[string]string) { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + slices.Sort(keys) + for _, k := range keys { + _, _ = h.WriteString(k) + _, _ = h.Write([]byte{0}) + _, _ = h.WriteString(m[k]) + _, _ = h.Write([]byte{0}) + } +} + // CheckpointStats returns stats about a created checkpoint. type CheckpointStats struct { DroppedSeries int @@ -42,11 +105,15 @@ type CheckpointStats struct { DroppedTombstones int DroppedExemplars int DroppedMetadata int + DroppedResources int + DroppedScopes int TotalSeries int // Processed series including dropped ones. TotalSamples int // Processed float and histogram samples including dropped ones. TotalTombstones int // Processed tombstones including dropped ones. TotalExemplars int // Processed exemplars including dropped ones. TotalMetadata int // Processed metadata including dropped ones. 
+ TotalResources int // Processed resource updates including dropped ones. + TotalScopes int // Processed scope updates including dropped ones. } // LastCheckpoint returns the directory name and index of the most recent checkpoint. @@ -85,6 +152,11 @@ func DeleteCheckpoints(dir string, maxIndex int) error { // checkpointTempFileSuffix is the suffix used when creating temporary checkpoint files. const checkpointTempFileSuffix = ".tmp" +// checkpointFlushChunkSize is the number of resource/scope records to buffer +// before flushing to the checkpoint WAL. Bounds peak memory to ~2 MB per chunk +// instead of potentially gigabytes for the full monolithic slice. +const checkpointFlushChunkSize = 10000 + // DeleteTempCheckpoints deletes all temporary checkpoint directories in the given directory. func DeleteTempCheckpoints(logger *slog.Logger, dir string) error { if err := tsdbutil.RemoveTmpDirs(logger, dir, isTempDir); err != nil { @@ -164,6 +236,8 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He tstones []tombstones.Stone exemplars []record.RefExemplar metadata []record.RefMetadata + resources []record.RefResource + scopes []record.RefScope st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function. dec = record.NewDecoder(st, logger) enc record.Encoder @@ -171,9 +245,20 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He recs [][]byte latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata) + // Resources and scopes are versioned (descriptive attributes can change over time), + // so we keep ALL records per ref, not just the latest. This preserves version history + // so that VersionAt() returns correct attributes for historical timestamps after replay. + // + // Content-addressed dedup: many series share the same resource/scope content. + // Store unique content once in a table, and map refs to content hashes. 
+ // This dramatically reduces memory when N series share K unique resources (K << N). + resourceContentTable = make(map[uint64]record.RefResource) // contentHash → canonical record + resourceRefToContent = make(map[chunks.HeadSeriesRef][]contentMapping) // ref → content hashes with time ranges + scopeContentTable = make(map[uint64]record.RefScope) // contentHash → canonical scope record + scopeRefToContent = make(map[chunks.HeadSeriesRef][]contentMapping) // ref → content hashes with time ranges ) for r.Next() { - series, samples, histogramSamples, floatHistogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], floatHistogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0] + series, samples, histogramSamples, floatHistogramSamples, tstones, exemplars, metadata, resources, scopes = series[:0], samples[:0], histogramSamples[:0], floatHistogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0], resources[:0], scopes[:0] // We don't reset the buffer since we batch up multiple records // before writing them to the checkpoint. 
@@ -341,6 +426,50 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } stats.TotalMetadata += len(metadata) stats.DroppedMetadata += len(metadata) - repl + case record.ResourceUpdate: + resources, err = dec.Resources(rec, resources) + if err != nil { + return nil, fmt.Errorf("decode resources: %w", err) + } + repl := 0 + for i, r := range resources { + if keep(r.Ref) { + repl++ + ch := hashResourceWALContent(&resources[i]) + if _, exists := resourceContentTable[ch]; !exists { + resourceContentTable[ch] = r + } + resourceRefToContent[r.Ref] = append(resourceRefToContent[r.Ref], contentMapping{ + contentHash: ch, + minTime: r.MinTime, + maxTime: r.MaxTime, + }) + } + } + stats.TotalResources += len(resources) + stats.DroppedResources += len(resources) - repl + case record.ScopeUpdate: + scopes, err = dec.Scopes(rec, scopes) + if err != nil { + return nil, fmt.Errorf("decode scopes: %w", err) + } + repl := 0 + for i, s := range scopes { + if keep(s.Ref) { + repl++ + ch := hashScopeWALContent(&scopes[i]) + if _, exists := scopeContentTable[ch]; !exists { + scopeContentTable[ch] = s + } + scopeRefToContent[s.Ref] = append(scopeRefToContent[s.Ref], contentMapping{ + contentHash: ch, + minTime: s.MinTime, + maxTime: s.MaxTime, + }) + } + } + stats.TotalScopes += len(scopes) + stats.DroppedScopes += len(scopes) - repl default: // Unknown record type, probably from a future Prometheus version. continue @@ -369,7 +498,6 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He return nil, fmt.Errorf("flush records: %w", err) } - // Flush latest metadata records for each series. if len(latestMetadataMap) > 0 { latestMetadata := make([]record.RefMetadata, 0, len(latestMetadataMap)) for _, m := range latestMetadataMap { @@ -380,6 +508,69 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } + // Flush all resource records for each series (preserving version history). 
+ // Reconstruct full RefResource records from the content-addressed table. + // Flush in chunks to bound peak memory instead of materializing all records at once. + if len(resourceRefToContent) > 0 { + chunk := make([]record.RefResource, 0, checkpointFlushChunkSize) + for ref, mappings := range resourceRefToContent { + for _, m := range mappings { + canonical := resourceContentTable[m.contentHash] + chunk = append(chunk, record.RefResource{ + Ref: ref, + MinTime: m.minTime, + MaxTime: m.maxTime, + Identifying: canonical.Identifying, + Descriptive: canonical.Descriptive, + Entities: canonical.Entities, + }) + if len(chunk) >= checkpointFlushChunkSize { + if err := cp.Log(enc.Resources(chunk, buf[:0])); err != nil { + return nil, fmt.Errorf("flush resource records: %w", err) + } + chunk = chunk[:0] + } + } + } + if len(chunk) > 0 { + if err := cp.Log(enc.Resources(chunk, buf[:0])); err != nil { + return nil, fmt.Errorf("flush resource records: %w", err) + } + } + } + + // Flush all scope records for each series (preserving version history). + // Reconstruct full RefScope records from the content-addressed table. + // Flush in chunks to bound peak memory instead of materializing all records at once. 
+ if len(scopeRefToContent) > 0 { + chunk := make([]record.RefScope, 0, checkpointFlushChunkSize) + for ref, mappings := range scopeRefToContent { + for _, m := range mappings { + canonical := scopeContentTable[m.contentHash] + chunk = append(chunk, record.RefScope{ + Ref: ref, + MinTime: m.minTime, + MaxTime: m.maxTime, + Name: canonical.Name, + Version: canonical.Version, + SchemaURL: canonical.SchemaURL, + Attrs: canonical.Attrs, + }) + if len(chunk) >= checkpointFlushChunkSize { + if err := cp.Log(enc.Scopes(chunk, buf[:0])); err != nil { + return nil, fmt.Errorf("flush scope records: %w", err) + } + chunk = chunk[:0] + } + } + } + if len(chunk) > 0 { + if err := cp.Log(enc.Scopes(chunk, buf[:0])); err != nil { + return nil, fmt.Errorf("flush scope records: %w", err) + } + } + } + if err := cp.Close(); err != nil { return nil, fmt.Errorf("close checkpoint: %w", err) } diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/limits_default.go b/vendor/github.com/prometheus/prometheus/util/runtime/limits_default.go deleted file mode 100644 index 51a78423d33..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/limits_default.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows - -package runtime - -import ( - "fmt" - "math" - "syscall" -) - -// syscall.RLIM_INFINITY is a constant. 
-// Its type is int on most architectures but there are exceptions such as loong64. -// Uniform it to uint according to the standard. -// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html -var unlimited uint64 = syscall.RLIM_INFINITY & math.MaxUint64 - -func limitToString(v uint64, unit string) string { - if v == unlimited { - return "unlimited" - } - return fmt.Sprintf("%d%s", v, unit) -} - -func getLimits(resource int, unit string) string { - rlimit := syscall.Rlimit{} - err := syscall.Getrlimit(resource, &rlimit) - if err != nil { - panic("syscall.Getrlimit failed: " + err.Error()) - } - // rlimit.Cur and rlimit.Max are int64 on some platforms, such as dragonfly. - // We need to cast them explicitly to uint64. - return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(uint64(rlimit.Cur), unit), limitToString(uint64(rlimit.Max), unit)) //nolint:unconvert -} - -// FdLimits returns the soft and hard limits for file descriptors. -func FdLimits() string { - return getLimits(syscall.RLIMIT_NOFILE, "") -} diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/limits_windows.go b/vendor/github.com/prometheus/prometheus/util/runtime/limits_windows.go deleted file mode 100644 index 1cb7ea33a79..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/limits_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build windows - -package runtime - -// FdLimits not supported on Windows -func FdLimits() string { - return "N/A" -} - -// VMLimits not supported on Windows -func VMLimits() string { - return "N/A" -} diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/statfs.go b/vendor/github.com/prometheus/prometheus/util/runtime/statfs.go deleted file mode 100644 index b6edbd872b8..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/statfs.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build openbsd || netbsd || solaris - -package runtime - -// FsType returns the file system type or "unknown" if unsupported. -func FsType(path string) string { - return "unknown" -} - -// FsSize returns the file system size or 0 if unsupported. -func FsSize(path string) uint64 { - return 0 -} diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/statfs_default.go b/vendor/github.com/prometheus/prometheus/util/runtime/statfs_default.go deleted file mode 100644 index de65b780f0f..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/statfs_default.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows && !openbsd && !netbsd && !solaris && !386 - -package runtime - -import ( - "strconv" - "syscall" -) - -func FsType(path string) string { - // Types of file systems that may be returned by `statfs` - fsTypes := map[int64]string{ - 0xadf5: "ADFS_SUPER_MAGIC", - 0xADFF: "AFFS_SUPER_MAGIC", - 0x42465331: "BEFS_SUPER_MAGIC", - 0x1BADFACE: "BFS_MAGIC", - 0xFF534D42: "CIFS_MAGIC_NUMBER", - 0x73757245: "CODA_SUPER_MAGIC", - 0x012FF7B7: "COH_SUPER_MAGIC", - 0x28cd3d45: "CRAMFS_MAGIC", - 0x1373: "DEVFS_SUPER_MAGIC", - 0x00414A53: "EFS_SUPER_MAGIC", - 0x137D: "EXT_SUPER_MAGIC", - 0xEF51: "EXT2_OLD_SUPER_MAGIC", - 0xEF53: "EXT4_SUPER_MAGIC", - 0x4244: "HFS_SUPER_MAGIC", - 0xF995E849: "HPFS_SUPER_MAGIC", - 0x958458f6: "HUGETLBFS_MAGIC", - 0x9660: "ISOFS_SUPER_MAGIC", - 0x72b6: "JFFS2_SUPER_MAGIC", - 0x3153464a: "JFS_SUPER_MAGIC", - 0x137F: "MINIX_SUPER_MAGIC", - 0x138F: "MINIX_SUPER_MAGIC2", - 0x2468: "MINIX2_SUPER_MAGIC", - 0x2478: "MINIX2_SUPER_MAGIC2", - 0x4d44: "MSDOS_SUPER_MAGIC", - 0x564c: "NCP_SUPER_MAGIC", - 0x6969: "NFS_SUPER_MAGIC", - 0x5346544e: "NTFS_SB_MAGIC", - 0x9fa1: "OPENPROM_SUPER_MAGIC", - 0x9fa0: "PROC_SUPER_MAGIC", - 0x002f: "QNX4_SUPER_MAGIC", - 0x52654973: "REISERFS_SUPER_MAGIC", - 0x7275: "ROMFS_MAGIC", - 0x517B: "SMB_SUPER_MAGIC", - 0x012FF7B6: "SYSV2_SUPER_MAGIC", - 0x012FF7B5: "SYSV4_SUPER_MAGIC", - 0x01021994: "TMPFS_MAGIC", - 0x15013346: "UDF_SUPER_MAGIC", - 0x00011954: "UFS_MAGIC", - 0x9fa2: "USBDEVICE_SUPER_MAGIC", - 0xa501FCF5: "VXFS_SUPER_MAGIC", - 0x012FF7B4: "XENIX_SUPER_MAGIC", - 0x58465342: 
"XFS_SUPER_MAGIC", - 0x012FD16D: "_XIAFS_SUPER_MAGIC", - 0x794c7630: "OVERLAYFS_SUPER_MAGIC", - } - - var fs syscall.Statfs_t - err := syscall.Statfs(path, &fs) - // nolintlint might cry out depending on the architecture (e.g. ARM64), so ignore it. - //nolint:unconvert,nolintlint // This ensures Type format on all Platforms. - localType := int64(fs.Type) - if err != nil { - return strconv.FormatInt(localType, 16) - } - if fsType, ok := fsTypes[localType]; ok { - return fsType - } - return strconv.FormatInt(localType, 16) -} - -func FsSize(path string) uint64 { - var fs syscall.Statfs_t - err := syscall.Statfs(path, &fs) - if err != nil { - return 0 - } - return uint64(fs.Bsize) * fs.Blocks -} diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/statfs_linux_386.go b/vendor/github.com/prometheus/prometheus/util/runtime/statfs_linux_386.go deleted file mode 100644 index 82e586dc947..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/statfs_linux_386.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && 386 - -package runtime - -import ( - "strconv" - "syscall" -) - -// FsType returns the file system type (Unix only). 
-func FsType(path string) string { - // Types of file systems that may be returned by `statfs` - fsTypes := map[int32]string{ - 0xadf5: "ADFS_SUPER_MAGIC", - 0xADFF: "AFFS_SUPER_MAGIC", - 0x42465331: "BEFS_SUPER_MAGIC", - 0x1BADFACE: "BFS_MAGIC", - 0x73757245: "CODA_SUPER_MAGIC", - 0x012FF7B7: "COH_SUPER_MAGIC", - 0x28cd3d45: "CRAMFS_MAGIC", - 0x1373: "DEVFS_SUPER_MAGIC", - 0x00414A53: "EFS_SUPER_MAGIC", - 0x137D: "EXT_SUPER_MAGIC", - 0xEF51: "EXT2_OLD_SUPER_MAGIC", - 0xEF53: "EXT4_SUPER_MAGIC", - 0x4244: "HFS_SUPER_MAGIC", - 0x9660: "ISOFS_SUPER_MAGIC", - 0x72b6: "JFFS2_SUPER_MAGIC", - 0x3153464a: "JFS_SUPER_MAGIC", - 0x137F: "MINIX_SUPER_MAGIC", - 0x138F: "MINIX_SUPER_MAGIC2", - 0x2468: "MINIX2_SUPER_MAGIC", - 0x2478: "MINIX2_SUPER_MAGIC2", - 0x4d44: "MSDOS_SUPER_MAGIC", - 0x564c: "NCP_SUPER_MAGIC", - 0x6969: "NFS_SUPER_MAGIC", - 0x5346544e: "NTFS_SB_MAGIC", - 0x9fa1: "OPENPROM_SUPER_MAGIC", - 0x9fa0: "PROC_SUPER_MAGIC", - 0x002f: "QNX4_SUPER_MAGIC", - 0x52654973: "REISERFS_SUPER_MAGIC", - 0x7275: "ROMFS_MAGIC", - 0x517B: "SMB_SUPER_MAGIC", - 0x012FF7B6: "SYSV2_SUPER_MAGIC", - 0x012FF7B5: "SYSV4_SUPER_MAGIC", - 0x01021994: "TMPFS_MAGIC", - 0x15013346: "UDF_SUPER_MAGIC", - 0x00011954: "UFS_MAGIC", - 0x9fa2: "USBDEVICE_SUPER_MAGIC", - 0x012FF7B4: "XENIX_SUPER_MAGIC", - 0x58465342: "XFS_SUPER_MAGIC", - 0x012FD16D: "_XIAFS_SUPER_MAGIC", - 0x794c7630: "OVERLAYFS_SUPER_MAGIC", - } - - var fs syscall.Statfs_t - err := syscall.Statfs(path, &fs) - if err != nil { - return strconv.Itoa(int(fs.Type)) - } - if fsType, ok := fsTypes[fs.Type]; ok { - return fsType - } - return strconv.Itoa(int(fs.Type)) -} - -// FsSize returns the file system size (Unix only). 
-func FsSize(path string) uint64 { - var fs syscall.Statfs_t - err := syscall.Statfs(path, &fs) - if err != nil { - return 0 - } - return uint64(fs.Bsize) * fs.Blocks -} diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/statfs_uint32.go b/vendor/github.com/prometheus/prometheus/util/runtime/statfs_uint32.go deleted file mode 100644 index acffb412959..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/statfs_uint32.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build (386 && darwin) || (386 && freebsd) - -package runtime - -import ( - "strconv" - "syscall" -) - -func FsType(path string) string { - // Types of file systems that may be returned by `statfs` - fsTypes := map[uint32]string{ - 0xadf5: "ADFS_SUPER_MAGIC", - 0xADFF: "AFFS_SUPER_MAGIC", - 0x42465331: "BEFS_SUPER_MAGIC", - 0x1BADFACE: "BFS_MAGIC", - 0x73757245: "CODA_SUPER_MAGIC", - 0x012FF7B7: "COH_SUPER_MAGIC", - 0x28cd3d45: "CRAMFS_MAGIC", - 0x1373: "DEVFS_SUPER_MAGIC", - 0x00414A53: "EFS_SUPER_MAGIC", - 0x137D: "EXT_SUPER_MAGIC", - 0xEF51: "EXT2_OLD_SUPER_MAGIC", - 0xEF53: "EXT4_SUPER_MAGIC", - 0x4244: "HFS_SUPER_MAGIC", - 0x9660: "ISOFS_SUPER_MAGIC", - 0x72b6: "JFFS2_SUPER_MAGIC", - 0x3153464a: "JFS_SUPER_MAGIC", - 0x137F: "MINIX_SUPER_MAGIC", - 0x138F: "MINIX_SUPER_MAGIC2", - 0x2468: "MINIX2_SUPER_MAGIC", - 0x2478: "MINIX2_SUPER_MAGIC2", - 0x4d44: "MSDOS_SUPER_MAGIC", - 0x564c: "NCP_SUPER_MAGIC", - 0x6969: "NFS_SUPER_MAGIC", - 0x5346544e: "NTFS_SB_MAGIC", - 0x9fa1: "OPENPROM_SUPER_MAGIC", - 0x9fa0: "PROC_SUPER_MAGIC", - 0x002f: "QNX4_SUPER_MAGIC", - 0x52654973: "REISERFS_SUPER_MAGIC", - 0x7275: "ROMFS_MAGIC", - 0x517B: "SMB_SUPER_MAGIC", - 0x012FF7B6: "SYSV2_SUPER_MAGIC", - 0x012FF7B5: "SYSV4_SUPER_MAGIC", - 0x01021994: "TMPFS_MAGIC", - 0x15013346: "UDF_SUPER_MAGIC", - 0x00011954: "UFS_MAGIC", - 0x9fa2: "USBDEVICE_SUPER_MAGIC", - 0x012FF7B4: "XENIX_SUPER_MAGIC", - 0x58465342: "XFS_SUPER_MAGIC", - 0x012FD16D: "_XIAFS_SUPER_MAGIC", - 0x794c7630: "OVERLAYFS_SUPER_MAGIC", - } - - var fs syscall.Statfs_t - err := syscall.Statfs(path, &fs) - if err != nil { - return strconv.Itoa(int(fs.Type)) - } - if fsType, ok := fsTypes[fs.Type]; ok { - return fsType - } - return strconv.Itoa(int(fs.Type)) -} - -func FsSize(path string) uint64 { - var fs syscall.Statfs_t - err := syscall.Statfs(path, &fs) - if err != nil { - return 0 - } - return uint64(fs.Bsize) * fs.Blocks -} diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/statfs_windows.go 
b/vendor/github.com/prometheus/prometheus/util/runtime/statfs_windows.go deleted file mode 100644 index 717d4c16f10..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/statfs_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build windows - -package runtime - -import ( - "os" - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - dll = windows.MustLoadDLL("kernel32.dll") - getDiskFreeSpaceExW = dll.MustFindProc("GetDiskFreeSpaceExW") -) - -func FsType(path string) string { - return "unknown" -} - -func FsSize(path string) uint64 { - // Ensure the path exists. 
- if _, err := os.Stat(path); err != nil { - return 0 - } - - var avail int64 - var total int64 - var free int64 - // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getdiskfreespaceexa - ret, _, _ := getDiskFreeSpaceExW.Call( - uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), - uintptr(unsafe.Pointer(&avail)), - uintptr(unsafe.Pointer(&total)), - uintptr(unsafe.Pointer(&free))) - - if ret == 0 || uint64(free) > uint64(total) { - return 0 - } - - return uint64(total) -} diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/uname_default.go b/vendor/github.com/prometheus/prometheus/util/runtime/uname_default.go deleted file mode 100644 index 1bdc2e6696e..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/uname_default.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !linux - -package runtime - -import "runtime" - -// Uname for any platform other than linux. 
-func Uname() string { - return "(" + runtime.GOOS + ")" -} diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/uname_linux.go b/vendor/github.com/prometheus/prometheus/util/runtime/uname_linux.go deleted file mode 100644 index f2798cda4ba..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/uname_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "golang.org/x/sys/unix" - -// Uname returns the uname of the host machine. 
-func Uname() string { - buf := unix.Utsname{} - err := unix.Uname(&buf) - if err != nil { - panic("unix.Uname failed: " + err.Error()) - } - - str := "(" + unix.ByteSliceToString(buf.Sysname[:]) - str += " " + unix.ByteSliceToString(buf.Release[:]) - str += " " + unix.ByteSliceToString(buf.Version[:]) - str += " " + unix.ByteSliceToString(buf.Machine[:]) - str += " " + unix.ByteSliceToString(buf.Nodename[:]) - str += " " + unix.ByteSliceToString(buf.Domainname[:]) + ")" - return str -} diff --git a/vendor/github.com/prometheus/prometheus/util/runtime/vmlimits_default.go b/vendor/github.com/prometheus/prometheus/util/runtime/vmlimits_default.go deleted file mode 100644 index 0e3bc0ead50..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/runtime/vmlimits_default.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows && !openbsd - -package runtime - -import ( - "syscall" -) - -// VMLimits returns the soft and hard limits for virtual memory. 
-func VMLimits() string { - return getLimits(syscall.RLIMIT_AS, "b") -} diff --git a/vendor/github.com/prometheus/prometheus/util/teststorage/appender.go b/vendor/github.com/prometheus/prometheus/util/teststorage/appender.go index 6b1ba31f7d8..f357f2f4c22 100644 --- a/vendor/github.com/prometheus/prometheus/util/teststorage/appender.go +++ b/vendor/github.com/prometheus/prometheus/util/teststorage/appender.go @@ -538,6 +538,17 @@ func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m meta return computeOrCheckRef(ref, l) } +// UpdateResource is a no-op for the test appender. +func (a *appender) UpdateResource(ref storage.SeriesRef, l labels.Labels, _, _ map[string]string, _ []storage.EntityData, _ int64) (storage.SeriesRef, error) { + if err := a.checkErr(); err != nil { + return 0, err + } + if a.next != nil { + return a.next.UpdateResource(ref, l, nil, nil, nil, 0) + } + return computeOrCheckRef(ref, l) +} + type appenderV2 struct { baseAppender diff --git a/vendor/github.com/prometheus/prometheus/web/api/testhelpers/api.go b/vendor/github.com/prometheus/prometheus/web/api/testhelpers/api.go index 07d7003b5c9..ccdf954470a 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/testhelpers/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/testhelpers/api.go @@ -34,6 +34,7 @@ import ( "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/notifications" ) @@ -70,6 +71,7 @@ type TSDBAdminStats interface { Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) WALReplayStatus() (tsdb.WALReplayStatus, error) BlockMetas() ([]tsdb.BlockMeta, error) + SeriesMetadata() (seriesmetadata.Reader, error) } // APIConfig holds configuration for creating a test API instance. 
diff --git a/vendor/github.com/prometheus/prometheus/web/api/testhelpers/mocks.go b/vendor/github.com/prometheus/prometheus/web/api/testhelpers/mocks.go index 6dac4719d32..2bcfaebd49f 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/testhelpers/mocks.go +++ b/vendor/github.com/prometheus/prometheus/web/api/testhelpers/mocks.go @@ -29,6 +29,7 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" ) @@ -487,6 +488,10 @@ func (*FakeTSDBAdminStats) BlockMetas() ([]tsdb.BlockMeta, error) { return []tsdb.BlockMeta{}, nil } +func (*FakeTSDBAdminStats) SeriesMetadata() (seriesmetadata.Reader, error) { + return seriesmetadata.NewMemSeriesMetadata(), nil +} + // NewEmptyQueryable returns a queryable with no series. func NewEmptyQueryable() storage.SampleAndChunkQueryable { return &FakeQueryable{series: []storage.Series{}} diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 6e61fd19c6a..d319d72937a 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -41,6 +41,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/route" + "github.com/prometheus/otlptranslator" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" @@ -55,6 +56,7 @@ import ( "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/features" "github.com/prometheus/prometheus/util/httputil" @@ -111,6 
+113,12 @@ var ( // Return false to fall back to default status code. type OverrideErrorCode func(errorNum, error) (code int, override bool) +// maxMetadataResults is the safety cap on resource metadata API responses. +// Prevents unbounded memory allocation from large result sets. +const maxMetadataResults = 500_000 + +var errMaxResultsReached = errors.New("result set too large, truncated at safety cap") + var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"} type apiError struct { @@ -212,6 +220,7 @@ type TSDBAdminStats interface { Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) WALReplayStatus() (tsdb.WALReplayStatus, error) BlockMetas() ([]tsdb.BlockMeta, error) + SeriesMetadata() (seriesmetadata.Reader, error) } type QueryOpts interface { @@ -236,18 +245,19 @@ type API struct { ready func(http.HandlerFunc) http.HandlerFunc globalURLOptions GlobalURLOptions - db TSDBAdminStats - dbDir string - enableAdmin bool - logger *slog.Logger - CORSOrigin *regexp.Regexp - buildInfo *PrometheusVersion - runtimeInfo func() (RuntimeInfo, error) - gatherer prometheus.Gatherer - isAgent bool - statsRenderer StatsRenderer - notificationsGetter func() []notifications.Notification - notificationsSub func() (<-chan notifications.Notification, func(), bool) + db TSDBAdminStats + dbDir string + enableAdmin bool + enableNativeMetadata bool + logger *slog.Logger + CORSOrigin *regexp.Regexp + buildInfo *PrometheusVersion + runtimeInfo func() (RuntimeInfo, error) + gatherer prometheus.Gatherer + isAgent bool + statsRenderer StatsRenderer + notificationsGetter func() []notifications.Notification + notificationsSub func() (<-chan notifications.Notification, func(), bool) // Allows customizing the default mapping overrideErrorCode OverrideErrorCode @@ -301,6 +311,7 @@ func NewAPI( enableTypeAndUnitLabels bool, appendMetadata bool, overrideErrorCode OverrideErrorCode, + enableNativeMetadata bool, featureRegistry features.Collector, openAPIOptions 
OpenAPIOptions, promqlParser parser.Parser, @@ -314,28 +325,29 @@ func NewAPI( targetRetriever: tr, alertmanagerRetriever: ar, - now: time.Now, - config: configFunc, - flagsMap: flagsMap, - ready: readyFunc, - globalURLOptions: globalURLOptions, - db: db, - dbDir: dbDir, - enableAdmin: enableAdmin, - rulesRetriever: rr, - logger: logger, - CORSOrigin: corsOrigin, - runtimeInfo: runtimeInfo, - buildInfo: buildInfo, - gatherer: gatherer, - isAgent: isAgent, - statsRenderer: DefaultStatsRenderer, - notificationsGetter: notificationsGetter, - notificationsSub: notificationsSub, - overrideErrorCode: overrideErrorCode, - featureRegistry: featureRegistry, - openAPIBuilder: NewOpenAPIBuilder(openAPIOptions, logger), - parser: promqlParser, + now: time.Now, + config: configFunc, + flagsMap: flagsMap, + ready: readyFunc, + globalURLOptions: globalURLOptions, + db: db, + dbDir: dbDir, + enableAdmin: enableAdmin, + enableNativeMetadata: enableNativeMetadata, + rulesRetriever: rr, + logger: logger, + CORSOrigin: corsOrigin, + runtimeInfo: runtimeInfo, + buildInfo: buildInfo, + gatherer: gatherer, + isAgent: isAgent, + statsRenderer: DefaultStatsRenderer, + notificationsGetter: notificationsGetter, + notificationsSub: notificationsSub, + overrideErrorCode: overrideErrorCode, + featureRegistry: featureRegistry, + openAPIBuilder: NewOpenAPIBuilder(openAPIOptions, logger), + parser: promqlParser, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), } @@ -452,6 +464,8 @@ func (api *API) Register(r *route.Router) { r.Get("/alertmanagers", wrapAgent(api.alertmanagers)) r.Get("/metadata", wrap(api.metricMetadata)) + r.Get("/resources", wrap(api.resourceAttributes)) + r.Get("/resources/series", wrap(api.resourceSeriesLookup)) r.Get("/status/config", wrap(api.serveConfig)) r.Get("/status/runtimeinfo", wrap(api.serveRuntimeInfo)) @@ -1281,17 +1295,16 @@ func (api *API) targetMetadata(r 
*http.Request) apiFuncResult { builder := labels.NewBuilder(labels.EmptyLabels()) metric := r.FormValue("metric") res := []metricMetadata{} + for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { for _, t := range tt { if limit >= 0 && len(res) >= limit { break } targetLabels := t.Labels(builder) - // Filter targets that don't satisfy the label matchers. if matchTarget != "" && !matchLabels(targetLabels, matchers) { continue } - // If no metric is specified, get the full list for the target. if metric == "" { for _, md := range t.ListMetadata() { res = append(res, metricMetadata{ @@ -1304,7 +1317,6 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } continue } - // Get metadata for the specified metric. if md, ok := t.GetMetadata(metric); ok { res = append(res, metricMetadata{ Target: targetLabels, @@ -1468,6 +1480,8 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { } metric := r.FormValue("metric") + + // Collect metadata from active scrape targets. for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { for _, t := range tt { if metric == "" { @@ -1522,6 +1536,763 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { return apiFuncResult{res, nil, nil, nil} } +// ResourceAttributeData contains identifying and descriptive attribute maps. +type ResourceAttributeData struct { + Identifying map[string]string `json:"identifying"` + Descriptive map[string]string `json:"descriptive"` +} + +// EntityData represents a single entity with type and attributes. +type EntityData struct { + Type string `json:"type"` + Identifying map[string]string `json:"identifying"` + Descriptive map[string]string `json:"descriptive"` +} + +// ResourceAttributeVersion is a single version of resource attributes with its time range. 
+type ResourceAttributeVersion struct { + Attributes ResourceAttributeData `json:"resource_attributes"` + Entities []EntityData `json:"entities,omitempty"` + MinTimeMs int64 `json:"min_time_ms"` + MaxTimeMs int64 `json:"max_time_ms"` +} + +// ResourceAttributesResponse is the response format for the resource_attributes endpoint. +type ResourceAttributesResponse struct { + Labels labels.Labels `json:"labels"` + Versions []ResourceAttributeVersion `json:"versions"` +} + +// PaginatedResourceAttributes wraps resource attribute results with cursor-based pagination. +type PaginatedResourceAttributes struct { + Results []ResourceAttributesResponse `json:"results"` + NextToken string `json:"nextToken,omitempty"` +} + +// PaginatedSeriesMetadata wraps series metadata results with cursor-based pagination. +type PaginatedSeriesMetadata struct { + Results []SeriesMetadataResponse `json:"results"` + NextToken string `json:"nextToken,omitempty"` +} + +// ScopeAttributeVersion is a single version of scope metadata with its time range. +type ScopeAttributeVersion struct { + Name string `json:"name"` + Version string `json:"version,omitempty"` + SchemaURL string `json:"schema_url,omitempty"` + Attrs map[string]string `json:"attrs,omitempty"` + MinTimeMs int64 `json:"min_time_ms"` + MaxTimeMs int64 `json:"max_time_ms"` +} + +// SeriesMetadataResponse is the response for the reverse lookup endpoint. 
+type SeriesMetadataResponse struct { + Labels labels.Labels `json:"labels"` + Versions []ResourceAttributeVersion `json:"versions,omitempty"` + ScopeVersions []ScopeAttributeVersion `json:"scope_versions,omitempty"` +} + +func (api *API) resourceAttributes(r *http.Request) (result apiFuncResult) { + if !api.enableNativeMetadata { + return apiFuncResult{nil, &apiError{errorExec, errors.New("native metadata is disabled; enable with --enable-feature=native-metadata")}, nil, nil} + } + if api.db == nil { + return apiFuncResult{nil, &apiError{errorInternal, errors.New("TSDB not available")}, nil, nil} + } + + if err := r.ParseForm(); err != nil { + return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error parsing form values: %w", err)}, nil, nil} + } + + // Handle format=attributes for autocomplete use case + if r.FormValue("format") == "attributes" { + return api.resourceAttributePairs(r) + } + + start, err := parseTimeParam(r, "start", MinTime) + if err != nil { + return invalidParamError(err, "start") + } + end, err := parseTimeParam(r, "end", MaxTime) + if err != nil { + return invalidParamError(err, "end") + } + if end.Before(start) { + err := errors.New("end timestamp must not be before start timestamp") + return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + } + + startMs := timestamp.FromTime(start) + endMs := timestamp.FromTime(end) + + limit, err := parseLimitParam(r.FormValue("limit")) + if err != nil { + return invalidParamError(err, "limit") + } + + // Parse match[] parameters if provided + var matcherSets [][]*labels.Matcher + if len(r.Form["match[]"]) > 0 { + matcherSets, err = api.parseMatchersParam(r.Form["match[]"]) + if err != nil { + return invalidParamError(err, "match[]") + } + } + + mr, err := api.db.SeriesMetadata() + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to get series metadata: %w", err)}, nil, nil} + } + defer mr.Close() + + nextToken := r.FormValue("next_token") + + // 
If no matchers provided, return all resource attributes + if len(matcherSets) == 0 { + return api.resourceAttributesAll(mr, limit, startMs, endMs, nextToken) + } + + // Query series matching the selectors + ctx := r.Context() + q, err := api.Queryable.Querier(startMs, endMs) + if err != nil { + return apiFuncResult{nil, returnAPIError(err), nil, nil} + } + defer func() { + if result.finalizer == nil { + q.Close() + } + }() + closer := func() { + q.Close() + } + + hints := &storage.SelectHints{ + Start: startMs, + End: endMs, + Func: "series", + Limit: toHintLimit(limit), + } + + var set storage.SeriesSet + if len(matcherSets) > 1 { + var sets []storage.SeriesSet + for _, mset := range matcherSets { + s := q.Select(ctx, true, hints, mset...) + sets = append(sets, s) + } + set = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) + } else { + set = q.Select(ctx, false, hints, matcherSets[0]...) + } + + var results []ResourceAttributesResponse + warnings := set.Warnings() + + for set.Next() { + lset := set.At().Labels() + hash := labels.StableHash(lset) + + versioned, ok := mr.GetVersionedResource(hash) + if !ok || len(versioned.Versions) == 0 { + continue + } + + // Filter versions to only those overlapping with [start, end] + versions := filterVersions(versioned.Versions, startMs, endMs) + if len(versions) == 0 { + continue + } + + results = append(results, ResourceAttributesResponse{ + Labels: lset, + Versions: versions, + }) + } + + if err := set.Err(); err != nil { + return apiFuncResult{nil, returnAPIError(err), warnings, closer} + } + + // Sort for deterministic output. + slices.SortFunc(results, func(a, b ResourceAttributesResponse) int { + return labels.Compare(a.Labels, b.Labels) + }) + + // Apply cursor pagination. 
+ results = applyResourceCursor(results, nextToken) + + var respNextToken string + if limit > 0 && len(results) > limit { + respNextToken = getResourceNextToken(results[limit-1].Labels) + results = results[:limit] + warnings.Add(errors.New("results truncated due to limit")) + } + + if results == nil { + results = []ResourceAttributesResponse{} + } + + return apiFuncResult{&PaginatedResourceAttributes{Results: results, NextToken: respNextToken}, nil, warnings, closer} +} + +// filterVersions returns versions that overlap with [startMs, endMs]. +// Uses the unified ResourceVersion that contains both attributes and entities. +func filterVersions(versions []*seriesmetadata.ResourceVersion, startMs, endMs int64) []ResourceAttributeVersion { + result := make([]ResourceAttributeVersion, 0, len(versions)) + for _, v := range versions { + // Version overlaps if: version.MinTime <= endMs AND version.MaxTime >= startMs + if v.MinTime <= endMs && v.MaxTime >= startMs { + rv := ResourceAttributeVersion{ + Attributes: ResourceAttributeData{ + Identifying: v.Identifying, + Descriptive: v.Descriptive, + }, + MinTimeMs: v.MinTime, + MaxTimeMs: v.MaxTime, + } + + // Extract entities from the unified ResourceVersion + for _, entity := range v.Entities { + rv.Entities = append(rv.Entities, EntityData{ + Type: entity.Type, + Identifying: entity.ID, + Descriptive: entity.Description, + }) + } + + result = append(result, rv) + } + } + return result +} + +// resourceAttributesAll returns all resource attributes without filtering by matchers. 
+func (*API) resourceAttributesAll(mr seriesmetadata.Reader, limit int, startMs, endMs int64, nextToken string) apiFuncResult { + var results []ResourceAttributesResponse + var warnings annotations.Annotations + + err := mr.IterVersionedResources(context.Background(), func(labelsHash uint64, resources *seriesmetadata.VersionedResource) error { + // Filter versions to only those overlapping with [start, end] + versions := filterVersions(resources.Versions, startMs, endMs) + if len(versions) == 0 { + return nil + } + + lset, ok := mr.LabelsForHash(labelsHash) + if !ok { + // With incremental head metadata and block Parquet labels, + // unresolved hashes indicate a race or deleted series — skip. + return nil + } + results = append(results, ResourceAttributesResponse{ + Labels: lset, + Versions: versions, + }) + if len(results) >= maxMetadataResults { + return errMaxResultsReached + } + return nil + }) + if err != nil && !errors.Is(err, errMaxResultsReached) { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to iterate resources: %w", err)}, nil, nil} + } + if errors.Is(err, errMaxResultsReached) { + warnings.Add(errMaxResultsReached) + } + + // Sort for deterministic output. + slices.SortFunc(results, func(a, b ResourceAttributesResponse) int { + return labels.Compare(a.Labels, b.Labels) + }) + + // Apply cursor pagination. + results = applyResourceCursor(results, nextToken) + + var respNextToken string + if limit > 0 && len(results) > limit { + respNextToken = getResourceNextToken(results[limit-1].Labels) + results = results[:limit] + warnings.Add(errors.New("results truncated due to limit")) + } + + if results == nil { + results = []ResourceAttributesResponse{} + } + + return apiFuncResult{&PaginatedResourceAttributes{Results: results, NextToken: respNextToken}, nil, warnings, nil} +} + +// resourceAttributePairs returns all unique resource attribute names and their values. +// Primarily intended for autocomplete in the UI. 
+// Query parameters: +// - translate: if "true", translates OTel attribute names to Prometheus label names +// - match[]: if provided, only returns attributes from resources associated with matching series +func (api *API) resourceAttributePairs(r *http.Request) apiFuncResult { + if api.db == nil { + return apiFuncResult{nil, &apiError{errorInternal, errors.New("TSDB not available")}, nil, nil} + } + + mr, err := api.db.SeriesMetadata() + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to get series metadata: %w", err)}, nil, nil} + } + defer mr.Close() + + // Check if translation is requested + translate := r.FormValue("translate") == "true" + + // Build label namer for translation if needed + var labelNamer *otlptranslator.LabelNamer + if translate { + cfg := api.config() + allowUTF8 := cfg.GlobalConfig.MetricNameValidationScheme == model.UTF8Validation + labelNamer = &otlptranslator.LabelNamer{ + UTF8Allowed: allowUTF8, + UnderscoreLabelSanitization: cfg.OTLPConfig.LabelNameUnderscoreSanitization, + PreserveMultipleUnderscores: cfg.OTLPConfig.LabelNamePreserveMultipleUnderscores, + } + } + + // If match[] parameter is provided, filter resources by matching series + var allowedHashes map[uint64]struct{} + if len(r.Form["match[]"]) > 0 { + matcherSets, err := api.parseMatchersParam(r.Form["match[]"]) + if err != nil { + return invalidParamError(err, "match[]") + } + + // Query for matching series to get their hashes + q, err := api.Queryable.Querier(timestamp.FromTime(MinTime), timestamp.FromTime(MaxTime)) + if err != nil { + return apiFuncResult{nil, returnAPIError(err), nil, nil} + } + defer q.Close() + + hints := &storage.SelectHints{ + Start: timestamp.FromTime(MinTime), + End: timestamp.FromTime(MaxTime), + Func: "series", + } + + var set storage.SeriesSet + if len(matcherSets) > 1 { + var sets []storage.SeriesSet + for _, mset := range matcherSets { + s := q.Select(r.Context(), true, hints, mset...) 
+ sets = append(sets, s) + } + set = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) + } else { + set = q.Select(r.Context(), false, hints, matcherSets[0]...) + } + + // Collect the StableHash of each matching series + allowedHashes = make(map[uint64]struct{}) + for set.Next() { + hash := labels.StableHash(set.At().Labels()) + allowedHashes[hash] = struct{}{} + } + if err := set.Err(); err != nil { + return apiFuncResult{nil, returnAPIError(err), nil, nil} + } + } + + // Collect unique label names and their values + labelValues := make(map[string]map[string]struct{}) + + err = mr.IterVersionedResources(r.Context(), func(labelsHash uint64, resources *seriesmetadata.VersionedResource) error { + // If we have a filter, only process resources for matching series + if allowedHashes != nil { + if _, ok := allowedHashes[labelsHash]; !ok { + return nil // Skip this resource + } + } + + for _, version := range resources.Versions { + // Process identifying attributes + for name, value := range version.Identifying { + labelName := name + if translate && labelNamer != nil { + var err error + labelName, err = labelNamer.Build(name) + if err != nil { + continue // Skip attributes that can't be translated + } + } + if labelValues[labelName] == nil { + labelValues[labelName] = make(map[string]struct{}) + } + labelValues[labelName][value] = struct{}{} + } + + // Process descriptive attributes + for name, value := range version.Descriptive { + labelName := name + if translate && labelNamer != nil { + var err error + labelName, err = labelNamer.Build(name) + if err != nil { + continue // Skip attributes that can't be translated + } + } + if labelValues[labelName] == nil { + labelValues[labelName] = make(map[string]struct{}) + } + labelValues[labelName][value] = struct{}{} + } + } + return nil + }) + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to iterate resources: %w", err)}, nil, nil} + } + + result := 
make(map[string][]string, len(labelValues)) + for name, values := range labelValues { + valueList := make([]string, 0, len(values)) + for v := range values { + valueList = append(valueList, v) + } + slices.Sort(valueList) + result[name] = valueList + } + + return apiFuncResult{result, nil, nil, nil} +} + +// parseAttrFilter parses "key:value" into key, value. +func parseAttrFilter(s string) (key, value string, err error) { + key, value, ok := strings.Cut(s, ":") + if !ok { + return "", "", fmt.Errorf("invalid attribute filter %q: expected key:value format", s) + } + if key == "" { + return "", "", fmt.Errorf("invalid attribute filter %q: key must not be empty", s) + } + return key, value, nil +} + +// matchesResourceVersion checks if a ResourceVersion matches the given attribute filters. +// All filters must match (AND semantics). Each filter matches against either identifying or descriptive attributes. +// intersectSorted returns elements present in both sorted uint64 slices. +func intersectSorted(a, b []uint64) []uint64 { + result := make([]uint64, 0, min(len(a), len(b))) + i, j := 0, 0 + for i < len(a) && j < len(b) { + switch { + case a[i] < b[j]: + i++ + case a[i] > b[j]: + j++ + default: + result = append(result, a[i]) + i++ + j++ + } + } + return result +} + +func matchesResourceVersion(rv *seriesmetadata.ResourceVersion, resourceAttrFilters map[string]string) bool { + for k, v := range resourceAttrFilters { + if rv.Identifying[k] == v { + continue + } + if rv.Descriptive[k] == v { + continue + } + return false + } + return true +} + +// filterScopeVersions returns scope versions that overlap with [startMs, endMs]. 
+func filterScopeVersions(versions []*seriesmetadata.ScopeVersion, startMs, endMs int64) []ScopeAttributeVersion { + result := make([]ScopeAttributeVersion, 0, len(versions)) + for _, v := range versions { + if v.MinTime <= endMs && v.MaxTime >= startMs { + result = append(result, ScopeAttributeVersion{ + Name: v.Name, + Version: v.Version, + SchemaURL: v.SchemaURL, + Attrs: v.Attrs, + MinTimeMs: v.MinTime, + MaxTimeMs: v.MaxTime, + }) + } + } + return result +} + +func (api *API) resourceSeriesLookup(r *http.Request) (result apiFuncResult) { + if !api.enableNativeMetadata { + return apiFuncResult{nil, &apiError{errorBadData, errors.New("native metadata is disabled; enable with --enable-feature=native-metadata")}, nil, nil} + } + if api.db == nil { + return apiFuncResult{nil, &apiError{errorInternal, errors.New("TSDB not available")}, nil, nil} + } + + if err := r.ParseForm(); err != nil { + return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error parsing form values: %w", err)}, nil, nil} + } + + // Parse resource attribute filters + resourceAttrFilters := make(map[string]string) + for _, f := range r.Form["resource.attr"] { + k, v, err := parseAttrFilter(f) + if err != nil { + return invalidParamError(err, "resource.attr") + } + resourceAttrFilters[k] = v + } + + if len(resourceAttrFilters) == 0 { + return apiFuncResult{nil, &apiError{errorBadData, errors.New("at least one resource.attr filter is required")}, nil, nil} + } + + start, err := parseTimeParam(r, "start", MinTime) + if err != nil { + return invalidParamError(err, "start") + } + end, err := parseTimeParam(r, "end", MaxTime) + if err != nil { + return invalidParamError(err, "end") + } + if end.Before(start) { + return apiFuncResult{nil, &apiError{errorBadData, errors.New("end timestamp must not be before start timestamp")}, nil, nil} + } + startMs := timestamp.FromTime(start) + endMs := timestamp.FromTime(end) + + limit, err := parseLimitParam(r.FormValue("limit")) + if err != nil { + return 
invalidParamError(err, "limit") + } + + nextToken := r.FormValue("next_token") + + // Parse optional match[] parameters + var matcherSets [][]*labels.Matcher + if len(r.Form["match[]"]) > 0 { + matcherSets, err = api.parseMatchersParam(r.Form["match[]"]) + if err != nil { + return invalidParamError(err, "match[]") + } + } + + mr, err := api.db.SeriesMetadata() + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to get series metadata: %w", err)}, nil, nil} + } + defer mr.Close() + + // Track matched series by labels hash. Each entry accumulates resource + scope versions. + type matchedEntry struct { + resourceVersions []ResourceAttributeVersion + scopeVersions []ScopeAttributeVersion + } + matched := make(map[uint64]*matchedEntry) + + // Filter resources by attribute matches. + // Try indexed path first: intersect candidate sets from the inverted index. + var candidates []uint64 + useIndex := true + for k, v := range resourceAttrFilters { + hashes := mr.LookupResourceAttr(k, v) + if hashes == nil { + // Index not built — fall back to full scan. + useIndex = false + break + } + if candidates == nil { + // Start with the first sorted set (zero-copy from index). + candidates = hashes + } else { + // Intersect: two-pointer intersection of sorted slices. + candidates = intersectSorted(candidates, hashes) + } + if len(candidates) == 0 { + break + } + } + + if useIndex { + // Indexed path: verify each candidate against time range and attribute filters. 
+ for _, hash := range candidates { + if err := r.Context().Err(); err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("request cancelled: %w", err)}, nil, nil} + } + resources, ok := mr.GetVersionedResource(hash) + if !ok { + continue + } + var matchingVersions []*seriesmetadata.ResourceVersion + for _, rv := range resources.Versions { + if rv.MinTime > endMs || rv.MaxTime < startMs { + continue + } + if matchesResourceVersion(rv, resourceAttrFilters) { + matchingVersions = append(matchingVersions, rv) + } + } + if len(matchingVersions) > 0 { + matched[hash] = &matchedEntry{ + resourceVersions: filterVersions(matchingVersions, startMs, endMs), + } + if len(matched) >= maxMetadataResults { + break + } + } + } + } else { + // Fallback: full scan for readers without an index. + err = mr.IterVersionedResources(r.Context(), func(labelsHash uint64, resources *seriesmetadata.VersionedResource) error { + var matchingVersions []*seriesmetadata.ResourceVersion + for _, rv := range resources.Versions { + if rv.MinTime > endMs || rv.MaxTime < startMs { + continue + } + if matchesResourceVersion(rv, resourceAttrFilters) { + matchingVersions = append(matchingVersions, rv) + } + } + if len(matchingVersions) > 0 { + matched[labelsHash] = &matchedEntry{ + resourceVersions: filterVersions(matchingVersions, startMs, endMs), + } + if len(matched) >= maxMetadataResults { + return errMaxResultsReached + } + } + return nil + }) + if err != nil && !errors.Is(err, errMaxResultsReached) { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to iterate resources: %w", err)}, nil, nil} + } + } + + // Populate scope versions for matched series (for complete response) + for hash, entry := range matched { + if scopes, ok := mr.GetVersionedScope(hash); ok { + entry.scopeVersions = filterScopeVersions(scopes.Versions, startMs, endMs) + } + } + + if len(matched) == 0 { + return apiFuncResult{&PaginatedSeriesMetadata{Results: []SeriesMetadataResponse{}}, 
nil, nil, nil} + } + + // Resolve labelsHash -> labels.Labels, and if match[] is provided, intersect + // with label matchers at the same time (avoiding a second full series scan). + ctx := r.Context() + q, err := api.Queryable.Querier(startMs, endMs) + if err != nil { + return apiFuncResult{nil, returnAPIError(err), nil, nil} + } + defer func() { + if result.finalizer == nil { + q.Close() + } + }() + + hints := &storage.SelectHints{ + Start: startMs, + End: endMs, + Func: "series", + } + + hashToLabels := make(map[uint64]labels.Labels) + if len(matcherSets) > 0 { + // match[] provided: Select only matching series, save their labels, + // and intersect with metadata-matched hashes. + var set storage.SeriesSet + if len(matcherSets) > 1 { + var sets []storage.SeriesSet + for _, mset := range matcherSets { + s := q.Select(ctx, true, hints, mset...) + sets = append(sets, s) + } + set = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) + } else { + set = q.Select(ctx, false, hints, matcherSets[0]...) + } + + for set.Next() { + lset := set.At().Labels() + hash := labels.StableHash(lset) + if _, ok := matched[hash]; ok { + hashToLabels[hash] = lset + } + } + if err := set.Err(); err != nil { + return apiFuncResult{nil, returnAPIError(err), nil, nil} + } + + // Remove matched entries that didn't appear in the match[] results. + for hash := range matched { + if _, ok := hashToLabels[hash]; !ok { + delete(matched, hash) + } + } + } else { + // No match[]: resolve labels from metadata reader directly. + // With incremental head metadata and block Parquet labels, + // unresolved hashes indicate a race or deleted series — skip. 
+ for hash := range matched { + lset, ok := mr.LabelsForHash(hash) + if !ok { + delete(matched, hash) + continue + } + hashToLabels[hash] = lset + } + } + + if len(matched) == 0 { + return apiFuncResult{&PaginatedSeriesMetadata{Results: []SeriesMetadataResponse{}}, nil, nil, nil} + } + + // Build response — collect all, then sort for determinism. + results := make([]SeriesMetadataResponse, 0, len(matched)) + for hash, entry := range matched { + lset, ok := hashToLabels[hash] + if !ok { + continue + } + results = append(results, SeriesMetadataResponse{ + Labels: lset, + Versions: entry.resourceVersions, + ScopeVersions: entry.scopeVersions, + }) + } + + // Sort for deterministic output. + slices.SortFunc(results, func(a, b SeriesMetadataResponse) int { + return labels.Compare(a.Labels, b.Labels) + }) + + // Apply cursor pagination. + results = applySeriesMetadataCursor(results, nextToken) + + var warnings annotations.Annotations + if len(matched) >= maxMetadataResults { + warnings.Add(errMaxResultsReached) + } + var respNextToken string + if limit > 0 && len(results) > limit { + respNextToken = getResourceNextToken(results[limit-1].Labels) + results = results[:limit] + warnings.Add(errors.New("results truncated due to limit")) + } + + if results == nil { + results = []SeriesMetadataResponse{} + } + + return apiFuncResult{&PaginatedSeriesMetadata{Results: results, NextToken: respNextToken}, nil, warnings, nil} +} + // RuleDiscovery has info for all rules. type RuleDiscovery struct { RuleGroups []*RuleGroup `json:"groups"` @@ -1786,6 +2557,47 @@ func getRuleGroupNextToken(file, group string) string { return hex.EncodeToString(h.Sum(nil)) } +// getResourceNextToken computes a cursor token for a sorted labels set. +func getResourceNextToken(lset labels.Labels) string { + h := sha1.New() + h.Write([]byte(lset.String())) + return hex.EncodeToString(h.Sum(nil)) +} + +// applyResourceCursor skips past results whose token matches nextToken. +// Results must be sorted. 
If nextToken is empty, returns results unchanged. +func applyResourceCursor(results []ResourceAttributesResponse, nextToken string) []ResourceAttributesResponse { + if nextToken == "" || len(results) == 0 { + return results + } + for i, r := range results { + if getResourceNextToken(r.Labels) == nextToken { + if i+1 < len(results) { + return results[i+1:] + } + return nil + } + } + return results +} + +// applySeriesMetadataCursor skips past results whose token matches nextToken. +// Results must be sorted. If nextToken is empty, returns results unchanged. +func applySeriesMetadataCursor(results []SeriesMetadataResponse, nextToken string) []SeriesMetadataResponse { + if nextToken == "" || len(results) == 0 { + return results + } + for i, r := range results { + if getResourceNextToken(r.Labels) == nextToken { + if i+1 < len(results) { + return results[i+1:] + } + return nil + } + } + return results +} + type prometheusConfig struct { YAML string `json:"yaml"` } diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/openapi.go b/vendor/github.com/prometheus/prometheus/web/api/v1/openapi.go index 59fa8969efa..c063543426e 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/openapi.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/openapi.go @@ -212,6 +212,7 @@ func (*OpenAPIBuilder) buildTags(version string) []*base.Tag { description string }{ {"query", "Query", "Query and evaluate PromQL expressions."}, + {"resources", "Resources", "Query OTel resource attributes associated with time series."}, {"metadata", "Metadata", "Retrieve metric metadata such as type and unit."}, {"labels", "Labels", "Query label names and values."}, {"series", "Series", "Query and manage time series."}, @@ -276,6 +277,10 @@ func (b *OpenAPIBuilder) getAllPathDefinitions() *orderedmap.Map[string, *v3.Pat // Series endpoints. paths.Set("/series", b.seriesPath()) + // Resources endpoints. 
+ paths.Set("/resources", b.resourcesPath()) + paths.Set("/resources/series", b.resourcesSeriesPath()) + // Metadata endpoints. paths.Set("/metadata", b.metadataPath()) diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_examples.go b/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_examples.go index 50e155b1844..246f7fe776f 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_examples.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_examples.go @@ -534,6 +534,68 @@ func labelValuesResponseExamples() *orderedmap.Map[string, *base.Example] { return examples } +// resourcesResponseExamples returns examples for /resources response. +func resourcesResponseExamples() *orderedmap.Map[string, *base.Example] { + examples := orderedmap.New[string, *base.Example]() + + examples.Set("defaultFormat", &base.Example{ + Summary: "Full resource data with labels and versions", + Value: createYAMLNode(map[string]any{ + "status": "success", + "data": []map[string]any{ + { + "labels": map[string]string{ + "__name__": "http_requests_total", + "instance": "localhost:8080", + "job": "myservice", + }, + "versions": []map[string]any{ + { + "resource_attributes": map[string]any{ + "identifying": map[string]string{ + "service.name": "myservice", + "service.namespace": "production", + }, + "descriptive": map[string]string{ + "service.version": "1.0.0", + "host.name": "server-01", + }, + }, + "entities": []map[string]any{ + { + "type": "service", + "identifying": map[string]string{ + "service.name": "myservice", + }, + "descriptive": map[string]string{ + "service.version": "1.0.0", + }, + }, + }, + "min_time_ms": int64(1767357420000), + "max_time_ms": int64(1767361020000), + }, + }, + }, + }, + }), + }) + + examples.Set("attributesFormat", &base.Example{ + Summary: "Simplified attribute name to values map (format=attributes)", + Value: createYAMLNode(map[string]any{ + "status": "success", + "data": map[string][]string{ + 
"service.name": {"myservice", "otherservice"}, + "service.namespace": {"production", "staging"}, + "host.name": {"server-01", "server-02"}, + }, + }), + }) + + return examples +} + // metadataResponseExamples returns examples for /metadata response. func metadataResponseExamples() *orderedmap.Map[string, *base.Example] { examples := orderedmap.New[string, *base.Example]() diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_paths.go b/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_paths.go index 2f5ab592f74..c688c23ada3 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_paths.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_paths.go @@ -21,8 +21,18 @@ import ( "github.com/pb33f/libopenapi/datamodel/high/base" v3 "github.com/pb33f/libopenapi/datamodel/high/v3" "github.com/pb33f/libopenapi/orderedmap" + yaml "go.yaml.in/yaml/v4" ) +// createEnumNodes creates YAML nodes for enum values. +func createEnumNodes(values []string) []*yaml.Node { + nodes := make([]*yaml.Node, len(values)) + for i, v := range values { + nodes[i] = &yaml.Node{Kind: yaml.ScalarNode, Value: v} + } + return nodes +} + // Path definition methods for API endpoints. func (*OpenAPIBuilder) queryPath() *v3.PathItem { @@ -234,6 +244,66 @@ func (*OpenAPIBuilder) seriesPath() *v3.PathItem { } } +func (*OpenAPIBuilder) resourcesPath() *v3.PathItem { + params := []*v3.Parameter{ + queryParamWithExample("format", "Response format. Default returns full resource data with labels and versions. Set to 'attributes' for a simplified map of attribute names to values (useful for autocomplete).", false, + base.CreateSchemaProxy(&base.Schema{ + Type: []string{"string"}, + Enum: createEnumNodes([]string{"attributes"}), + }), []example{{"default", nil}, {"attributes", "attributes"}}), + queryParamWithExample("translate", "When set to 'true', translates OTel attribute names to Prometheus label names. 
Only applicable when format=attributes.", false, + base.CreateSchemaProxy(&base.Schema{ + Type: []string{"string"}, + Enum: createEnumNodes([]string{"true", "false"}), + }), []example{{"example", "true"}}), + queryParamWithExample("match[]", "Series selector to filter resources by matching time series.", false, base.CreateSchemaProxy(&base.Schema{ + Type: []string{"array"}, + Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()}, + }), []example{{"example", []string{"{job=\"prometheus\"}"}}}), + queryParamWithExample("start", "Start timestamp to filter resource versions.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))), + queryParamWithExample("end", "End timestamp to filter resource versions.", false, timestampSchema(), timestampExamples(exampleTime)), + queryParamWithExample("limit", "Maximum number of resources to return.", false, integerSchema(), []example{{"example", 100}}), + queryParamWithExample("next_token", "Cursor token from a previous paginated response to fetch the next page.", false, stringSchema(), []example{{"example", "a1b2c3d4e5f6..."}}), + } + return &v3.PathItem{ + Get: &v3.Operation{ + OperationId: "resources", + Summary: "Get resource attributes", + Description: "Returns OTel resource attributes associated with time series. Supports two response formats based on the 'format' parameter.", + Tags: []string{"resources"}, + Parameters: params, + Responses: responsesWithErrorExamples("ResourcesOutputBody", resourcesResponseExamples(), errorResponseExamples(), "Resource attributes retrieved successfully.", "Error retrieving resource attributes."), + }, + } +} + +func (*OpenAPIBuilder) resourcesSeriesPath() *v3.PathItem { + params := []*v3.Parameter{ + queryParamWithExample("resource.attr", "Resource attribute filter in key:value format. Repeatable. 
All must match (AND).", false, base.CreateSchemaProxy(&base.Schema{ + Type: []string{"array"}, + Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()}, + }), []example{{"example", "service.name:payment-service"}}), + queryParamWithExample("match[]", "Series selector to pre-filter by label matchers.", false, base.CreateSchemaProxy(&base.Schema{ + Type: []string{"array"}, + Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()}, + }), []example{{"example", []string{`{__name__="http_requests_total"}`}}}), + queryParamWithExample("start", "Start timestamp to filter metadata versions.", false, timestampSchema(), timestampExamples(exampleTime.Add(-1*time.Hour))), + queryParamWithExample("end", "End timestamp to filter metadata versions.", false, timestampSchema(), timestampExamples(exampleTime)), + queryParamWithExample("limit", "Maximum number of series to return.", false, integerSchema(), []example{{"example", 100}}), + queryParamWithExample("next_token", "Cursor token from a previous paginated response to fetch the next page.", false, stringSchema(), []example{{"example", "a1b2c3d4e5f6..."}}), + } + return &v3.PathItem{ + Get: &v3.Operation{ + OperationId: "resources-series-lookup", + Summary: "Find series by OTel metadata criteria", + Description: "Reverse lookup: given attribute filters (resource, scope, entity), find all series that have matching OTel metadata. 
At least one metadata filter is required.", + Tags: []string{"resources"}, + Parameters: params, + Responses: responsesWithErrorExamples("ResourcesSeriesOutputBody", nil, errorResponseExamples(), "Matching series with metadata retrieved successfully.", "Error performing reverse lookup."), + }, + } +} + func (*OpenAPIBuilder) metadataPath() *v3.PathItem { params := []*v3.Parameter{ queryParamWithExample("limit", "The maximum number of metrics to return.", false, integerSchema(), []example{{"example", 100}}), diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_schemas.go b/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_schemas.go index de39b43e37b..5a51afe39ab 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_schemas.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/openapi_schemas.go @@ -60,6 +60,15 @@ func (b *OpenAPIBuilder) buildComponents() *v3.Components { schemas.Set("SeriesPostInputBody", b.seriesPostInputBodySchema()) schemas.Set("SeriesDeleteOutputBody", b.simpleResponseBodySchema()) + // Resources schemas. + schemas.Set("ResourcesOutputBody", b.resourcesOutputBodySchema()) + schemas.Set("ResourcesSeriesOutputBody", b.resourcesSeriesOutputBodySchema()) + schemas.Set("ResourcesAttributesOutputBody", b.resourcesAttributesOutputBodySchema()) + schemas.Set("ResourceAttributesResponse", b.resourceAttributesResponseSchema()) + schemas.Set("ResourceAttributeVersion", b.resourceAttributeVersionSchema()) + schemas.Set("ResourceAttributeData", b.resourceAttributeDataSchema()) + schemas.Set("EntityData", b.entityDataSchema()) + // Metadata schemas. 
schemas.Set("Metadata", b.metadataSchema()) schemas.Set("MetadataOutputBody", b.metadataOutputBodySchema()) @@ -735,6 +744,191 @@ func (*OpenAPIBuilder) seriesPostInputBodySchema() *base.SchemaProxy { }) } +func (*OpenAPIBuilder) resourcesOutputBodySchema() *base.SchemaProxy { + // The data field is a paginated object with results array and optional nextToken cursor. + dataProps := orderedmap.New[string, *base.SchemaProxy]() + dataProps.Set("results", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"array"}, + Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/ResourceAttributesResponse")}, + Description: "Array of resource attributes for matching time series.", + })) + dataProps.Set("nextToken", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"string"}, + Description: "Cursor token for fetching the next page. Empty when there are no more results.", + })) + + props := orderedmap.New[string, *base.SchemaProxy]() + props.Set("status", statusSchema()) + props.Set("data", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + Description: "Paginated resource attributes with cursor-based pagination.", + Required: []string{"results"}, + Properties: dataProps, + })) + props.Set("warnings", warningsSchema()) + props.Set("infos", infosSchema()) + + return base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + Description: "Response body for resources endpoint (default format).", + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false}, + Required: []string{"status", "data"}, + Properties: props, + }) +} + +func (*OpenAPIBuilder) resourcesSeriesOutputBodySchema() *base.SchemaProxy { + // The data field is a paginated object with results array and optional nextToken cursor. 
+ dataProps := orderedmap.New[string, *base.SchemaProxy]() + dataProps.Set("results", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"array"}, + Description: "Array of series matching the metadata criteria, with their resource and scope versions.", + Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + Description: "A series matching the metadata criteria with its resource and scope version history.", + })}, + })) + dataProps.Set("nextToken", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"string"}, + Description: "Cursor token for fetching the next page. Empty when there are no more results.", + })) + + props := orderedmap.New[string, *base.SchemaProxy]() + props.Set("status", statusSchema()) + props.Set("data", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + Description: "Paginated series metadata with cursor-based pagination.", + Required: []string{"results"}, + Properties: dataProps, + })) + props.Set("warnings", warningsSchema()) + props.Set("infos", infosSchema()) + + return base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + Description: "Response body for the reverse lookup endpoint.", + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false}, + Required: []string{"status", "data"}, + Properties: props, + }) +} + +func (*OpenAPIBuilder) resourcesAttributesOutputBodySchema() *base.SchemaProxy { + props := orderedmap.New[string, *base.SchemaProxy]() + props.Set("status", statusSchema()) + props.Set("data", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{ + A: base.CreateSchemaProxy(&base.Schema{ + Type: []string{"array"}, + Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: stringSchema()}, + }), + }, + Description: "Map of attribute names to their unique values.", + })) + props.Set("warnings", warningsSchema()) + props.Set("infos", 
infosSchema()) + + return base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + Description: "Response body for resources endpoint with format=attributes (for autocomplete).", + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false}, + Required: []string{"status", "data"}, + Properties: props, + }) +} + +func (*OpenAPIBuilder) resourceAttributesResponseSchema() *base.SchemaProxy { + props := orderedmap.New[string, *base.SchemaProxy]() + props.Set("labels", schemaRef("#/components/schemas/Labels")) + props.Set("versions", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"array"}, + Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/ResourceAttributeVersion")}, + Description: "Array of resource attribute versions for this series.", + })) + + return base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + Description: "Resource attributes for a single time series.", + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false}, + Required: []string{"labels", "versions"}, + Properties: props, + }) +} + +func (*OpenAPIBuilder) resourceAttributeVersionSchema() *base.SchemaProxy { + props := orderedmap.New[string, *base.SchemaProxy]() + props.Set("resource_attributes", schemaRef("#/components/schemas/ResourceAttributeData")) + props.Set("entities", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"array"}, + Items: &base.DynamicValue[*base.SchemaProxy, bool]{A: schemaRef("#/components/schemas/EntityData")}, + Description: "Entities associated with this resource version.", + })) + props.Set("min_time_ms", integerSchemaWithDescription("Start timestamp of this version in milliseconds.")) + props.Set("max_time_ms", integerSchemaWithDescription("End timestamp of this version in milliseconds.")) + + return base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + Description: "A single version of resource attributes with time range.", + 
AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false}, + Required: []string{"resource_attributes", "min_time_ms", "max_time_ms"}, + Properties: props, + }) +} + +func (*OpenAPIBuilder) resourceAttributeDataSchema() *base.SchemaProxy { + props := orderedmap.New[string, *base.SchemaProxy]() + props.Set("identifying", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{ + A: stringSchema(), + }, + Description: "Identifying attributes (e.g., service.name, service.namespace, service.instance.id).", + })) + props.Set("descriptive", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{ + A: stringSchema(), + }, + Description: "Descriptive attributes providing additional context.", + })) + + return base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + Description: "Resource attribute data with identifying and descriptive attributes.", + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false}, + Required: []string{"identifying", "descriptive"}, + Properties: props, + }) +} + +func (*OpenAPIBuilder) entityDataSchema() *base.SchemaProxy { + props := orderedmap.New[string, *base.SchemaProxy]() + props.Set("type", stringSchemaWithDescription("Entity type (e.g., service, host).")) + props.Set("identifying", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{ + A: stringSchema(), + }, + Description: "Identifying attributes for this entity.", + })) + props.Set("descriptive", base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{ + A: stringSchema(), + }, + Description: "Descriptive attributes for this entity.", + })) + + return base.CreateSchemaProxy(&base.Schema{ + Type: []string{"object"}, + 
Description: "Entity data with type and attributes.", + AdditionalProperties: &base.DynamicValue[*base.SchemaProxy, bool]{N: 1, B: false}, + Required: []string{"type", "identifying", "descriptive"}, + Properties: props, + }) +} + func (*OpenAPIBuilder) metadataSchema() *base.SchemaProxy { props := orderedmap.New[string, *base.SchemaProxy]() props.Set("type", stringSchemaWithDescription("Metric type (counter, gauge, histogram, summary, or untyped).")) diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/test_helpers.go b/vendor/github.com/prometheus/prometheus/web/api/v1/test_helpers.go index 873a80c238f..2f05c591459 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/test_helpers.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/test_helpers.go @@ -21,6 +21,7 @@ import ( "github.com/prometheus/common/route" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/tsdb/seriesmetadata" "github.com/prometheus/prometheus/web/api/testhelpers" ) @@ -102,6 +103,7 @@ func newTestAPI(t *testing.T, cfg testhelpers.APIConfig) *testhelpers.APIWrapper false, // enableTypeAndUnitLabels false, // appendMetadata nil, // overrideErrorCode + false, // enableNativeMetadata nil, // featureRegistry OpenAPIOptions{}, // openAPIOptions parser.NewParser(parser.Options{}), // promqlParser @@ -157,3 +159,7 @@ type tsdbAdminStatsAdapter struct { func adaptTSDBAdminStats(t testhelpers.TSDBAdminStats) TSDBAdminStats { return &tsdbAdminStatsAdapter{t} } + +func (*tsdbAdminStatsAdapter) SeriesMetadata() (seriesmetadata.Reader, error) { + return nil, nil +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/LICENSE b/vendor/go.opentelemetry.io/collector/pdata/xpdata/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS 
FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity.go new file mode 100644 index 00000000000..c0ae9f26eab --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package entity // import "go.opentelemetry.io/collector/pdata/xpdata/entity" + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Entity is a helper struct that represents an entity in a more user-friendly way than the underlying +// EntityRef protobuf message. 
After adding an entity to a resource, the entity shares the resource's +// attributes map, so modifications to the entity's attributes are immediately reflected in the resource. +// To create an Entity, use the EntityMap's PutEmpty method. +type Entity struct { + ref EntityRef + attributes pcommon.Map +} + +func (e Entity) Type() string { + return e.ref.Type() +} + +func (e Entity) SchemaURL() string { + return e.ref.SchemaUrl() +} + +func (e Entity) SetSchemaURL(schemaURL string) { + e.ref.SetSchemaUrl(schemaURL) +} + +// IdentifyingAttributes returns an EntityAttributeMap for managing the entity's identifying attributes. +func (e Entity) IdentifyingAttributes() EntityAttributeMap { + return EntityAttributeMap{ + keys: e.ref.IdKeys(), + attributes: e.attributes, + } +} + +// DescriptiveAttributes returns an EntityAttributeMap for managing the entity's descriptive attributes. +func (e Entity) DescriptiveAttributes() EntityAttributeMap { + return EntityAttributeMap{ + keys: e.ref.DescriptionKeys(), + attributes: e.attributes, + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity_attribute_map.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity_attribute_map.go new file mode 100644 index 00000000000..62c642165d0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity_attribute_map.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package entity // import "go.opentelemetry.io/collector/pdata/xpdata/entity" + +import "go.opentelemetry.io/collector/pdata/pcommon" + +// EntityAttributeMap is a wrapper around pcommon.Map that restricts operations to only the keys +// that belong to a specific set of entity attributes (either ID or Description attributes). +type EntityAttributeMap struct { + keys pcommon.StringSlice + attributes pcommon.Map +} + +// Get returns the Value associated with the key and true. 
Returned +// Value is not a copy, it is a reference to the value stored in this map. It is +// allowed to modify the returned value using Value.Set* functions. +// +// If the key does not exist in the entity's key list or in the underlying map, +// returns an invalid instance and false. Calling any functions on the returned +// invalid instance will cause a panic. +func (m EntityAttributeMap) Get(key string) (pcommon.Value, bool) { + if !m.containsKey(key) { + return pcommon.Value{}, false + } + return m.attributes.Get(key) +} + +// CanPut returns true if it's safe to call Put methods on the given key. +// Returns true if: +// - The key is already owned by this entity (in the entity's key list), OR +// - The key doesn't exist in the shared attributes map (available to claim) +// +// Returns false if the key exists in the shared map but belongs to another entity. +// +// Use this method before calling Put* methods to avoid conflicts: +// +// if entity.IdentifyingAttributes().CanPut("service.name") { +// entity.IdentifyingAttributes().PutStr("service.name", "my-service") +// } +func (m EntityAttributeMap) CanPut(key string) bool { + if m.containsKey(key) { + return true + } + _, exists := m.attributes.Get(key) + return !exists +} + +// PutEmpty inserts or updates an empty value to the map under given key +// and returns the updated/inserted value. +// The key is also added to the entity's key list if not already present. +// +// WARNING: This method is destructive and will overwrite any existing value in the shared +// attributes map, even if it belongs to another entity. Use CanPut() to check safety first +// if you need to avoid conflicts with other entities. +func (m EntityAttributeMap) PutEmpty(k string) pcommon.Value { + if !m.containsKey(k) { + m.keys.Append(k) + } + return m.attributes.PutEmpty(k) +} + +// PutStr performs the Insert or Update action. The Value is +// inserted to the map that did not originally have the key. 
The key/value is +// updated to the map where the key already existed. +// The key is also added to the entity's key list if not already present. +// +// WARNING: This method is destructive and will overwrite any existing value in the shared +// attributes map, even if it belongs to another entity. Use CanPut() to check safety first +// if you need to avoid conflicts with other entities. +func (m EntityAttributeMap) PutStr(k, v string) { + if !m.containsKey(k) { + m.keys.Append(k) + } + m.attributes.PutStr(k, v) +} + +// Remove removes the entry associated with the key and returns true if the key existed. +// The key is also removed from the entity's key list. +func (m EntityAttributeMap) Remove(key string) bool { + var keyFound bool + m.keys.RemoveIf(func(k string) bool { + if k == key { + keyFound = true + return true + } + return false + }) + if !keyFound { + return false + } + m.attributes.Remove(key) + return true +} + +func (m EntityAttributeMap) containsKey(key string) bool { + for _, k := range m.keys.All() { + if k == key { + return true + } + } + return false +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity_map.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity_map.go new file mode 100644 index 00000000000..b394bee98c2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/entity_map.go @@ -0,0 +1,121 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package entity // import "go.opentelemetry.io/collector/pdata/xpdata/entity" + +import ( + "iter" + + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// EntityMap logically represents a map of Entity keyed by entity type. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewEntityMap function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type EntityMap struct { + refs EntityRefSlice + attributes pcommon.Map +} + +// NewEntityMap creates a EntityMap with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewEntityMap() EntityMap { + return EntityMap{ + refs: NewEntityRefSlice(), + attributes: pcommon.NewMap(), + } +} + +// Len returns the number of elements in the map. +// +// Returns "0" for a newly instance created with "NewEntityMap()". +func (em EntityMap) Len() int { + return em.refs.Len() +} + +// EnsureCapacity is an operation that ensures the map has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the map capacity will be expanded to equal newCap. +func (em EntityMap) EnsureCapacity(newCap int) { + em.refs.EnsureCapacity(newCap) +} + +// Get returns the Entity associated with the entityType and true. The returned +// Entity is not a copy, it is a reference to the entity stored in this map. +// It is allowed to modify the returned entity. +// Such modification will be applied to the entity stored in this map. +// +// If the entityType does not exist, returns a zero-initialized Entity and false. +// Calling any functions on the returned invalid instance may cause a panic. +func (em EntityMap) Get(entityType string) (Entity, bool) { + if entityType == "" { + return Entity{}, false + } + for i := 0; i < em.Len(); i++ { + if em.refs.At(i).Type() == entityType { + return Entity{ + ref: em.refs.At(i), + attributes: em.attributes, + }, true + } + } + return Entity{}, false +} + +// All returns an iterator over entity type-Entity pairs in the EntityMap. +// +// for entityType, entity := range em.All() { +// ... 
// Do something with entity type and entity +// } +func (em EntityMap) All() iter.Seq2[string, Entity] { + return func(yield func(string, Entity) bool) { + for i := 0; i < em.Len(); i++ { + ref := em.refs.At(i) + entity := Entity{ + ref: ref, + attributes: em.attributes, + } + if !yield(ref.Type(), entity) { + return + } + } + } +} + +// Remove removes the entity associated with the entityType and returns true if the entity +// was present in the map, otherwise returns false. All attributes associated with the entity +// are also removed. +func (em EntityMap) Remove(entityType string) bool { + for i := 0; i < em.refs.Len(); i++ { + ref := em.refs.At(i) + if ref.Type() == entityType { + for _, k := range ref.IdKeys().All() { + em.attributes.Remove(k) + } + for _, k := range ref.DescriptionKeys().All() { + em.attributes.Remove(k) + } + em.refs.RemoveIf(func(er EntityRef) bool { + return er.Type() == entityType + }) + return true + } + } + return false +} + +// PutEmpty inserts or replaces an empty Entity with the specified type and returns it. +// If an entity with the given type already exists, it replaces it and removes all attributes associated with it. +func (em EntityMap) PutEmpty(entityType string) Entity { + em.Remove(entityType) + ref := em.refs.AppendEmpty() + ref.SetType(entityType) + return Entity{ + ref: ref, + attributes: em.attributes, + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/generated_entityref.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/generated_entityref.go new file mode 100644 index 00000000000..d3c1aa3d805 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/generated_entityref.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package entity + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewEntityRef function to create new instances. +// Important: zero-initialized instance is not valid for use. +type EntityRef internal.EntityRefWrapper + +func newEntityRef(orig *internal.EntityRef, state *internal.State) EntityRef { + return EntityRef(internal.NewEntityRefWrapper(orig, state)) +} + +// NewEntityRef creates a new empty EntityRef. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewEntityRef() EntityRef { + return newEntityRef(internal.NewEntityRef(), internal.NewState()) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms EntityRef) MoveTo(dest EntityRef) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + // If they point to the same data, they are the same, nothing to do. + if ms.getOrig() == dest.getOrig() { + return + } + internal.DeleteEntityRef(dest.getOrig(), false) + *dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig() +} + +// SchemaUrl returns the schemaurl associated with this EntityRef. +func (ms EntityRef) SchemaUrl() string { + return ms.getOrig().SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this EntityRef. +func (ms EntityRef) SetSchemaUrl(v string) { + ms.getState().AssertMutable() + ms.getOrig().SchemaUrl = v +} + +// Type returns the type associated with this EntityRef. +func (ms EntityRef) Type() string { + return ms.getOrig().Type +} + +// SetType replaces the type associated with this EntityRef. 
+func (ms EntityRef) SetType(v string) { + ms.getState().AssertMutable() + ms.getOrig().Type = v +} + +// IdKeys returns the IdKeys associated with this EntityRef. +func (ms EntityRef) IdKeys() pcommon.StringSlice { + return pcommon.StringSlice(internal.NewStringSliceWrapper(&ms.getOrig().IdKeys, ms.getState())) +} + +// DescriptionKeys returns the DescriptionKeys associated with this EntityRef. +func (ms EntityRef) DescriptionKeys() pcommon.StringSlice { + return pcommon.StringSlice(internal.NewStringSliceWrapper(&ms.getOrig().DescriptionKeys, ms.getState())) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms EntityRef) CopyTo(dest EntityRef) { + dest.getState().AssertMutable() + internal.CopyEntityRef(dest.getOrig(), ms.getOrig()) +} + +func (ms EntityRef) getOrig() *internal.EntityRef { + return internal.GetEntityRefOrig(internal.EntityRefWrapper(ms)) +} + +func (ms EntityRef) getState() *internal.State { + return internal.GetEntityRefState(internal.EntityRefWrapper(ms)) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/generated_entityrefslice.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/generated_entityrefslice.go new file mode 100644 index 00000000000..6a48bfb42d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/generated_entityrefslice.go @@ -0,0 +1,168 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package entity + +import ( + "iter" + "sort" + + "go.opentelemetry.io/collector/pdata/internal" +) + +// EntityRefSlice logically represents a slice of EntityRef. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewEntityRefSlice function to create new instances. 
+// Important: zero-initialized instance is not valid for use. +type EntityRefSlice internal.EntityRefSliceWrapper + +func newEntityRefSlice(orig *[]*internal.EntityRef, state *internal.State) EntityRefSlice { + return EntityRefSlice(internal.NewEntityRefSliceWrapper(orig, state)) +} + +// NewEntityRefSlice creates a EntityRefSliceWrapper with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewEntityRefSlice() EntityRefSlice { + orig := []*internal.EntityRef(nil) + return newEntityRefSlice(&orig, internal.NewState()) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewEntityRefSlice()". +func (es EntityRefSlice) Len() int { + return len(*es.getOrig()) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es EntityRefSlice) At(i int) EntityRef { + return newEntityRef((*es.getOrig())[i], es.getState()) +} + +// All returns an iterator over index-value pairs in the slice. +// +// for i, v := range es.All() { +// ... // Do something with index-value pair +// } +func (es EntityRefSlice) All() iter.Seq2[int, EntityRef] { + return func(yield func(int, EntityRef) bool) { + for i := 0; i < es.Len(); i++ { + if !yield(i, es.At(i)) { + return + } + } + } +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new EntityRefSlice can be initialized: +// +// es := NewEntityRefSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. 
+// } +func (es EntityRefSlice) EnsureCapacity(newCap int) { + es.getState().AssertMutable() + oldCap := cap(*es.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]*internal.EntityRef, len(*es.getOrig()), newCap) + copy(newOrig, *es.getOrig()) + *es.getOrig() = newOrig +} + +// AppendEmpty will append to the end of the slice an empty EntityRef. +// It returns the newly added EntityRef. +func (es EntityRefSlice) AppendEmpty() EntityRef { + es.getState().AssertMutable() + *es.getOrig() = append(*es.getOrig(), internal.NewEntityRef()) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es EntityRefSlice) MoveAndAppendTo(dest EntityRefSlice) { + es.getState().AssertMutable() + dest.getState().AssertMutable() + // If they point to the same data, they are the same, nothing to do. + if es.getOrig() == dest.getOrig() { + return + } + if *dest.getOrig() == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.getOrig() = *es.getOrig() + } else { + *dest.getOrig() = append(*dest.getOrig(), *es.getOrig()...) + } + *es.getOrig() = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es EntityRefSlice) RemoveIf(f func(EntityRef) bool) { + es.getState().AssertMutable() + newLen := 0 + for i := 0; i < len(*es.getOrig()); i++ { + if f(es.At(i)) { + internal.DeleteEntityRef((*es.getOrig())[i], true) + (*es.getOrig())[i] = nil + + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.getOrig())[newLen] = (*es.getOrig())[i] + // Cannot delete here since we just move the data(or pointer to data) to a different position in the slice. 
+ (*es.getOrig())[i] = nil + newLen++ + } + *es.getOrig() = (*es.getOrig())[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es EntityRefSlice) CopyTo(dest EntityRefSlice) { + dest.getState().AssertMutable() + if es.getOrig() == dest.getOrig() { + return + } + *dest.getOrig() = internal.CopyEntityRefPtrSlice(*dest.getOrig(), *es.getOrig()) +} + +// Sort sorts the EntityRef elements within EntityRefSlice given the +// provided less function so that two instances of EntityRefSlice +// can be compared. +func (es EntityRefSlice) Sort(less func(a, b EntityRef) bool) { + es.getState().AssertMutable() + sort.SliceStable(*es.getOrig(), func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} + +func (ms EntityRefSlice) getOrig() *[]*internal.EntityRef { + return internal.GetEntityRefSliceOrig(internal.EntityRefSliceWrapper(ms)) +} + +func (ms EntityRefSlice) getState() *internal.State { + return internal.GetEntityRefSliceState(internal.EntityRefSliceWrapper(ms)) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/resource_entities.go b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/resource_entities.go new file mode 100644 index 00000000000..b29cb6a4607 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/xpdata/entity/resource_entities.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package entity // import "go.opentelemetry.io/collector/pdata/xpdata/entity" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceEntityRefs returns the EntityRefs associated with this Resource. +// Once EntityRefs is stabilized in the proto definition, +// this function will be available in the pcommon package as part of a Resource method. 
+func ResourceEntityRefs(res pcommon.Resource) EntityRefSlice { + ir := internal.ResourceWrapper(res) + return newEntityRefSlice(&internal.GetResourceOrig(ir).EntityRefs, internal.GetResourceState(ir)) +} + +// ResourceEntities returns the Entities associated with this Resource. +// The returned EntityMap shares the resource's attributes map, so modifications +// to entity attributes are immediately reflected in the resource. +func ResourceEntities(res pcommon.Resource) EntityMap { + return EntityMap{ + refs: ResourceEntityRefs(res), + attributes: res.Attributes(), + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f0cc08efc9f..8962fe25145 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -159,6 +159,10 @@ github.com/alecthomas/kingpin/v2 # github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b ## explicit; go 1.15 github.com/alecthomas/units +# github.com/andybalholm/brotli v1.1.1 +## explicit; go 1.13 +github.com/andybalholm/brotli +github.com/andybalholm/brotli/matchfinder # github.com/armon/go-metrics v0.4.1 ## explicit; go 1.12 github.com/armon/go-metrics @@ -1146,12 +1150,48 @@ github.com/opentracing-contrib/go-stdlib/nethttp github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log +# github.com/parquet-go/bitpack v1.0.0 +## explicit; go 1.24.0 +github.com/parquet-go/bitpack +github.com/parquet-go/bitpack/unsafecast +# github.com/parquet-go/jsonlite v1.0.0 +## explicit; go 1.23 +github.com/parquet-go/jsonlite +# github.com/parquet-go/parquet-go v0.26.3 +## explicit; go 1.24.9 +github.com/parquet-go/parquet-go +github.com/parquet-go/parquet-go/bloom +github.com/parquet-go/parquet-go/bloom/xxhash +github.com/parquet-go/parquet-go/compress +github.com/parquet-go/parquet-go/compress/brotli +github.com/parquet-go/parquet-go/compress/gzip +github.com/parquet-go/parquet-go/compress/lz4 +github.com/parquet-go/parquet-go/compress/snappy 
+github.com/parquet-go/parquet-go/compress/uncompressed +github.com/parquet-go/parquet-go/compress/zstd +github.com/parquet-go/parquet-go/deprecated +github.com/parquet-go/parquet-go/encoding +github.com/parquet-go/parquet-go/encoding/bitpacked +github.com/parquet-go/parquet-go/encoding/bytestreamsplit +github.com/parquet-go/parquet-go/encoding/delta +github.com/parquet-go/parquet-go/encoding/plain +github.com/parquet-go/parquet-go/encoding/rle +github.com/parquet-go/parquet-go/encoding/thrift +github.com/parquet-go/parquet-go/format +github.com/parquet-go/parquet-go/hashprobe +github.com/parquet-go/parquet-go/hashprobe/aeshash +github.com/parquet-go/parquet-go/hashprobe/wyhash +github.com/parquet-go/parquet-go/internal/bytealg +github.com/parquet-go/parquet-go/internal/debug +github.com/parquet-go/parquet-go/internal/memory +github.com/parquet-go/parquet-go/internal/unsafecast +github.com/parquet-go/parquet-go/sparse # github.com/pb33f/jsonpath v0.7.1 ## explicit; go 1.24 github.com/pb33f/jsonpath/pkg/jsonpath github.com/pb33f/jsonpath/pkg/jsonpath/config github.com/pb33f/jsonpath/pkg/jsonpath/token -# github.com/pb33f/libopenapi v0.33.4 +# github.com/pb33f/libopenapi v0.33.5 ## explicit; go 1.24.0 github.com/pb33f/libopenapi github.com/pb33f/libopenapi/datamodel @@ -1322,7 +1362,7 @@ github.com/prometheus/otlptranslator github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v1.8.2-0.20260225105904-7c22e95a1b6f +# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v1.8.2-0.20260303092349-c93a08fb3844 ## explicit; go 1.25.5 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1363,6 +1403,7 @@ github.com/prometheus/prometheus/tsdb/goversion github.com/prometheus/prometheus/tsdb/hashcache github.com/prometheus/prometheus/tsdb/index 
github.com/prometheus/prometheus/tsdb/record +github.com/prometheus/prometheus/tsdb/seriesmetadata github.com/prometheus/prometheus/tsdb/tombstones github.com/prometheus/prometheus/tsdb/tsdbutil github.com/prometheus/prometheus/tsdb/wlog @@ -1381,7 +1422,6 @@ github.com/prometheus/prometheus/util/namevalidationutil github.com/prometheus/prometheus/util/notifications github.com/prometheus/prometheus/util/osutil github.com/prometheus/prometheus/util/pool -github.com/prometheus/prometheus/util/runtime github.com/prometheus/prometheus/util/stats github.com/prometheus/prometheus/util/strutil github.com/prometheus/prometheus/util/teststorage @@ -1630,6 +1670,9 @@ go.opentelemetry.io/collector/pdata/plog go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp go.opentelemetry.io/collector/pdata/ptrace +# go.opentelemetry.io/collector/pdata/xpdata v0.142.0 +## explicit; go 1.24.0 +go.opentelemetry.io/collector/pdata/xpdata/entity # go.opentelemetry.io/collector/pipeline v1.51.0 ## explicit; go 1.24.0 go.opentelemetry.io/collector/pipeline @@ -2310,7 +2353,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk # sigs.k8s.io/yaml v1.6.0 ## explicit; go 1.22 sigs.k8s.io/yaml -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20260225105904-7c22e95a1b6f +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v1.8.2-0.20260303092349-c93a08fb3844 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20251126142931-6f9f62ab6f86 # go.yaml.in/yaml/v3 => github.com/grafana/go-yaml/v3 v3.0.0-20260130164322-e3c24e8f4c87 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20250905101755-5eb4f3acbf71