|
| 1 | +/* |
| 2 | + * Licensed to the Apache Software Foundation (ASF) under one |
| 3 | + * or more contributor license agreements. See the NOTICE file |
| 4 | + * distributed with this work for additional information |
| 5 | + * regarding copyright ownership. The ASF licenses this file |
| 6 | + * to you under the Apache License, Version 2.0 (the |
| 7 | + * "License"); you may not use this file except in compliance |
| 8 | + * with the License. You may obtain a copy of the License at |
| 9 | + * |
| 10 | + * http://www.apache.org/licenses/LICENSE-2.0 |
| 11 | + * |
| 12 | + * Unless required by applicable law or agreed to in writing, |
| 13 | + * software distributed under the License is distributed on an |
| 14 | + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
| 15 | + * KIND, either express or implied. See the License for the |
| 16 | + * specific language governing permissions and limitations |
| 17 | + * under the License. |
| 18 | + */ |
| 19 | + |
#include "iceberg/avro/avro_reader.h"

#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>

#include <arrow/array/builder_base.h>
#include <arrow/c/bridge.h>
#include <arrow/filesystem/filesystem.h>
#include <arrow/result.h>
#include <arrow/type.h>
#include <avro/DataFile.hh>
#include <avro/GenericDatum.hh>

#include "iceberg/arrow/arrow_fs_file_io.h"
#include "iceberg/avro/avro_schema_util_internal.h"
#include "iceberg/avro/avro_stream_internal.h"
#include "iceberg/schema_internal.h"
#include "iceberg/util/checked_cast.h"
#include "iceberg/util/macros.h"
| 38 | + |
| 39 | +namespace iceberg::avro { |
| 40 | + |
| 41 | +namespace { |
| 42 | + |
| 43 | +Result<std::unique_ptr<AvroInputStream>> CreateInputStream(const ReaderOptions& options, |
| 44 | + int64_t buffer_size) { |
| 45 | + ::arrow::fs::FileInfo file_info(options.path, ::arrow::fs::FileType::File); |
| 46 | + if (options.length) { |
| 47 | + file_info.set_size(options.length.value()); |
| 48 | + } |
| 49 | + |
| 50 | + auto io = internal::checked_pointer_cast<arrow::ArrowFileSystemFileIO>(options.io); |
| 51 | + auto result = io->fs()->OpenInputFile(file_info); |
| 52 | + if (!result.ok()) { |
| 53 | + return IOError("Failed to open file {} for {}", options.path, |
| 54 | + result.status().message()); |
| 55 | + } |
| 56 | + |
| 57 | + return std::make_unique<AvroInputStream>(result.MoveValueUnsafe(), buffer_size); |
| 58 | +} |
| 59 | + |
| 60 | +} // namespace |
| 61 | + |
// A stateful context to keep track of the reading progress.
// Holds the per-reader state needed to turn avro datums into an arrow record
// batch; members are accessed directly by AvroBatchReader::Impl.
struct ReadContext {
  // The datum to reuse for reading the data, so a single allocation is
  // reused across records.
  std::unique_ptr<::avro::GenericDatum> datum_;
  // The arrow schema to build the record batch.
  std::shared_ptr<::arrow::Schema> arrow_schema_;
  // The builder to build the record batch; a struct builder whose fields
  // mirror arrow_schema_.
  std::shared_ptr<::arrow::ArrayBuilder> builder_;
};
| 71 | + |
| 72 | +// TODO(gang.wu): there are a lot to do to make this reader work. |
| 73 | +// 1. read the datum from the avro file |
| 74 | +// 2. append the datum to the builder until it reaches the batch size |
| 75 | +// 3. convert the builder to the record batch |
| 76 | +// 4. check if the reader has reached the split end |
| 77 | +// 5. prune the reader schema based on the projection |
| 78 | +// 6. read key-value metadata from the avro file |
| 79 | +// 7. collect basic reader metrics |
| 80 | +class AvroBatchReader::Impl { |
| 81 | + public: |
| 82 | + Result<Data> Next(); |
| 83 | + |
| 84 | + Status Open(const ReaderOptions& options) { |
| 85 | + batch_size_ = options.batch_size; |
| 86 | + if (options.split) { |
| 87 | + split_end_ = options.split->offset + options.split->length; |
| 88 | + if (options.length) { |
| 89 | + split_end_ = |
| 90 | + std::min(split_end_.value(), static_cast<int64_t>(options.length.value())); |
| 91 | + } |
| 92 | + } |
| 93 | + read_schema_ = options.projection; |
| 94 | + |
| 95 | + // Open the input stream and adapt to the avro interface. |
| 96 | + // TODO(gangwu): make this configurable |
| 97 | + constexpr int64_t kDefaultBufferSize = 1024 * 1024; |
| 98 | + ICEBERG_ASSIGN_OR_RAISE(auto input_stream, |
| 99 | + CreateInputStream(options, kDefaultBufferSize)); |
| 100 | + |
| 101 | + // Create a base reader without setting reader schema to enable projection. |
| 102 | + auto base_reader = |
| 103 | + std::make_unique<::avro::DataFileReaderBase>(std::move(input_stream)); |
| 104 | + const ::avro::ValidSchema& file_schema = base_reader->dataSchema(); |
| 105 | + |
| 106 | + // Validate field ids in the file schema. |
| 107 | + HasIdVisitor has_id_visitor; |
| 108 | + ICEBERG_RETURN_UNEXPECTED(has_id_visitor.Visit(file_schema)); |
| 109 | + if (has_id_visitor.HasNoIds()) { |
| 110 | + // TODO(gangwu): support applying field-ids based on name mapping |
| 111 | + return NotImplemented("Avro file schema has no field IDs"); |
| 112 | + } |
| 113 | + if (!has_id_visitor.AllHaveIds()) { |
| 114 | + return InvalidSchema("Not all fields in the Avro file schema have field IDs"); |
| 115 | + } |
| 116 | + |
| 117 | + // Project the read schema on top of the file schema. |
| 118 | + // TODO(gangwu): support pruning source fields |
| 119 | + ICEBERG_ASSIGN_OR_RAISE(projection_, Project(*options.projection, file_schema.root(), |
| 120 | + /*prune_source=*/false)); |
| 121 | + base_reader->init(file_schema); |
| 122 | + reader_ = std::make_unique<::avro::DataFileReader<::avro::GenericDatum>>( |
| 123 | + std::move(base_reader)); |
| 124 | + return {}; |
| 125 | + } |
| 126 | + |
| 127 | + Status Close() { |
| 128 | + if (reader_ != nullptr) { |
| 129 | + reader_->close(); |
| 130 | + reader_.reset(); |
| 131 | + } |
| 132 | + return {}; |
| 133 | + } |
| 134 | + |
| 135 | + private: |
| 136 | + Status InitReadContext() { |
| 137 | + context_ = std::make_unique<ReadContext>(); |
| 138 | + context_->datum_ = std::make_unique<::avro::GenericDatum>(reader_->readerSchema()); |
| 139 | + |
| 140 | + ArrowSchema arrow_schema; |
| 141 | + ICEBERG_RETURN_UNEXPECTED(ToArrowSchema(*read_schema_, &arrow_schema)); |
| 142 | + auto import_result = ::arrow::ImportSchema(&arrow_schema); |
| 143 | + if (!import_result.ok()) { |
| 144 | + return InvalidSchema("Failed to import the arrow schema: {}", |
| 145 | + import_result.status().message()); |
| 146 | + } |
| 147 | + context_->arrow_schema_ = import_result.MoveValueUnsafe(); |
| 148 | + |
| 149 | + auto arrow_struct_type = |
| 150 | + std::make_shared<::arrow::StructType>(context_->arrow_schema_->fields()); |
| 151 | + auto builder_result = ::arrow::MakeBuilder(arrow_struct_type); |
| 152 | + if (!builder_result.ok()) { |
| 153 | + return InvalidSchema("Failed to make the arrow builder: {}", |
| 154 | + builder_result.status().message()); |
| 155 | + } |
| 156 | + context_->builder_ = builder_result.MoveValueUnsafe(); |
| 157 | + |
| 158 | + return {}; |
| 159 | + } |
| 160 | + |
| 161 | + private: |
| 162 | + // Max number of rows in the record batch to read. |
| 163 | + int64_t batch_size_{}; |
| 164 | + // The end of the split to read and used to terminate the reading. |
| 165 | + std::optional<int64_t> split_end_; |
| 166 | + // The schema to read. |
| 167 | + std::shared_ptr<Schema> read_schema_; |
| 168 | + // The projection result to apply to the read schema. |
| 169 | + SchemaProjection projection_; |
| 170 | + // The avro reader to read the data into a datum. |
| 171 | + std::unique_ptr<::avro::DataFileReader<::avro::GenericDatum>> reader_; |
| 172 | + // The context to keep track of the reading progress. |
| 173 | + std::unique_ptr<ReadContext> context_; |
| 174 | +}; |
| 175 | + |
| 176 | +Result<Reader::Data> AvroBatchReader::Next() { return impl_->Next(); } |
| 177 | + |
| 178 | +Status AvroBatchReader::Open(const ReaderOptions& options) { |
| 179 | + impl_ = std::make_unique<Impl>(); |
| 180 | + return impl_->Open(options); |
| 181 | +} |
| 182 | + |
| 183 | +Status AvroBatchReader::Close() { return impl_->Close(); } |
| 184 | + |
| 185 | +} // namespace iceberg::avro |
0 commit comments