|
| 1 | +/* |
| 2 | + * Licensed to the Apache Software Foundation (ASF) under one |
| 3 | + * or more contributor license agreements. See the NOTICE file |
| 4 | + * distributed with this work for additional information |
| 5 | + * regarding copyright ownership. The ASF licenses this file |
| 6 | + * to you under the Apache License, Version 2.0 (the |
| 7 | + * "License"); you may not use this file except in compliance |
| 8 | + * with the License. You may obtain a copy of the License at |
| 9 | + * |
| 10 | + * http://www.apache.org/licenses/LICENSE-2.0 |
| 11 | + * |
| 12 | + * Unless required by applicable law or agreed to in writing, |
| 13 | + * software distributed under the License is distributed on an |
| 14 | + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
| 15 | + * KIND, either express or implied. See the License for the |
| 16 | + * specific language governing permissions and limitations |
| 17 | + * under the License. |
| 18 | + */ |
| 19 | + |
| 20 | +#include "iceberg/parquet/parquet_writer.h" |
| 21 | + |
| 22 | +#include <memory> |
| 23 | + |
| 24 | +#include <arrow/c/bridge.h> |
| 25 | +#include <arrow/record_batch.h> |
| 26 | +#include <arrow/util/key_value_metadata.h> |
| 27 | +#include <parquet/arrow/schema.h> |
| 28 | +#include <parquet/arrow/writer.h> |
| 29 | +#include <parquet/file_writer.h> |
| 30 | +#include <parquet/properties.h> |
| 31 | + |
| 32 | +#include "iceberg/arrow/arrow_error_transform_internal.h" |
| 33 | +#include "iceberg/arrow/arrow_fs_file_io_internal.h" |
| 34 | +#include "iceberg/schema_internal.h" |
| 35 | +#include "iceberg/util/checked_cast.h" |
| 36 | +#include "iceberg/util/macros.h" |
| 37 | + |
| 38 | +namespace iceberg::parquet { |
| 39 | + |
| 40 | +namespace { |
| 41 | + |
| 42 | +Result<std::shared_ptr<::arrow::io::OutputStream>> OpenOutputStream( |
| 43 | + const WriterOptions& options) { |
| 44 | + auto io = internal::checked_pointer_cast<arrow::ArrowFileSystemFileIO>(options.io); |
| 45 | + ICEBERG_ARROW_ASSIGN_OR_RETURN(auto output, io->fs()->OpenOutputStream(options.path)); |
| 46 | + return output; |
| 47 | +} |
| 48 | + |
| 49 | +} // namespace |
| 50 | + |
| 51 | +class ParquetWriter::Impl { |
| 52 | + public: |
| 53 | + Status Open(const WriterOptions& options) { |
| 54 | + auto writer_properties = |
| 55 | + ::parquet::WriterProperties::Builder().memory_pool(pool_)->build(); |
| 56 | + auto arrow_writer_properties = ::parquet::default_arrow_writer_properties(); |
| 57 | + |
| 58 | + ArrowSchema c_schema; |
| 59 | + ICEBERG_RETURN_UNEXPECTED(ToArrowSchema(*options.schema, &c_schema)); |
| 60 | + ICEBERG_ARROW_ASSIGN_OR_RETURN(arrow_schema_, ::arrow::ImportSchema(&c_schema)); |
| 61 | + |
| 62 | + std::shared_ptr<::parquet::SchemaDescriptor> schema_descriptor; |
| 63 | + ICEBERG_ARROW_RETURN_NOT_OK( |
| 64 | + ::parquet::arrow::ToParquetSchema(arrow_schema_.get(), *writer_properties, |
| 65 | + *arrow_writer_properties, &schema_descriptor)); |
| 66 | + auto schema_node = std::static_pointer_cast<::parquet::schema::GroupNode>( |
| 67 | + schema_descriptor->schema_root()); |
| 68 | + |
| 69 | + ICEBERG_ASSIGN_OR_RAISE(output_stream_, OpenOutputStream(options)); |
| 70 | + auto file_writer = ::parquet::ParquetFileWriter::Open( |
| 71 | + output_stream_, std::move(schema_node), std::move(writer_properties)); |
| 72 | + ICEBERG_ARROW_RETURN_NOT_OK( |
| 73 | + ::parquet::arrow::FileWriter::Make(pool_, std::move(file_writer), arrow_schema_, |
| 74 | + std::move(arrow_writer_properties), &writer_)); |
| 75 | + |
| 76 | + return {}; |
| 77 | + } |
| 78 | + |
| 79 | + Status Write(ArrowArray array) { |
| 80 | + ICEBERG_ARROW_ASSIGN_OR_RETURN(auto batch, |
| 81 | + ::arrow::ImportRecordBatch(&array, arrow_schema_)); |
| 82 | + |
| 83 | + ICEBERG_ARROW_RETURN_NOT_OK(writer_->WriteRecordBatch(*batch)); |
| 84 | + |
| 85 | + return {}; |
| 86 | + } |
| 87 | + |
| 88 | + // Close the writer and release resources |
| 89 | + Status Close() { |
| 90 | + if (writer_ == nullptr) { |
| 91 | + return {}; // Already closed |
| 92 | + } |
| 93 | + |
| 94 | + ICEBERG_ARROW_RETURN_NOT_OK(writer_->Close()); |
| 95 | + auto& metadata = writer_->metadata(); |
| 96 | + split_offsets_.reserve(metadata->num_row_groups()); |
| 97 | + for (int i = 0; i < metadata->num_row_groups(); ++i) { |
| 98 | + split_offsets_.push_back(metadata->RowGroup(i)->file_offset()); |
| 99 | + } |
| 100 | + writer_.reset(); |
| 101 | + |
| 102 | + ICEBERG_ARROW_ASSIGN_OR_RETURN(total_bytes_, output_stream_->Tell()); |
| 103 | + ICEBERG_ARROW_RETURN_NOT_OK(output_stream_->Close()); |
| 104 | + return {}; |
| 105 | + } |
| 106 | + |
| 107 | + bool Closed() const { return writer_ == nullptr; } |
| 108 | + |
| 109 | + int64_t length() const { return total_bytes_; } |
| 110 | + |
| 111 | + std::vector<int64_t> split_offsets() const { return split_offsets_; } |
| 112 | + |
| 113 | + private: |
| 114 | + // TODO(gangwu): make memory pool configurable |
| 115 | + ::arrow::MemoryPool* pool_ = ::arrow::default_memory_pool(); |
| 116 | + // Schema to write from the Parquet file. |
| 117 | + std::shared_ptr<::arrow::Schema> arrow_schema_; |
| 118 | + // The output stream to write Parquet file. |
| 119 | + std::shared_ptr<::arrow::io::OutputStream> output_stream_; |
| 120 | + // Parquet file writer to write ArrowArray. |
| 121 | + std::unique_ptr<::parquet::arrow::FileWriter> writer_; |
| 122 | + // Total length of the written Parquet file. |
| 123 | + int64_t total_bytes_{0}; |
| 124 | + // Row group start offsets in the Parquet file. |
| 125 | + std::vector<int64_t> split_offsets_; |
| 126 | +}; |
| 127 | + |
// Defined out-of-line so the std::unique_ptr<Impl> member can destroy Impl,
// which is an incomplete type at the point of the class declaration (pimpl).
ParquetWriter::~ParquetWriter() = default;
| 129 | + |
| 130 | +Status ParquetWriter::Open(const WriterOptions& options) { |
| 131 | + impl_ = std::make_unique<Impl>(); |
| 132 | + return impl_->Open(options); |
| 133 | +} |
| 134 | + |
| 135 | +Status ParquetWriter::Write(ArrowArray array) { return impl_->Write(array); } |
| 136 | + |
| 137 | +Status ParquetWriter::Close() { return impl_->Close(); } |
| 138 | + |
// Metrics collected for the written file; unavailable until closed.
// NOTE(review): both branches produce an empty optional — `return {};` on a
// std::optional<Metrics> constructs std::nullopt — so the Closed() check is
// currently moot. Presumably metrics collection is not implemented yet;
// confirm and return a populated Metrics value here once it is.
std::optional<Metrics> ParquetWriter::metrics() {
  if (!impl_->Closed()) {
    return std::nullopt;
  }
  return {};
}
| 145 | + |
| 146 | +std::optional<int64_t> ParquetWriter::length() { |
| 147 | + if (!impl_->Closed()) { |
| 148 | + return std::nullopt; |
| 149 | + } |
| 150 | + return impl_->length(); |
| 151 | +} |
| 152 | + |
| 153 | +std::vector<int64_t> ParquetWriter::split_offsets() { |
| 154 | + if (!impl_->Closed()) { |
| 155 | + return {}; |
| 156 | + } |
| 157 | + return impl_->split_offsets(); |
| 158 | +} |
| 159 | + |
| 160 | +void RegisterWriter() { |
| 161 | + static WriterFactoryRegistry parquet_writer_register( |
| 162 | + FileFormatType::kParquet, []() -> Result<std::unique_ptr<Writer>> { |
| 163 | + return std::make_unique<ParquetWriter>(); |
| 164 | + }); |
| 165 | +} |
| 166 | + |
| 167 | +} // namespace iceberg::parquet |
0 commit comments