From 49e2a6542a75c2e1708e2a76ee459f4849bb5ff9 Mon Sep 17 00:00:00 2001 From: Moreno Date: Mon, 11 Aug 2025 10:44:48 +0200 Subject: [PATCH 1/7] Improve package structure This package structure more swifty than previous one. With this structure testing and building is easier and it's no more needed scripts --- Arrow/Package.resolved | 24 --- Arrow/Package.swift | 64 ------ Arrow/README.md | 56 ----- ArrowFlight/Package.resolved | 195 ------------------ ArrowFlight/Package.swift | 61 ------ Package.swift | 26 +-- .../Arrow/ArrowArray.swift | 0 .../Arrow/ArrowArrayBuilder.swift | 0 .../Arrow/ArrowBuffer.swift | 0 .../Arrow/ArrowBufferBuilder.swift | 0 .../Arrow/ArrowCExporter.swift | 0 .../Arrow/ArrowCImporter.swift | 0 .../Sources => Sources}/Arrow/ArrowData.swift | 0 .../Arrow/ArrowDecoder.swift | 0 .../Arrow/ArrowEncoder.swift | 0 .../Arrow/ArrowReader.swift | 0 .../Arrow/ArrowReaderHelper.swift | 0 .../Arrow/ArrowSchema.swift | 0 .../Arrow/ArrowTable.swift | 0 .../Sources => Sources}/Arrow/ArrowType.swift | 0 .../Arrow/ArrowWriter.swift | 0 .../Arrow/ArrowWriterHelper.swift | 0 .../Arrow/BitUtility.swift | 0 .../Arrow/ChunkedArray.swift | 0 .../Arrow/File_generated.swift | 0 .../Arrow/MemoryAllocator.swift | 0 .../Arrow/Message_generated.swift | 0 .../Sources => Sources}/Arrow/ProtoUtil.swift | 0 .../Arrow/Schema_generated.swift | 0 .../Arrow/SparseTensor_generated.swift | 0 .../Arrow/Tensor_generated.swift | 0 .../Sources => Sources}/ArrowC/ArrowCData.c | 0 .../ArrowC/include/ArrowCData.h | 0 .../ArrowFlight/Flight.grpc.swift | 0 .../ArrowFlight/Flight.pb.swift | 0 .../ArrowFlight/FlightAction.swift | 0 .../ArrowFlight/FlightActionType.swift | 0 .../ArrowFlight/FlightClient.swift | 0 .../ArrowFlight/FlightCriteria.swift | 0 .../ArrowFlight/FlightData.swift | 0 .../ArrowFlight/FlightDescriptor.swift | 0 .../ArrowFlight/FlightEndpoint.swift | 0 .../ArrowFlight/FlightInfo.swift | 0 .../ArrowFlight/FlightLocation.swift | 0 .../ArrowFlight/FlightPutResult.swift | 0 
.../ArrowFlight/FlightResult.swift | 0 .../ArrowFlight/FlightSchemaResult.swift | 0 .../ArrowFlight/FlightServer.swift | 0 .../ArrowFlight/FlightSql.pb.swift | 0 .../ArrowFlight/FlightTicket.swift | 0 .../ArrowFlight/RecordBatchStreamReader.swift | 0 .../ArrowFlight/RecordBatchStreamWriter.swift | 0 .../ArrowFlightTests/FlightTest.swift | 0 .../ArrowTests/ArrayBuilderTest.swift | 0 .../ArrowTests/ArrayTests.swift | 0 .../ArrowTests/CDataTests.swift | 0 .../ArrowTests/CodableTests.swift | 0 .../Tests => Tests}/ArrowTests/IPCTests.swift | 2 +- .../ArrowTests/RecordBatchTests.swift | 0 .../ArrowTests/TableTests.swift | 0 60 files changed, 12 insertions(+), 416 deletions(-) delete mode 100644 Arrow/Package.resolved delete mode 100644 Arrow/Package.swift delete mode 100644 Arrow/README.md delete mode 100644 ArrowFlight/Package.resolved delete mode 100644 ArrowFlight/Package.swift rename {Arrow/Sources => Sources}/Arrow/ArrowArray.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowArrayBuilder.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowBuffer.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowBufferBuilder.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowCExporter.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowCImporter.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowData.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowDecoder.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowEncoder.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowReader.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowReaderHelper.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowSchema.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowTable.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowType.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowWriter.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ArrowWriterHelper.swift (100%) rename {Arrow/Sources => 
Sources}/Arrow/BitUtility.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ChunkedArray.swift (100%) rename {Arrow/Sources => Sources}/Arrow/File_generated.swift (100%) rename {Arrow/Sources => Sources}/Arrow/MemoryAllocator.swift (100%) rename {Arrow/Sources => Sources}/Arrow/Message_generated.swift (100%) rename {Arrow/Sources => Sources}/Arrow/ProtoUtil.swift (100%) rename {Arrow/Sources => Sources}/Arrow/Schema_generated.swift (100%) rename {Arrow/Sources => Sources}/Arrow/SparseTensor_generated.swift (100%) rename {Arrow/Sources => Sources}/Arrow/Tensor_generated.swift (100%) rename {Arrow/Sources => Sources}/ArrowC/ArrowCData.c (100%) rename {Arrow/Sources => Sources}/ArrowC/include/ArrowCData.h (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/Flight.grpc.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/Flight.pb.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightAction.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightActionType.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightClient.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightCriteria.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightData.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightDescriptor.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightEndpoint.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightInfo.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightLocation.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightPutResult.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightResult.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightSchemaResult.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightServer.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/FlightSql.pb.swift (100%) rename 
{ArrowFlight/Sources => Sources}/ArrowFlight/FlightTicket.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/RecordBatchStreamReader.swift (100%) rename {ArrowFlight/Sources => Sources}/ArrowFlight/RecordBatchStreamWriter.swift (100%) rename {ArrowFlight/Tests => Tests}/ArrowFlightTests/FlightTest.swift (100%) rename {Arrow/Tests => Tests}/ArrowTests/ArrayBuilderTest.swift (100%) rename {Arrow/Tests => Tests}/ArrowTests/ArrayTests.swift (100%) rename {Arrow/Tests => Tests}/ArrowTests/CDataTests.swift (100%) rename {Arrow/Tests => Tests}/ArrowTests/CodableTests.swift (100%) rename {Arrow/Tests => Tests}/ArrowTests/IPCTests.swift (99%) rename {Arrow/Tests => Tests}/ArrowTests/RecordBatchTests.swift (100%) rename {Arrow/Tests => Tests}/ArrowTests/TableTests.swift (100%) diff --git a/Arrow/Package.resolved b/Arrow/Package.resolved deleted file mode 100644 index 907d438..0000000 --- a/Arrow/Package.resolved +++ /dev/null @@ -1,24 +0,0 @@ -{ - "originHash" : "bf353d71e72c7f9a8c9662e9debaf3c9a0e4127ad01dca316cc273758f5c8391", - "pins" : [ - { - "identity" : "flatbuffers", - "kind" : "remoteSourceControl", - "location" : "https://github.com/google/flatbuffers.git", - "state" : { - "revision" : "1c514626e83c20fffa8557e75641848e1e15cd5e", - "version" : "25.2.10" - } - }, - { - "identity" : "swift-atomics", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-atomics.git", - "state" : { - "revision" : "b601256eab081c0f92f059e12818ac1d4f178ff7", - "version" : "1.3.0" - } - } - ], - "version" : 3 -} diff --git a/Arrow/Package.swift b/Arrow/Package.swift deleted file mode 100644 index 7f3dff9..0000000 --- a/Arrow/Package.swift +++ /dev/null @@ -1,64 +0,0 @@ -// swift-tools-version: 5.10 -// The swift-tools-version declares the minimum version of Swift required to build this package. - -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. 
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -import PackageDescription - -let package = Package( - name: "Arrow", - platforms: [ - .macOS(.v10_14) - ], - products: [ - .library( - name: "Arrow", - targets: ["Arrow"]) - ], - dependencies: [ - .package(url: "https://github.com/google/flatbuffers.git", from: "25.2.10"), - .package(url: "https://github.com/apple/swift-atomics.git", from: "1.3.0") - ], - targets: [ - .target( - name: "ArrowC", - path: "Sources/ArrowC", - swiftSettings: [ - // build: .unsafeFlags(["-warnings-as-errors"]) - ] - - ), - .target( - name: "Arrow", - dependencies: ["ArrowC", - .product(name: "FlatBuffers", package: "flatbuffers"), - .product(name: "Atomics", package: "swift-atomics") - ], - swiftSettings: [ - // build: .unsafeFlags(["-warnings-as-errors"]) - ] - ), - .testTarget( - name: "ArrowTests", - dependencies: ["Arrow", "ArrowC"], - swiftSettings: [ - // build: .unsafeFlags(["-warnings-as-errors"]) - ] - ) - ] -) diff --git a/Arrow/README.md b/Arrow/README.md deleted file mode 100644 index 3acded8..0000000 --- a/Arrow/README.md +++ /dev/null @@ -1,56 +0,0 @@ - - -# Apache Arrow Swift - -An implementation of Arrow targeting Swift. - -## Status - -## Memory Management - -- Allocations are 64-byte aligned and padded to 8-bytes. 
-- Allocations are automatically garbage collected - -## Arrays - -### Primitive Types - -- Int8, Int16, Int32, Int64 -- UInt8, UInt16, UInt32, UInt64 -- Float, Double -- String (utf-8) - -### Parametric Types - -- Date32 -- Date64 - -### Type Metadata - -- Data Types -- Fields -- Schema - -## Test data generation - -Test data files for the reader tests are generated by an executable built in go whose source is included in the data-generator directory. -```sh -$ go build -o swift-datagen -``` diff --git a/ArrowFlight/Package.resolved b/ArrowFlight/Package.resolved deleted file mode 100644 index 351df12..0000000 --- a/ArrowFlight/Package.resolved +++ /dev/null @@ -1,195 +0,0 @@ -{ - "originHash" : "8aa0aeba452ce44a74de9eec71fef142d262ac569314743fc16523e396ae4124", - "pins" : [ - { - "identity" : "flatbuffers", - "kind" : "remoteSourceControl", - "location" : "https://github.com/google/flatbuffers.git", - "state" : { - "branch" : "v25.2.10", - "revision" : "1c514626e83c20fffa8557e75641848e1e15cd5e" - } - }, - { - "identity" : "grpc-swift", - "kind" : "remoteSourceControl", - "location" : "https://github.com/grpc/grpc-swift.git", - "state" : { - "revision" : "a56a157218877ef3e9625f7e1f7b2cb7e46ead1b", - "version" : "1.26.1" - } - }, - { - "identity" : "swift-algorithms", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-algorithms.git", - "state" : { - "revision" : "87e50f483c54e6efd60e885f7f5aa946cee68023", - "version" : "1.2.1" - } - }, - { - "identity" : "swift-asn1", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-asn1.git", - "state" : { - "revision" : "a54383ada6cecde007d374f58f864e29370ba5c3", - "version" : "1.3.2" - } - }, - { - "identity" : "swift-async-algorithms", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-async-algorithms.git", - "state" : { - "revision" : "042e1c4d9d19748c9c228f8d4ebc97bb1e339b0b", - "version" : "1.0.4" - } - }, - { - "identity" : 
"swift-atomics", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-atomics.git", - "state" : { - "revision" : "b601256eab081c0f92f059e12818ac1d4f178ff7", - "version" : "1.3.0" - } - }, - { - "identity" : "swift-certificates", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-certificates.git", - "state" : { - "revision" : "999fd70c7803da89f3904d635a6815a2a7cd7585", - "version" : "1.10.0" - } - }, - { - "identity" : "swift-collections", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-collections.git", - "state" : { - "revision" : "c1805596154bb3a265fd91b8ac0c4433b4348fb0", - "version" : "1.2.0" - } - }, - { - "identity" : "swift-crypto", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-crypto.git", - "state" : { - "revision" : "e8d6eba1fef23ae5b359c46b03f7d94be2f41fed", - "version" : "3.12.3" - } - }, - { - "identity" : "swift-http-structured-headers", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-http-structured-headers.git", - "state" : { - "revision" : "db6eea3692638a65e2124990155cd220c2915903", - "version" : "1.3.0" - } - }, - { - "identity" : "swift-http-types", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-http-types.git", - "state" : { - "revision" : "a0a57e949a8903563aba4615869310c0ebf14c03", - "version" : "1.4.0" - } - }, - { - "identity" : "swift-log", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-log.git", - "state" : { - "revision" : "3d8596ed08bd13520157f0355e35caed215ffbfa", - "version" : "1.6.3" - } - }, - { - "identity" : "swift-nio", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-nio.git", - "state" : { - "revision" : "34d486b01cd891297ac615e40d5999536a1e138d", - "version" : "2.83.0" - } - }, - { - "identity" : "swift-nio-extras", - "kind" : "remoteSourceControl", - "location" : 
"https://github.com/apple/swift-nio-extras.git", - "state" : { - "revision" : "145db1962f4f33a4ea07a32e751d5217602eea29", - "version" : "1.28.0" - } - }, - { - "identity" : "swift-nio-http2", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-nio-http2.git", - "state" : { - "revision" : "4281466512f63d1bd530e33f4aa6993ee7864be0", - "version" : "1.36.0" - } - }, - { - "identity" : "swift-nio-ssl", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-nio-ssl.git", - "state" : { - "revision" : "4b38f35946d00d8f6176fe58f96d83aba64b36c7", - "version" : "2.31.0" - } - }, - { - "identity" : "swift-nio-transport-services", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-nio-transport-services.git", - "state" : { - "revision" : "cd1e89816d345d2523b11c55654570acd5cd4c56", - "version" : "1.24.0" - } - }, - { - "identity" : "swift-numerics", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-numerics.git", - "state" : { - "revision" : "e0ec0f5f3af6f3e4d5e7a19d2af26b481acb6ba8", - "version" : "1.0.3" - } - }, - { - "identity" : "swift-protobuf", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-protobuf.git", - "state" : { - "revision" : "102a647b573f60f73afdce5613a51d71349fe507", - "version" : "1.30.0" - } - }, - { - "identity" : "swift-service-lifecycle", - "kind" : "remoteSourceControl", - "location" : "https://github.com/swift-server/swift-service-lifecycle.git", - "state" : { - "revision" : "e7187309187695115033536e8fc9b2eb87fd956d", - "version" : "2.8.0" - } - }, - { - "identity" : "swift-system", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-system.git", - "state" : { - "revision" : "61e4ca4b81b9e09e2ec863b00c340eb13497dac6", - "version" : "1.5.0" - } - } - ], - "version" : 3 -} diff --git a/ArrowFlight/Package.swift b/ArrowFlight/Package.swift deleted file mode 100644 index 
581ec45..0000000 --- a/ArrowFlight/Package.swift +++ /dev/null @@ -1,61 +0,0 @@ -// swift-tools-version: 5.10 -// The swift-tools-version declares the minimum version of Swift required to build this package. - -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -import PackageDescription - -let package = Package( - name: "ArrowFlight", - platforms: [ - .macOS(.v10_15) - ], - products: [ - // Products define the executables and libraries a package produces, making them visible to other packages. - .library( - name: "ArrowFlight", - targets: ["ArrowFlight"]) - ], - dependencies: [ - .package(url: "https://github.com/grpc/grpc-swift.git", from: "1.25.0"), - .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.29.0"), - .package(path: "../Arrow") - ], - targets: [ - // Targets are the basic building blocks of a package, defining a module or a test suite. - // Targets can depend on other targets in this package and products from dependencies. 
- .target( - name: "ArrowFlight", - dependencies: [ - .product(name: "Arrow", package: "Arrow"), - .product(name: "GRPC", package: "grpc-swift"), - .product(name: "SwiftProtobuf", package: "swift-protobuf") - ], - swiftSettings: [ - // build: .unsafeFlags(["-warnings-as-errors"]) - ] - ), - .testTarget( - name: "ArrowFlightTests", - dependencies: ["ArrowFlight"], - swiftSettings: [ - // build: .unsafeFlags(["-warnings-as-errors"]) - ] - ) - ] -) diff --git a/Package.swift b/Package.swift index b631c7f..7bc6217 100644 --- a/Package.swift +++ b/Package.swift @@ -23,23 +23,23 @@ import PackageDescription let package = Package( name: "Arrow", platforms: [ - .macOS(.v10_15) + .macOS(.v10_15), ], products: [ .library( name: "Arrow", - targets: ["Arrow"]) + targets: ["Arrow"] + ), ], dependencies: [ .package(url: "https://github.com/google/flatbuffers.git", from: "25.2.10"), .package(url: "https://github.com/grpc/grpc-swift.git", from: "1.25.0"), .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.29.0"), - .package(url: "https://github.com/apple/swift-atomics.git", from: "1.3.0") + .package(url: "https://github.com/apple/swift-atomics.git", from: "1.3.0"), ], targets: [ .target( name: "ArrowC", - path: "Arrow/Sources/ArrowC", swiftSettings: [ // build: .unsafeFlags(["-warnings-as-errors"]) ] @@ -47,11 +47,11 @@ let package = Package( ), .target( name: "Arrow", - dependencies: ["ArrowC", - .product(name: "FlatBuffers", package: "flatbuffers"), - .product(name: "Atomics", package: "swift-atomics") + dependencies: [ + .target(name: "ArrowC"), + .product(name: "FlatBuffers", package: "flatbuffers"), + .product(name: "Atomics", package: "swift-atomics"), ], - path: "Arrow/Sources/Arrow", swiftSettings: [ // build: .unsafeFlags(["-warnings-as-errors"]) ] @@ -59,11 +59,10 @@ let package = Package( .target( name: "ArrowFlight", dependencies: [ - "Arrow", + .target(name: "Arrow"), .product(name: "GRPC", package: "grpc-swift"), - .product(name: "SwiftProtobuf", 
package: "swift-protobuf") + .product(name: "SwiftProtobuf", package: "swift-protobuf"), ], - path: "ArrowFlight/Sources/ArrowFlight", swiftSettings: [ // build: .unsafeFlags(["-warnings-as-errors"]) ] @@ -71,7 +70,6 @@ let package = Package( .testTarget( name: "ArrowTests", dependencies: ["Arrow", "ArrowC"], - path: "Arrow/Tests", swiftSettings: [ // build: .unsafeFlags(["-warnings-as-errors"]) ] @@ -79,11 +77,9 @@ let package = Package( .testTarget( name: "ArrowFlightTests", dependencies: ["ArrowFlight"], - path: "ArrowFlight/Tests", swiftSettings: [ // build: .unsafeFlags(["-warnings-as-errors"]) ] - ) - + ), ] ) diff --git a/Arrow/Sources/Arrow/ArrowArray.swift b/Sources/Arrow/ArrowArray.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowArray.swift rename to Sources/Arrow/ArrowArray.swift diff --git a/Arrow/Sources/Arrow/ArrowArrayBuilder.swift b/Sources/Arrow/ArrowArrayBuilder.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowArrayBuilder.swift rename to Sources/Arrow/ArrowArrayBuilder.swift diff --git a/Arrow/Sources/Arrow/ArrowBuffer.swift b/Sources/Arrow/ArrowBuffer.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowBuffer.swift rename to Sources/Arrow/ArrowBuffer.swift diff --git a/Arrow/Sources/Arrow/ArrowBufferBuilder.swift b/Sources/Arrow/ArrowBufferBuilder.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowBufferBuilder.swift rename to Sources/Arrow/ArrowBufferBuilder.swift diff --git a/Arrow/Sources/Arrow/ArrowCExporter.swift b/Sources/Arrow/ArrowCExporter.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowCExporter.swift rename to Sources/Arrow/ArrowCExporter.swift diff --git a/Arrow/Sources/Arrow/ArrowCImporter.swift b/Sources/Arrow/ArrowCImporter.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowCImporter.swift rename to Sources/Arrow/ArrowCImporter.swift diff --git a/Arrow/Sources/Arrow/ArrowData.swift b/Sources/Arrow/ArrowData.swift similarity index 100% rename 
from Arrow/Sources/Arrow/ArrowData.swift rename to Sources/Arrow/ArrowData.swift diff --git a/Arrow/Sources/Arrow/ArrowDecoder.swift b/Sources/Arrow/ArrowDecoder.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowDecoder.swift rename to Sources/Arrow/ArrowDecoder.swift diff --git a/Arrow/Sources/Arrow/ArrowEncoder.swift b/Sources/Arrow/ArrowEncoder.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowEncoder.swift rename to Sources/Arrow/ArrowEncoder.swift diff --git a/Arrow/Sources/Arrow/ArrowReader.swift b/Sources/Arrow/ArrowReader.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowReader.swift rename to Sources/Arrow/ArrowReader.swift diff --git a/Arrow/Sources/Arrow/ArrowReaderHelper.swift b/Sources/Arrow/ArrowReaderHelper.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowReaderHelper.swift rename to Sources/Arrow/ArrowReaderHelper.swift diff --git a/Arrow/Sources/Arrow/ArrowSchema.swift b/Sources/Arrow/ArrowSchema.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowSchema.swift rename to Sources/Arrow/ArrowSchema.swift diff --git a/Arrow/Sources/Arrow/ArrowTable.swift b/Sources/Arrow/ArrowTable.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowTable.swift rename to Sources/Arrow/ArrowTable.swift diff --git a/Arrow/Sources/Arrow/ArrowType.swift b/Sources/Arrow/ArrowType.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowType.swift rename to Sources/Arrow/ArrowType.swift diff --git a/Arrow/Sources/Arrow/ArrowWriter.swift b/Sources/Arrow/ArrowWriter.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowWriter.swift rename to Sources/Arrow/ArrowWriter.swift diff --git a/Arrow/Sources/Arrow/ArrowWriterHelper.swift b/Sources/Arrow/ArrowWriterHelper.swift similarity index 100% rename from Arrow/Sources/Arrow/ArrowWriterHelper.swift rename to Sources/Arrow/ArrowWriterHelper.swift diff --git a/Arrow/Sources/Arrow/BitUtility.swift b/Sources/Arrow/BitUtility.swift 
similarity index 100% rename from Arrow/Sources/Arrow/BitUtility.swift rename to Sources/Arrow/BitUtility.swift diff --git a/Arrow/Sources/Arrow/ChunkedArray.swift b/Sources/Arrow/ChunkedArray.swift similarity index 100% rename from Arrow/Sources/Arrow/ChunkedArray.swift rename to Sources/Arrow/ChunkedArray.swift diff --git a/Arrow/Sources/Arrow/File_generated.swift b/Sources/Arrow/File_generated.swift similarity index 100% rename from Arrow/Sources/Arrow/File_generated.swift rename to Sources/Arrow/File_generated.swift diff --git a/Arrow/Sources/Arrow/MemoryAllocator.swift b/Sources/Arrow/MemoryAllocator.swift similarity index 100% rename from Arrow/Sources/Arrow/MemoryAllocator.swift rename to Sources/Arrow/MemoryAllocator.swift diff --git a/Arrow/Sources/Arrow/Message_generated.swift b/Sources/Arrow/Message_generated.swift similarity index 100% rename from Arrow/Sources/Arrow/Message_generated.swift rename to Sources/Arrow/Message_generated.swift diff --git a/Arrow/Sources/Arrow/ProtoUtil.swift b/Sources/Arrow/ProtoUtil.swift similarity index 100% rename from Arrow/Sources/Arrow/ProtoUtil.swift rename to Sources/Arrow/ProtoUtil.swift diff --git a/Arrow/Sources/Arrow/Schema_generated.swift b/Sources/Arrow/Schema_generated.swift similarity index 100% rename from Arrow/Sources/Arrow/Schema_generated.swift rename to Sources/Arrow/Schema_generated.swift diff --git a/Arrow/Sources/Arrow/SparseTensor_generated.swift b/Sources/Arrow/SparseTensor_generated.swift similarity index 100% rename from Arrow/Sources/Arrow/SparseTensor_generated.swift rename to Sources/Arrow/SparseTensor_generated.swift diff --git a/Arrow/Sources/Arrow/Tensor_generated.swift b/Sources/Arrow/Tensor_generated.swift similarity index 100% rename from Arrow/Sources/Arrow/Tensor_generated.swift rename to Sources/Arrow/Tensor_generated.swift diff --git a/Arrow/Sources/ArrowC/ArrowCData.c b/Sources/ArrowC/ArrowCData.c similarity index 100% rename from Arrow/Sources/ArrowC/ArrowCData.c rename to 
Sources/ArrowC/ArrowCData.c diff --git a/Arrow/Sources/ArrowC/include/ArrowCData.h b/Sources/ArrowC/include/ArrowCData.h similarity index 100% rename from Arrow/Sources/ArrowC/include/ArrowCData.h rename to Sources/ArrowC/include/ArrowCData.h diff --git a/ArrowFlight/Sources/ArrowFlight/Flight.grpc.swift b/Sources/ArrowFlight/Flight.grpc.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/Flight.grpc.swift rename to Sources/ArrowFlight/Flight.grpc.swift diff --git a/ArrowFlight/Sources/ArrowFlight/Flight.pb.swift b/Sources/ArrowFlight/Flight.pb.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/Flight.pb.swift rename to Sources/ArrowFlight/Flight.pb.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightAction.swift b/Sources/ArrowFlight/FlightAction.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightAction.swift rename to Sources/ArrowFlight/FlightAction.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightActionType.swift b/Sources/ArrowFlight/FlightActionType.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightActionType.swift rename to Sources/ArrowFlight/FlightActionType.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightClient.swift b/Sources/ArrowFlight/FlightClient.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightClient.swift rename to Sources/ArrowFlight/FlightClient.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightCriteria.swift b/Sources/ArrowFlight/FlightCriteria.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightCriteria.swift rename to Sources/ArrowFlight/FlightCriteria.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightData.swift b/Sources/ArrowFlight/FlightData.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightData.swift rename to Sources/ArrowFlight/FlightData.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightDescriptor.swift 
b/Sources/ArrowFlight/FlightDescriptor.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightDescriptor.swift rename to Sources/ArrowFlight/FlightDescriptor.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightEndpoint.swift b/Sources/ArrowFlight/FlightEndpoint.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightEndpoint.swift rename to Sources/ArrowFlight/FlightEndpoint.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightInfo.swift b/Sources/ArrowFlight/FlightInfo.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightInfo.swift rename to Sources/ArrowFlight/FlightInfo.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightLocation.swift b/Sources/ArrowFlight/FlightLocation.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightLocation.swift rename to Sources/ArrowFlight/FlightLocation.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightPutResult.swift b/Sources/ArrowFlight/FlightPutResult.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightPutResult.swift rename to Sources/ArrowFlight/FlightPutResult.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightResult.swift b/Sources/ArrowFlight/FlightResult.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightResult.swift rename to Sources/ArrowFlight/FlightResult.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightSchemaResult.swift b/Sources/ArrowFlight/FlightSchemaResult.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightSchemaResult.swift rename to Sources/ArrowFlight/FlightSchemaResult.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightServer.swift b/Sources/ArrowFlight/FlightServer.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightServer.swift rename to Sources/ArrowFlight/FlightServer.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightSql.pb.swift 
b/Sources/ArrowFlight/FlightSql.pb.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightSql.pb.swift rename to Sources/ArrowFlight/FlightSql.pb.swift diff --git a/ArrowFlight/Sources/ArrowFlight/FlightTicket.swift b/Sources/ArrowFlight/FlightTicket.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/FlightTicket.swift rename to Sources/ArrowFlight/FlightTicket.swift diff --git a/ArrowFlight/Sources/ArrowFlight/RecordBatchStreamReader.swift b/Sources/ArrowFlight/RecordBatchStreamReader.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/RecordBatchStreamReader.swift rename to Sources/ArrowFlight/RecordBatchStreamReader.swift diff --git a/ArrowFlight/Sources/ArrowFlight/RecordBatchStreamWriter.swift b/Sources/ArrowFlight/RecordBatchStreamWriter.swift similarity index 100% rename from ArrowFlight/Sources/ArrowFlight/RecordBatchStreamWriter.swift rename to Sources/ArrowFlight/RecordBatchStreamWriter.swift diff --git a/ArrowFlight/Tests/ArrowFlightTests/FlightTest.swift b/Tests/ArrowFlightTests/FlightTest.swift similarity index 100% rename from ArrowFlight/Tests/ArrowFlightTests/FlightTest.swift rename to Tests/ArrowFlightTests/FlightTest.swift diff --git a/Arrow/Tests/ArrowTests/ArrayBuilderTest.swift b/Tests/ArrowTests/ArrayBuilderTest.swift similarity index 100% rename from Arrow/Tests/ArrowTests/ArrayBuilderTest.swift rename to Tests/ArrowTests/ArrayBuilderTest.swift diff --git a/Arrow/Tests/ArrowTests/ArrayTests.swift b/Tests/ArrowTests/ArrayTests.swift similarity index 100% rename from Arrow/Tests/ArrowTests/ArrayTests.swift rename to Tests/ArrowTests/ArrayTests.swift diff --git a/Arrow/Tests/ArrowTests/CDataTests.swift b/Tests/ArrowTests/CDataTests.swift similarity index 100% rename from Arrow/Tests/ArrowTests/CDataTests.swift rename to Tests/ArrowTests/CDataTests.swift diff --git a/Arrow/Tests/ArrowTests/CodableTests.swift b/Tests/ArrowTests/CodableTests.swift similarity index 100% rename from 
Arrow/Tests/ArrowTests/CodableTests.swift rename to Tests/ArrowTests/CodableTests.swift diff --git a/Arrow/Tests/ArrowTests/IPCTests.swift b/Tests/ArrowTests/IPCTests.swift similarity index 99% rename from Arrow/Tests/ArrowTests/IPCTests.swift rename to Tests/ArrowTests/IPCTests.swift index 26f38ce..efd6b99 100644 --- a/Arrow/Tests/ArrowTests/IPCTests.swift +++ b/Tests/ArrowTests/IPCTests.swift @@ -329,7 +329,7 @@ final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body } func testFileReader_struct() throws { - let fileURL = currentDirectory().appendingPathComponent("../../testdata_struct.arrow") + let fileURL = currentDirectory().appendingPathComponent("../testdata_struct.arrow") let arrowReader = ArrowReader() try checkStructRecordBatch(arrowReader.fromFile(fileURL)) } diff --git a/Arrow/Tests/ArrowTests/RecordBatchTests.swift b/Tests/ArrowTests/RecordBatchTests.swift similarity index 100% rename from Arrow/Tests/ArrowTests/RecordBatchTests.swift rename to Tests/ArrowTests/RecordBatchTests.swift diff --git a/Arrow/Tests/ArrowTests/TableTests.swift b/Tests/ArrowTests/TableTests.swift similarity index 100% rename from Arrow/Tests/ArrowTests/TableTests.swift rename to Tests/ArrowTests/TableTests.swift From 62145cd096b4978dac18bc2f1ebef4d9bc4a9a4c Mon Sep 17 00:00:00 2001 From: Moreno Date: Mon, 11 Aug 2025 11:10:28 +0200 Subject: [PATCH 2/7] migrated CDataWGo into new package Migrated as executable target. This makes the target executable from the command line using swift run go-swift. 
The idea is to make a pre-build plugin and use it before test launch --- CDataWGo/Package.swift | 43 ------------------- Package.swift | 6 +++ .../go-swift/CDataTest.swift | 0 {CDataWGo => Sources/go-swift}/go.mod | 0 {CDataWGo => Sources/go-swift}/go.sum | 0 .../go-swift}/include/go_swift.h | 4 +- {CDataWGo => Sources/go-swift}/main.go | 0 7 files changed, 8 insertions(+), 45 deletions(-) delete mode 100644 CDataWGo/Package.swift rename {CDataWGo/Sources => Sources}/go-swift/CDataTest.swift (100%) rename {CDataWGo => Sources/go-swift}/go.mod (100%) rename {CDataWGo => Sources/go-swift}/go.sum (100%) rename {CDataWGo => Sources/go-swift}/include/go_swift.h (90%) rename {CDataWGo => Sources/go-swift}/main.go (100%) diff --git a/CDataWGo/Package.swift b/CDataWGo/Package.swift deleted file mode 100644 index 4d51711..0000000 --- a/CDataWGo/Package.swift +++ /dev/null @@ -1,43 +0,0 @@ -// swift-tools-version: 5.10 -// The swift-tools-version declares the minimum version of Swift required to build this package. - -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import PackageDescription - -let package = Package( - name: "go-swift", - platforms: [ - .macOS(.v10_14) - ], - products: [ - .library( - name: "go-swift", - type: .static, - targets: ["go-swift"]) - ], - dependencies: [ - .package(path: "../Arrow") // 👈 Reference to a Local Package - ], - targets: [ - .target( - name: "go-swift", - dependencies: [ - .product(name: "Arrow", package: "Arrow") - ]) - ] -) diff --git a/Package.swift b/Package.swift index 7bc6217..13dd81a 100644 --- a/Package.swift +++ b/Package.swift @@ -67,6 +67,12 @@ let package = Package( // build: .unsafeFlags(["-warnings-as-errors"]) ] ), + .executableTarget( + name: "go-swift", + dependencies: [ + .target(name: "Arrow"), + ] + ), .testTarget( name: "ArrowTests", dependencies: ["Arrow", "ArrowC"], diff --git a/CDataWGo/Sources/go-swift/CDataTest.swift b/Sources/go-swift/CDataTest.swift similarity index 100% rename from CDataWGo/Sources/go-swift/CDataTest.swift rename to Sources/go-swift/CDataTest.swift diff --git a/CDataWGo/go.mod b/Sources/go-swift/go.mod similarity index 100% rename from CDataWGo/go.mod rename to Sources/go-swift/go.mod diff --git a/CDataWGo/go.sum b/Sources/go-swift/go.sum similarity index 100% rename from CDataWGo/go.sum rename to Sources/go-swift/go.sum diff --git a/CDataWGo/include/go_swift.h b/Sources/go-swift/include/go_swift.h similarity index 90% rename from CDataWGo/include/go_swift.h rename to Sources/go-swift/include/go_swift.h index a7b79ae..acf3294 100644 --- a/CDataWGo/include/go_swift.h +++ b/Sources/go-swift/include/go_swift.h @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-#include "../../Arrow/Sources/ArrowC/include/ArrowCData.h" +#include "../ArrowC/include/ArrowCData.h" void stringTypeFromSwift(struct ArrowSchema*); @@ -27,4 +27,4 @@ void arrayStringFromSwift(struct ArrowArray*); void arrayIntToSwift(struct ArrowArray* array); -void arrayStringToSwift(struct ArrowArray* array); \ No newline at end of file +void arrayStringToSwift(struct ArrowArray* array); diff --git a/CDataWGo/main.go b/Sources/go-swift/main.go similarity index 100% rename from CDataWGo/main.go rename to Sources/go-swift/main.go From 3af683e1283a512b1c7bc2637e77a299984763b5 Mon Sep 17 00:00:00 2001 From: Moreno Date: Thu, 21 Aug 2025 17:19:55 +0200 Subject: [PATCH 3/7] fix swift lint file --- .swiftlint.yml | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/.swiftlint.yml b/.swiftlint.yml index 7e4da29..5c55bda 100644 --- a/.swiftlint.yml +++ b/.swiftlint.yml @@ -16,23 +16,23 @@ # under the License. included: - - Arrow/Package.swift - - Arrow/Sources - - Arrow/Tests - - ArrowFlight/Package.swift - - ArrowFlight/Sources - - ArrowFlight/Tests - - CDataWGo/Package.swift - - CDataWGo/Sources/go-swift + # - Arrow/Package.swift + # - Arrow/Sources + # - Arrow/Tests + # - ArrowFlight/Package.swift + # - ArrowFlight/Sources + # - ArrowFlight/Tests + # - CDataWGo/Package.swift + # - CDataWGo/Sources/go-swift excluded: - - Arrow/Sources/Arrow/File_generated.swift - - Arrow/Sources/Arrow/Message_generated.swift - - Arrow/Sources/Arrow/Schema_generated.swift - - Arrow/Sources/Arrow/SparseTensor_generated.swift - - Arrow/Sources/Arrow/Tensor_generated.swift - - ArrowFlight/Sources/ArrowFlight/Flight.grpc.swift - - ArrowFlight/Sources/ArrowFlight/Flight.pb.swift - - ArrowFlight/Sources/ArrowFlight/FlightSql.pb.swift + - Arrow/File_generated.swift + - Arrow/Message_generated.swift + - Arrow/Schema_generated.swift + - Arrow/SparseTensor_generated.swift + - Arrow/Tensor_generated.swift + - ArrowFlight/Flight.grpc.swift + - 
ArrowFlight/Flight.pb.swift + - ArrowFlight/FlightSql.pb.swift identifier_name: min_length: 2 # only warning allow_zero_lintable_files: false From 61aa82f12aac46abeffcb99de2bf1259b7ad5a79 Mon Sep 17 00:00:00 2001 From: Moreno Date: Sat, 30 Aug 2025 10:55:20 +0200 Subject: [PATCH 4/7] improve test - CI folder fix attempt --- Package.swift | 2 +- Tests/ArrowTests/IPCTests.swift | 12 ++++++------ ci/scripts/build.sh | 2 +- dev/release/release.sh | 2 +- dev/release/release_rc.sh | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Package.swift b/Package.swift index 13dd81a..6408809 100644 --- a/Package.swift +++ b/Package.swift @@ -67,7 +67,7 @@ let package = Package( // build: .unsafeFlags(["-warnings-as-errors"]) ] ), - .executableTarget( + .library( name: "go-swift", dependencies: [ .target(name: "Arrow"), diff --git a/Tests/ArrowTests/IPCTests.swift b/Tests/ArrowTests/IPCTests.swift index efd6b99..c7aab04 100644 --- a/Tests/ArrowTests/IPCTests.swift +++ b/Tests/ArrowTests/IPCTests.swift @@ -264,7 +264,7 @@ final class IPCStreamReaderTests: XCTestCase { final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body_length func testFileReader_double() throws { - let fileURL = currentDirectory().appendingPathComponent("../../testdata_double.arrow") + let fileURL = currentDirectory().appendingPathComponent("../testdata_double.arrow") let arrowReader = ArrowReader() let result = arrowReader.fromFile(fileURL) let recordBatches: [RecordBatch] @@ -298,14 +298,14 @@ final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body } func testFileReader_bool() throws { - let fileURL = currentDirectory().appendingPathComponent("../../testdata_bool.arrow") + let fileURL = currentDirectory().appendingPathComponent("../testdata_bool.arrow") let arrowReader = ArrowReader() try checkBoolRecordBatch(arrowReader.fromFile(fileURL)) } func testFileWriter_bool() throws { // read existing file - let fileURL = 
currentDirectory().appendingPathComponent("../../testdata_bool.arrow") + let fileURL = currentDirectory().appendingPathComponent("../testdata_bool.arrow") let arrowReader = ArrowReader() let fileRBs = try checkBoolRecordBatch(arrowReader.fromFile(fileURL)) let arrowWriter = ArrowWriter() @@ -319,7 +319,7 @@ final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body throw error } // write file record batches to another file - let outputUrl = currentDirectory().appendingPathComponent("../../testfilewriter_bool.arrow") + let outputUrl = currentDirectory().appendingPathComponent("../testfilewriter_bool.arrow") switch arrowWriter.toFile(outputUrl, info: writerInfo) { case .success: try checkBoolRecordBatch(arrowReader.fromFile(outputUrl)) @@ -336,7 +336,7 @@ final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body func testFileWriter_struct() throws { // read existing file - let fileURL = currentDirectory().appendingPathComponent("../../testdata_struct.arrow") + let fileURL = currentDirectory().appendingPathComponent("../testdata_struct.arrow") let arrowReader = ArrowReader() let fileRBs = try checkStructRecordBatch(arrowReader.fromFile(fileURL)) let arrowWriter = ArrowWriter() @@ -350,7 +350,7 @@ final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body throw error } // write file record batches to another file - let outputUrl = currentDirectory().appendingPathComponent("../../testfilewriter_struct.arrow") + let outputUrl = currentDirectory().appendingPathComponent("../testfilewriter_struct.arrow") switch arrowWriter.toFile(outputUrl, info: writerInfo) { case .success: try checkStructRecordBatch(arrowReader.fromFile(outputUrl)) diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index 043642d..1459fa9 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -53,7 +53,7 @@ export GOPATH="${build_dir}" pushd "${data_gen_dir}" go get -d ./... go run . 
-cp *.arrow ../../Arrow +cp *.arrow ../Arrow popd github_actions_group_end diff --git a/dev/release/release.sh b/dev/release/release.sh index 9a763c6..3f641c8 100755 --- a/dev/release/release.sh +++ b/dev/release/release.sh @@ -20,7 +20,7 @@ set -eu SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -SOURCE_TOP_DIR="$(cd "${SOURCE_DIR}/../../" && pwd)" +SOURCE_TOP_DIR="$(cd "${SOURCE_DIR}/../" && pwd)" if [ "$#" -ne 2 ]; then echo "Usage: $0 " diff --git a/dev/release/release_rc.sh b/dev/release/release_rc.sh index ff5e5f0..c77b435 100755 --- a/dev/release/release_rc.sh +++ b/dev/release/release_rc.sh @@ -20,7 +20,7 @@ set -eu SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -SOURCE_TOP_DIR="$(cd "${SOURCE_DIR}/../../" && pwd)" +SOURCE_TOP_DIR="$(cd "${SOURCE_DIR}/../" && pwd)" if [ "$#" -ne 2 ]; then echo "Usage: $0 " From e6d9114c942aa430100da61e6057d1c156e40e5b Mon Sep 17 00:00:00 2001 From: Moreno Date: Sat, 30 Aug 2025 11:39:55 +0200 Subject: [PATCH 5/7] fix Package.swift - lint files --- Package.swift | 2 +- Sources/Arrow/ArrowArray.swift | 126 +- Sources/Arrow/ArrowArrayBuilder.swift | 64 +- Sources/Arrow/ArrowBuffer.swift | 5 +- Sources/Arrow/ArrowBufferBuilder.swift | 148 +- Sources/Arrow/ArrowCExporter.swift | 22 +- Sources/Arrow/ArrowCImporter.swift | 36 +- Sources/Arrow/ArrowData.swift | 2 +- Sources/Arrow/ArrowDecoder.swift | 200 +- Sources/Arrow/ArrowEncoder.swift | 84 +- Sources/Arrow/ArrowReader.swift | 107 +- Sources/Arrow/ArrowReaderHelper.swift | 34 +- Sources/Arrow/ArrowSchema.swift | 4 +- Sources/Arrow/ArrowTable.swift | 68 +- Sources/Arrow/ArrowType.swift | 79 +- Sources/Arrow/ArrowWriter.swift | 103 +- Sources/Arrow/ArrowWriterHelper.swift | 23 +- Sources/Arrow/BitUtility.swift | 2 +- Sources/Arrow/ChunkedArray.swift | 30 +- Sources/Arrow/MemoryAllocator.swift | 3 +- Sources/Arrow/ProtoUtil.swift | 2 +- Sources/ArrowFlight/Flight.grpc.swift | 2428 +++-- Sources/ArrowFlight/Flight.pb.swift | 2109 ++--- 
Sources/ArrowFlight/FlightAction.swift | 8 +- Sources/ArrowFlight/FlightActionType.swift | 10 +- Sources/ArrowFlight/FlightClient.swift | 44 +- Sources/ArrowFlight/FlightData.swift | 6 +- Sources/ArrowFlight/FlightDescriptor.swift | 20 +- Sources/ArrowFlight/FlightEndpoint.swift | 8 +- Sources/ArrowFlight/FlightInfo.swift | 13 +- Sources/ArrowFlight/FlightLocation.swift | 2 +- Sources/ArrowFlight/FlightPutResult.swift | 4 +- Sources/ArrowFlight/FlightResult.swift | 4 +- Sources/ArrowFlight/FlightSchemaResult.swift | 6 +- Sources/ArrowFlight/FlightServer.swift | 39 +- Sources/ArrowFlight/FlightSql.pb.swift | 7913 ++++++++--------- Sources/ArrowFlight/FlightTicket.swift | 4 +- .../ArrowFlight/RecordBatchStreamReader.swift | 11 +- .../ArrowFlight/RecordBatchStreamWriter.swift | 22 +- 39 files changed, 6914 insertions(+), 6881 deletions(-) diff --git a/Package.swift b/Package.swift index 6408809..af2fdb0 100644 --- a/Package.swift +++ b/Package.swift @@ -67,7 +67,7 @@ let package = Package( // build: .unsafeFlags(["-warnings-as-errors"]) ] ), - .library( + .target( name: "go-swift", dependencies: [ .target(name: "Arrow"), diff --git a/Sources/Arrow/ArrowArray.swift b/Sources/Arrow/ArrowArray.swift index d4ee873..a81bf54 100644 --- a/Sources/Arrow/ArrowArray.swift +++ b/Sources/Arrow/ArrowArray.swift @@ -18,14 +18,14 @@ import Foundation public protocol ArrowArrayHolder { - var type: ArrowType {get} - var length: UInt {get} - var nullCount: UInt {get} - var array: AnyArray {get} - var data: ArrowData {get} - var getBufferData: () -> [Data] {get} - var getBufferDataSizes: () -> [Int] {get} - var getArrowColumn: (ArrowField, [ArrowArrayHolder]) throws -> ArrowColumn {get} + var type: ArrowType { get } + var length: UInt { get } + var nullCount: UInt { get } + var array: AnyArray { get } + var data: ArrowData { get } + var getBufferData: () -> [Data] { get } + var getBufferDataSizes: () -> [Int] { get } + var getArrowColumn: (ArrowField, [ArrowArrayHolder]) throws -> 
ArrowColumn { get } } public class ArrowArrayHolderImpl: ArrowArrayHolder { @@ -38,12 +38,12 @@ public class ArrowArrayHolderImpl: ArrowArrayHolder { public let getBufferDataSizes: () -> [Int] public let getArrowColumn: (ArrowField, [ArrowArrayHolder]) throws -> ArrowColumn public init(_ arrowArray: ArrowArray) { - self.array = arrowArray - self.data = arrowArray.arrowData - self.length = arrowArray.length - self.type = arrowArray.arrowData.type - self.nullCount = arrowArray.nullCount - self.getBufferData = {() -> [Data] in + array = arrowArray + data = arrowArray.arrowData + length = arrowArray.length + type = arrowArray.arrowData.type + nullCount = arrowArray.nullCount + getBufferData = { () -> [Data] in var bufferData = [Data]() for buffer in arrowArray.arrowData.buffers { bufferData.append(Data()) @@ -53,7 +53,7 @@ public class ArrowArrayHolderImpl: ArrowArrayHolder { return bufferData } - self.getBufferDataSizes = {() -> [Int] in + getBufferDataSizes = { () -> [Int] in var bufferDataSizes = [Int]() for buffer in arrowArray.arrowData.buffers { bufferDataSizes.append(Int(buffer.capacity)) @@ -62,7 +62,7 @@ public class ArrowArrayHolderImpl: ArrowArrayHolder { return bufferDataSizes } - self.getArrowColumn = {(field: ArrowField, arrayHolders: [ArrowArrayHolder]) throws -> ArrowColumn in + getArrowColumn = { (field: ArrowField, arrayHolders: [ArrowArrayHolder]) throws -> ArrowColumn in var arrays = [ArrowArray]() for arrayHolder in arrayHolders { if let array = arrayHolder.array as? 
ArrowArray { @@ -70,12 +70,13 @@ public class ArrowArrayHolderImpl: ArrowArrayHolder { } } - return ArrowColumn(field, chunked: ChunkedArrayHolder(try ChunkedArray(arrays))) + return try ArrowColumn(field, chunked: ChunkedArrayHolder(ChunkedArray(arrays))) } } public static func loadArray( // swiftlint:disable:this cyclomatic_complexity - _ arrowType: ArrowType, with: ArrowData) throws -> ArrowArrayHolder { + _ arrowType: ArrowType, with: ArrowData + ) throws -> ArrowArrayHolder { switch arrowType.id { case .int8: return try ArrowArrayHolderImpl(FixedArray(with)) @@ -124,22 +125,22 @@ public class ArrowArrayHolderImpl: ArrowArrayHolder { public class ArrowArray: AsString, AnyArray { public typealias ItemType = T public let arrowData: ArrowData - public var nullCount: UInt {return self.arrowData.nullCount} - public var length: UInt {return self.arrowData.length} + public var nullCount: UInt { return arrowData.nullCount } + public var length: UInt { return arrowData.length } public required init(_ arrowData: ArrowData) throws { self.arrowData = arrowData } public func isNull(_ at: UInt) throws -> Bool { - if at >= self.length { + if at >= length { throw ArrowError.outOfBounds(index: Int64(at)) } - return self.arrowData.isNull(at) + return arrowData.isNull(at) } - public subscript(_ index: UInt) -> T? { + public subscript(_: UInt) -> T? { fatalError("subscript() has not been implemented") } @@ -161,35 +162,35 @@ public class ArrowArray: AsString, AnyArray { } public class FixedArray: ArrowArray { - public override subscript(_ index: UInt) -> T? { - if self.arrowData.isNull(index) { + override public subscript(_ index: UInt) -> T? 
{ + if arrowData.isNull(index) { return nil } - let byteOffset = self.arrowData.stride * Int(index) - return self.arrowData.buffers[1].rawPointer.advanced(by: byteOffset).load(as: T.self) + let byteOffset = arrowData.stride * Int(index) + return arrowData.buffers[1].rawPointer.advanced(by: byteOffset).load(as: T.self) } } public class StringArray: ArrowArray { - public override subscript(_ index: UInt) -> String? { + override public subscript(_ index: UInt) -> String? { let offsetIndex = MemoryLayout.stride * Int(index) - if self.arrowData.isNull(index) { + if arrowData.isNull(index) { return nil } - let offsets = self.arrowData.buffers[1] - let values = self.arrowData.buffers[2] + let offsets = arrowData.buffers[1] + let values = arrowData.buffers[2] var startIndex: Int32 = 0 if index > 0 { startIndex = offsets.rawPointer.advanced(by: offsetIndex).load(as: Int32.self) } - let endIndex = offsets.rawPointer.advanced(by: offsetIndex + MemoryLayout.stride ) + let endIndex = offsets.rawPointer.advanced(by: offsetIndex + MemoryLayout.stride) .load(as: Int32.self) let arrayLength = Int(endIndex - startIndex) - let rawPointer = values.rawPointer.advanced(by: Int(startIndex)) + let rawPointer = values.rawPointer.advanced(by: Int(startIndex)) .bindMemory(to: UInt8.self, capacity: arrayLength) let buffer = UnsafeBufferPointer(start: rawPointer, count: arrayLength) let byteArray = Array(buffer) @@ -198,36 +199,36 @@ public class StringArray: ArrowArray { } public class BoolArray: ArrowArray { - public override subscript(_ index: UInt) -> Bool? { - if self.arrowData.isNull(index) { + override public subscript(_ index: UInt) -> Bool? { + if arrowData.isNull(index) { return nil } - let valueBuffer = self.arrowData.buffers[1] + let valueBuffer = arrowData.buffers[1] return BitUtility.isSet(index, buffer: valueBuffer) } } public class Date32Array: ArrowArray { - public override subscript(_ index: UInt) -> Date? 
{ - if self.arrowData.isNull(index) { + override public subscript(_ index: UInt) -> Date? { + if arrowData.isNull(index) { return nil } - let byteOffset = self.arrowData.stride * Int(index) - let milliseconds = self.arrowData.buffers[1].rawPointer.advanced(by: byteOffset).load(as: UInt32.self) + let byteOffset = arrowData.stride * Int(index) + let milliseconds = arrowData.buffers[1].rawPointer.advanced(by: byteOffset).load(as: UInt32.self) return Date(timeIntervalSince1970: TimeInterval(milliseconds * 86400)) } } public class Date64Array: ArrowArray { - public override subscript(_ index: UInt) -> Date? { - if self.arrowData.isNull(index) { + override public subscript(_ index: UInt) -> Date? { + if arrowData.isNull(index) { return nil } - let byteOffset = self.arrowData.stride * Int(index) - let milliseconds = self.arrowData.buffers[1].rawPointer.advanced(by: byteOffset).load(as: UInt64.self) + let byteOffset = arrowData.stride * Int(index) + let milliseconds = arrowData.buffers[1].rawPointer.advanced(by: byteOffset).load(as: UInt64.self) return Date(timeIntervalSince1970: TimeInterval(milliseconds / 1000)) } } @@ -236,7 +237,6 @@ public class Time32Array: FixedArray {} public class Time64Array: FixedArray {} public class TimestampArray: FixedArray { - public struct FormattingOptions: Equatable { public var dateFormat: String = "yyyy-MM-dd HH:mm:ss.SSS" public var locale: Locale = .current @@ -267,7 +267,7 @@ public class TimestampArray: FixedArray { public func formattedDate(at index: UInt, options: FormattingOptions = FormattingOptions()) -> String? { guard let timestamp = self[index] else { return nil } - guard let timestampType = self.arrowData.type as? ArrowTypeTimestamp else { + guard let timestampType = arrowData.type as? ArrowTypeTimestamp else { return options.fallbackToRaw ? 
"\(timestamp)" : nil } @@ -294,7 +294,7 @@ public class TimestampArray: FixedArray { case .seconds: timeInterval = TimeInterval(timestamp) case .milliseconds: - timeInterval = TimeInterval(timestamp) / 1_000 + timeInterval = TimeInterval(timestamp) / 1000 case .microseconds: timeInterval = TimeInterval(timestamp) / 1_000_000 case .nanoseconds: @@ -304,7 +304,7 @@ public class TimestampArray: FixedArray { return Date(timeIntervalSince1970: timeInterval) } - public override func asString(_ index: UInt) -> String { + override public func asString(_ index: UInt) -> String { if let formatted = formattedDate(at: index) { return formatted } @@ -321,31 +321,31 @@ public class BinaryArray: ArrowArray { public var options = Options() - public override subscript(_ index: UInt) -> Data? { + override public subscript(_ index: UInt) -> Data? { let offsetIndex = MemoryLayout.stride * Int(index) - if self.arrowData.isNull(index) { + if arrowData.isNull(index) { return nil } - let offsets = self.arrowData.buffers[1] - let values = self.arrowData.buffers[2] + let offsets = arrowData.buffers[1] + let values = arrowData.buffers[2] var startIndex: Int32 = 0 if index > 0 { startIndex = offsets.rawPointer.advanced(by: offsetIndex).load(as: Int32.self) } - let endIndex = offsets.rawPointer.advanced(by: offsetIndex + MemoryLayout.stride ) + let endIndex = offsets.rawPointer.advanced(by: offsetIndex + MemoryLayout.stride) .load(as: Int32.self) let arrayLength = Int(endIndex - startIndex) - let rawPointer = values.rawPointer.advanced(by: Int(startIndex)) + let rawPointer = values.rawPointer.advanced(by: Int(startIndex)) .bindMemory(to: UInt8.self, capacity: arrayLength) let buffer = UnsafeBufferPointer(start: rawPointer, count: arrayLength) let byteArray = Array(buffer) return Data(byteArray) } - public override func asString(_ index: UInt) -> String { - if self[index] == nil {return ""} + override public func asString(_ index: UInt) -> String { + if self[index] == nil { return "" } let data 
= self[index]! if options.printAsHex { return data.hexEncodedString() @@ -361,14 +361,14 @@ public class StructArray: ArrowArray<[Any?]> { try super.init(arrowData) var fields = [ArrowArrayHolder]() for child in arrowData.children { - fields.append(try ArrowArrayHolderImpl.loadArray(child.type, with: child)) + try fields.append(ArrowArrayHolderImpl.loadArray(child.type, with: child)) } - self.arrowFields = fields + arrowFields = fields } - public override subscript(_ index: UInt) -> [Any?]? { - if self.arrowData.isNull(index) { + override public subscript(_ index: UInt) -> [Any?]? { + if arrowData.isNull(index) { return nil } @@ -384,14 +384,14 @@ public class StructArray: ArrowArray<[Any?]> { return nil } - public override func asString(_ index: UInt) -> String { - if self.arrowData.isNull(index) { + override public func asString(_ index: UInt) -> String { + if arrowData.isNull(index) { return "" } var output = "{" if let fields = arrowFields { - for fieldIndex in 0..>: ArrowArrayHolderBuilder { let type: ArrowType let bufferBuilder: T - public var length: UInt {return self.bufferBuilder.length} - public var capacity: UInt {return self.bufferBuilder.capacity} - public var nullCount: UInt {return self.bufferBuilder.nullCount} - public var offset: UInt {return self.bufferBuilder.offset} + public var length: UInt { return bufferBuilder.length } + public var capacity: UInt { return bufferBuilder.capacity } + public var nullCount: UInt { return bufferBuilder.nullCount } + public var offset: UInt { return bufferBuilder.offset } fileprivate init(_ type: ArrowType) throws { self.type = type - self.bufferBuilder = try T() + bufferBuilder = try T() } public func append(_ vals: T.ItemType?...) { for val in vals { - self.bufferBuilder.append(val) + bufferBuilder.append(val) } } public func append(_ vals: [T.ItemType?]) { for val in vals { - self.bufferBuilder.append(val) + bufferBuilder.append(val) } } public func append(_ val: T.ItemType?) 
{ - self.bufferBuilder.append(val) + bufferBuilder.append(val) } public func appendAny(_ val: Any?) { - self.bufferBuilder.append(val as? T.ItemType) + bufferBuilder.append(val as? T.ItemType) } public func finish() throws -> ArrowArray { - let buffers = self.bufferBuilder.finish() - let arrowData = try ArrowData(self.type, buffers: buffers, nullCount: self.nullCount) + let buffers = bufferBuilder.finish() + let arrowData = try ArrowData(type, buffers: buffers, nullCount: nullCount) let array = try U(arrowData) return array } public func getStride() -> Int { - return self.type.getStride() + return type.getStride() } public func toHolder() throws -> ArrowArrayHolder { - return try ArrowArrayHolderImpl(self.finish()) + return try ArrowArrayHolderImpl(finish()) } } @@ -132,43 +132,43 @@ public class StructArrayBuilder: ArrowArrayBuilder StructArray { - let buffers = self.bufferBuilder.finish() + override public func finish() throws -> StructArray { + let buffers = bufferBuilder.finish() var childData = [ArrowData]() - for builder in self.builders { - childData.append(try builder.toHolder().array.arrowData) + for builder in builders { + try childData.append(builder.toHolder().array.arrowData) } - let arrowData = try ArrowData(self.type, buffers: buffers, - children: childData, nullCount: self.nullCount, - length: self.length) + let arrowData = try ArrowData(type, buffers: buffers, + children: childData, nullCount: nullCount, + length: length) let structArray = try StructArray(arrowData) return structArray } @@ -176,7 +176,8 @@ public class StructArrayBuilder: ArrowArrayBuilder ArrowArrayHolderBuilder { + _ builderType: Any.Type + ) throws -> ArrowArrayHolderBuilder { if builderType == Int8.self || builderType == Int8?.self { return try ArrowArrayBuilders.loadNumberArrayBuilder() as NumberArrayBuilder } else if builderType == Int16.self || builderType == Int16?.self { @@ -236,14 +237,15 @@ public class ArrowArrayBuilders { let builderType = type(of: value) let arrowType 
= ArrowType(ArrowType.infoForType(builderType)) fields.append(ArrowField(propertyName, type: arrowType, isNullable: true)) - builders.append(try loadBuilder(arrowType: arrowType)) + try builders.append(loadBuilder(arrowType: arrowType)) } return try StructArrayBuilder(fields, builders: builders) } public static func loadBuilder( // swiftlint:disable:this cyclomatic_complexity - arrowType: ArrowType) throws -> ArrowArrayHolderBuilder { + arrowType: ArrowType + ) throws -> ArrowArrayHolderBuilder { switch arrowType.id { case .uint8: return try loadNumberArrayBuilder() as NumberArrayBuilder diff --git a/Sources/Arrow/ArrowBuffer.swift b/Sources/Arrow/ArrowBuffer.swift index 1ff53cd..741576f 100644 --- a/Sources/Arrow/ArrowBuffer.swift +++ b/Sources/Arrow/ArrowBuffer.swift @@ -39,7 +39,7 @@ public class ArrowBuffer { } func append(to data: inout Data) { - let ptr = UnsafePointer(rawPointer.assumingMemoryBound(to: UInt8.self)) + let ptr = UnsafePointer(rawPointer.assumingMemoryBound(to: UInt8.self)) data.append(ptr, count: Int(capacity)) } @@ -47,7 +47,8 @@ public class ArrowBuffer { return ArrowBuffer( length: 0, capacity: 0, - rawPointer: UnsafeMutableRawPointer.allocate(byteCount: 0, alignment: .zero)) + rawPointer: UnsafeMutableRawPointer.allocate(byteCount: 0, alignment: .zero) + ) } static func createBuffer(_ data: [UInt8], length: UInt) -> ArrowBuffer { diff --git a/Sources/Arrow/ArrowBufferBuilder.swift b/Sources/Arrow/ArrowBufferBuilder.swift index cc0bae0..fe85268 100644 --- a/Sources/Arrow/ArrowBufferBuilder.swift +++ b/Sources/Arrow/ArrowBufferBuilder.swift @@ -19,10 +19,10 @@ import Foundation public protocol ArrowBufferBuilder { associatedtype ItemType - var capacity: UInt {get} - var length: UInt {get} - var nullCount: UInt {get} - var offset: UInt {get} + var capacity: UInt { get } + var length: UInt { get } + var nullCount: UInt { get } + var offset: UInt { get } init() throws func append(_ newValue: ItemType?) 
func isNull(_ index: UInt) -> Bool @@ -33,16 +33,16 @@ public protocol ArrowBufferBuilder { public class BaseBufferBuilder { var nulls: ArrowBuffer public var offset: UInt = 0 - public var capacity: UInt {return self.nulls.capacity} + public var capacity: UInt { return nulls.capacity } public var length: UInt = 0 - public var nullCount: UInt = 0 + public var nullCount: UInt = 0 init(_ nulls: ArrowBuffer) { self.nulls = nulls } public func isNull(_ index: UInt) -> Bool { - return self.nulls.length == 0 || BitUtility.isSet(index + self.offset, buffer: self.nulls) + return nulls.length == 0 || BitUtility.isSet(index + offset, buffer: nulls) } func resizeLength(_ data: ArrowBuffer, len: UInt = 0) -> UInt { @@ -60,7 +60,7 @@ public class BaseBufferBuilder { public class ValuesBufferBuilder: BaseBufferBuilder { var values: ArrowBuffer var stride: Int - public override var capacity: UInt {return self.values.capacity} + override public var capacity: UInt { return values.capacity } init(values: ArrowBuffer, nulls: ArrowBuffer, stride: Int = MemoryLayout.stride) { self.stride = stride @@ -73,35 +73,35 @@ public class FixedBufferBuilder: ValuesBufferBuilder, ArrowBufferBuilder { public typealias ItemType = T private let defaultVal: ItemType public required init() throws { - self.defaultVal = try FixedBufferBuilder.defaultValueForType() + defaultVal = try FixedBufferBuilder.defaultValueForType() let values = ArrowBuffer.createBuffer(0, size: UInt(MemoryLayout.stride)) let nulls = ArrowBuffer.createBuffer(0, size: UInt(MemoryLayout.stride)) super.init(values: values, nulls: nulls) } public func append(_ newValue: ItemType?) 
{ - let index = UInt(self.length) + let index = UInt(length) let byteIndex = self.stride * Int(index) - self.length += 1 - if length > self.values.length { - self.resize(length) + length += 1 + if length > values.length { + resize(length) } if let val = newValue { - BitUtility.setBit(index + self.offset, buffer: self.nulls) - self.values.rawPointer.advanced(by: byteIndex).storeBytes(of: val, as: T.self) + BitUtility.setBit(index + offset, buffer: nulls) + values.rawPointer.advanced(by: byteIndex).storeBytes(of: val, as: T.self) } else { - self.nullCount += 1 - BitUtility.clearBit(index + self.offset, buffer: self.nulls) - self.values.rawPointer.advanced(by: byteIndex).storeBytes(of: defaultVal, as: T.self) + nullCount += 1 + BitUtility.clearBit(index + offset, buffer: nulls) + values.rawPointer.advanced(by: byteIndex).storeBytes(of: defaultVal, as: T.self) } } public func resize(_ length: UInt) { - if length > self.values.length { + if length > values.length { let resizeLength = resizeLength(self.values) var values = ArrowBuffer.createBuffer(resizeLength, size: UInt(MemoryLayout.size)) - var nulls = ArrowBuffer.createBuffer(resizeLength/8 + 1, size: UInt(MemoryLayout.size)) + var nulls = ArrowBuffer.createBuffer(resizeLength / 8 + 1, size: UInt(MemoryLayout.size)) ArrowBuffer.copyCurrent(self.values, to: &values, len: self.values.capacity) ArrowBuffer.copyCurrent(self.nulls, to: &nulls, len: self.nulls.capacity) self.values = values @@ -112,7 +112,7 @@ public class FixedBufferBuilder: ValuesBufferBuilder, ArrowBufferBuilder { public func finish() -> [ArrowBuffer] { let length = self.length var values = ArrowBuffer.createBuffer(length, size: UInt(MemoryLayout.size)) - var nulls = ArrowBuffer.createBuffer(length/8 + 1, size: UInt(MemoryLayout.size)) + var nulls = ArrowBuffer.createBuffer(length / 8 + 1, size: UInt(MemoryLayout.size)) ArrowBuffer.copyCurrent(self.values, to: &values, len: values.capacity) ArrowBuffer.copyCurrent(self.nulls, to: &nulls, len: 
nulls.capacity) return [nulls, values] @@ -155,29 +155,29 @@ public class BoolBufferBuilder: ValuesBufferBuilder, ArrowBufferBuilder { } public func append(_ newValue: ItemType?) { - let index = UInt(self.length) - self.length += 1 - if (length/8) > self.values.length { - self.resize(length) + let index = UInt(length) + length += 1 + if (length / 8) > values.length { + resize(length) } if newValue != nil { - BitUtility.setBit(index + self.offset, buffer: self.nulls) + BitUtility.setBit(index + offset, buffer: nulls) if newValue == true { - BitUtility.setBit(index + self.offset, buffer: self.values) + BitUtility.setBit(index + offset, buffer: values) } else { - BitUtility.clearBit(index + self.offset, buffer: self.values) + BitUtility.clearBit(index + offset, buffer: values) } } else { - self.nullCount += 1 - BitUtility.clearBit(index + self.offset, buffer: self.nulls) - BitUtility.clearBit(index + self.offset, buffer: self.values) + nullCount += 1 + BitUtility.clearBit(index + offset, buffer: nulls) + BitUtility.clearBit(index + offset, buffer: values) } } public func resize(_ length: UInt) { - if (length/8) > self.values.length { + if (length / 8) > values.length { let resizeLength = resizeLength(self.values) var values = ArrowBuffer.createBuffer(resizeLength, size: UInt(MemoryLayout.size)) var nulls = ArrowBuffer.createBuffer(resizeLength, size: UInt(MemoryLayout.size)) @@ -205,16 +205,16 @@ public class VariableBufferBuilder: ValuesBufferBuilder, ArrowBufferBuilde public required init() throws { let values = ArrowBuffer.createBuffer(0, size: UInt(binaryStride)) let nulls = ArrowBuffer.createBuffer(0, size: UInt(binaryStride)) - self.offsets = ArrowBuffer.createBuffer(0, size: UInt(MemoryLayout.stride)) + offsets = ArrowBuffer.createBuffer(0, size: UInt(MemoryLayout.stride)) super.init(values: values, nulls: nulls, stride: binaryStride) } public func append(_ newValue: ItemType?) 
{ - let index = UInt(self.length) - self.length += 1 + let index = UInt(length) + length += 1 let offsetIndex = MemoryLayout.stride * Int(index) - if self.length >= self.offsets.length { - self.resize(UInt( self.offsets.length + 1)) + if length >= offsets.length { + resize(UInt(offsets.length + 1)) } var binData: Data var isNull = false @@ -227,20 +227,20 @@ public class VariableBufferBuilder: ValuesBufferBuilder, ArrowBufferBuilde } var currentIndex: Int32 = 0 - var currentOffset: Int32 = Int32(binData.count) + var currentOffset = Int32(binData.count) if index > 0 { - currentIndex = self.offsets.rawPointer.advanced(by: offsetIndex).load(as: Int32.self) + currentIndex = offsets.rawPointer.advanced(by: offsetIndex).load(as: Int32.self) currentOffset += currentIndex - if currentOffset > self.values.length { - self.value_resize(UInt(currentOffset)) + if currentOffset > values.length { + value_resize(UInt(currentOffset)) } } if isNull { - self.nullCount += 1 - BitUtility.clearBit(index + self.offset, buffer: self.nulls) + nullCount += 1 + BitUtility.clearBit(index + offset, buffer: nulls) } else { - BitUtility.setBit(index + self.offset, buffer: self.nulls) + BitUtility.setBit(index + offset, buffer: nulls) } binData.withUnsafeBytes { bufferPointer in @@ -249,12 +249,12 @@ public class VariableBufferBuilder: ValuesBufferBuilder, ArrowBufferBuilde .copyMemory(from: rawPointer, byteCount: binData.count) } - self.offsets.rawPointer.advanced(by: (offsetIndex + MemoryLayout.stride)) + offsets.rawPointer.advanced(by: offsetIndex + MemoryLayout.stride) .storeBytes(of: currentOffset, as: Int32.self) } public func value_resize(_ length: UInt) { - if length > self.values.length { + if length > values.length { let resizeLength = resizeLength(self.values, len: length) var values = ArrowBuffer.createBuffer(resizeLength, size: UInt(MemoryLayout.size)) ArrowBuffer.copyCurrent(self.values, to: &values, len: self.values.capacity) @@ -263,9 +263,9 @@ public class VariableBufferBuilder: 
ValuesBufferBuilder, ArrowBufferBuilde } public func resize(_ length: UInt) { - if length > self.offsets.length { + if length > offsets.length { let resizeLength = resizeLength(self.offsets, len: length) - var nulls = ArrowBuffer.createBuffer(resizeLength/8 + 1, size: UInt(MemoryLayout.size)) + var nulls = ArrowBuffer.createBuffer(resizeLength / 8 + 1, size: UInt(MemoryLayout.size)) var offsets = ArrowBuffer.createBuffer(resizeLength, size: UInt(MemoryLayout.size)) ArrowBuffer.copyCurrent(self.nulls, to: &nulls, len: self.nulls.capacity) ArrowBuffer.copyCurrent(self.offsets, to: &offsets, len: self.offsets.capacity) @@ -277,7 +277,7 @@ public class VariableBufferBuilder: ValuesBufferBuilder, ArrowBufferBuilde public func finish() -> [ArrowBuffer] { let length = self.length var values = ArrowBuffer.createBuffer(self.values.length, size: UInt(MemoryLayout.size)) - var nulls = ArrowBuffer.createBuffer(length/8 + 1, size: UInt(MemoryLayout.size)) + var nulls = ArrowBuffer.createBuffer(length / 8 + 1, size: UInt(MemoryLayout.size)) var offsets = ArrowBuffer.createBuffer(length, size: UInt(MemoryLayout.size)) ArrowBuffer.copyCurrent(self.values, to: &values, len: values.capacity) ArrowBuffer.copyCurrent(self.nulls, to: &nulls, len: nulls.capacity) @@ -288,50 +288,50 @@ public class VariableBufferBuilder: ValuesBufferBuilder, ArrowBufferBuilde public class AbstractWrapperBufferBuilder: ArrowBufferBuilder { public typealias ItemType = T - public var capacity: UInt {return self.bufferBuilder.capacity} - public var length: UInt {return self.bufferBuilder.length} - public var nullCount: UInt {return self.bufferBuilder.nullCount} - public var offset: UInt {return self.bufferBuilder.offset} + public var capacity: UInt { return bufferBuilder.capacity } + public var length: UInt { return bufferBuilder.length } + public var nullCount: UInt { return bufferBuilder.nullCount } + public var offset: UInt { return bufferBuilder.offset } let bufferBuilder: FixedBufferBuilder public 
required init() throws { - self.bufferBuilder = try FixedBufferBuilder() + bufferBuilder = try FixedBufferBuilder() } - public func append(_ newValue: ItemType?) { + public func append(_: ItemType?) { fatalError("Method is not implemented") } public func isNull(_ index: UInt) -> Bool { - return self.bufferBuilder.isNull(index) + return bufferBuilder.isNull(index) } public func resize(_ length: UInt) { - self.bufferBuilder.resize(length) + bufferBuilder.resize(length) } public func finish() -> [ArrowBuffer] { - return self.bufferBuilder.finish() + return bufferBuilder.finish() } } public class Date32BufferBuilder: AbstractWrapperBufferBuilder { - public override func append(_ newValue: ItemType?) { + override public func append(_ newValue: ItemType?) { if let val = newValue { let daysSinceEpoch = Int32(val.timeIntervalSince1970 / 86400) - self.bufferBuilder.append(daysSinceEpoch) + bufferBuilder.append(daysSinceEpoch) } else { - self.bufferBuilder.append(nil) + bufferBuilder.append(nil) } } } public class Date64BufferBuilder: AbstractWrapperBufferBuilder { - public override func append(_ newValue: ItemType?) { + override public func append(_ newValue: ItemType?) { if let val = newValue { let daysSinceEpoch = Int64(val.timeIntervalSince1970 * 1000) - self.bufferBuilder.append(daysSinceEpoch) + bufferBuilder.append(daysSinceEpoch) } else { - self.bufferBuilder.append(nil) + bufferBuilder.append(nil) } } } @@ -349,24 +349,24 @@ public final class StructBufferBuilder: BaseBufferBuilder, ArrowBufferBuilder { } public func append(_ newValue: [Any?]?) 
{ - let index = UInt(self.length) - self.length += 1 - if length > self.nulls.length { - self.resize(length) + let index = UInt(length) + length += 1 + if length > nulls.length { + resize(length) } if newValue != nil { - BitUtility.setBit(index + self.offset, buffer: self.nulls) + BitUtility.setBit(index + offset, buffer: nulls) } else { - self.nullCount += 1 - BitUtility.clearBit(index + self.offset, buffer: self.nulls) + nullCount += 1 + BitUtility.clearBit(index + offset, buffer: nulls) } } public func resize(_ length: UInt) { - if length > self.nulls.length { + if length > nulls.length { let resizeLength = resizeLength(self.nulls) - var nulls = ArrowBuffer.createBuffer(resizeLength/8 + 1, size: UInt(MemoryLayout.size)) + var nulls = ArrowBuffer.createBuffer(resizeLength / 8 + 1, size: UInt(MemoryLayout.size)) ArrowBuffer.copyCurrent(self.nulls, to: &nulls, len: self.nulls.capacity) self.nulls = nulls } @@ -374,7 +374,7 @@ public final class StructBufferBuilder: BaseBufferBuilder, ArrowBufferBuilder { public func finish() -> [ArrowBuffer] { let length = self.length - var nulls = ArrowBuffer.createBuffer(length/8 + 1, size: UInt(MemoryLayout.size)) + var nulls = ArrowBuffer.createBuffer(length / 8 + 1, size: UInt(MemoryLayout.size)) ArrowBuffer.copyCurrent(self.nulls, to: &nulls, len: nulls.capacity) return [nulls] } diff --git a/Sources/Arrow/ArrowCExporter.swift b/Sources/Arrow/ArrowCExporter.swift index bb95b89..9ee4704 100644 --- a/Sources/Arrow/ArrowCExporter.swift +++ b/Sources/Arrow/ArrowCExporter.swift @@ -15,9 +15,9 @@ // specific language governing permissions and limitations // under the License. -import Foundation import ArrowC import Atomics +import Foundation // The memory used by UnsafeAtomic is not automatically // reclaimed. 
Since this value is initialized once @@ -44,9 +44,9 @@ public class ArrowCExporter { self.arrowType = arrowType // keeping the name str to ensure the cstring buffer remains valid self.name = name - self.arrowTypeName = try arrowType.cDataFormatId - self.nameCstr = (self.name as NSString).utf8String! - self.arrowTypeNameCstr = (self.arrowTypeName as NSString).utf8String! + arrowTypeName = try arrowType.cDataFormatId + nameCstr = (self.name as NSString).utf8String! + arrowTypeNameCstr = (arrowTypeName as NSString).utf8String! super.init() } } @@ -61,11 +61,11 @@ public class ArrowCExporter { // deallocated self.arrowData = arrowData for arrowBuffer in arrowData.buffers { - self.data.append(arrowBuffer.rawPointer) + data.append(arrowBuffer.rawPointer) } - self.buffers = UnsafeMutablePointer.allocate(capacity: self.data.count) - self.buffers.initialize(from: &self.data, count: self.data.count) + buffers = UnsafeMutablePointer.allocate(capacity: data.count) + buffers.initialize(from: &data, count: data.count) super.init() } @@ -79,14 +79,14 @@ public class ArrowCExporter { public init() {} public func exportType(_ cSchema: inout ArrowC.ArrowSchema, arrowType: ArrowType, name: String = "") -> - Result { + Result { do { let exportSchema = try ExportSchema(arrowType, name: name) cSchema.format = exportSchema.arrowTypeNameCstr cSchema.name = exportSchema.nameCstr cSchema.private_data = UnsafeMutableRawPointer(mutating: UnsafeRawPointer(bitPattern: exportSchema.id)) - cSchema.release = {(data: UnsafeMutablePointer?) in + cSchema.release = { (data: UnsafeMutablePointer?) 
in let arraySchema = data!.pointee let exportId = Int(bitPattern: arraySchema.private_data) guard ArrowCExporter.exportedData[exportId] != nil else { @@ -106,7 +106,7 @@ public class ArrowCExporter { } public func exportField(_ schema: inout ArrowC.ArrowSchema, field: ArrowField) -> - Result { + Result { return exportType(&schema, arrowType: field.type, name: field.name) } @@ -123,7 +123,7 @@ public class ArrowCExporter { cArray.dictionary = nil cArray.private_data = UnsafeMutableRawPointer(mutating: UnsafeRawPointer(bitPattern: exportArray.id)) - cArray.release = {(data: UnsafeMutablePointer?) in + cArray.release = { (data: UnsafeMutablePointer?) in let arrayData = data!.pointee let exportId = Int(bitPattern: arrayData.private_data) guard ArrowCExporter.exportedData[exportId] != nil else { diff --git a/Sources/Arrow/ArrowCImporter.swift b/Sources/Arrow/ArrowCImporter.swift index 8381152..4814902 100644 --- a/Sources/Arrow/ArrowCImporter.swift +++ b/Sources/Arrow/ArrowCImporter.swift @@ -15,19 +15,19 @@ // specific language governing permissions and limitations // under the License. 
-import Foundation import ArrowC +import Foundation public class ImportArrayHolder: ArrowArrayHolder { let cArrayPtr: UnsafePointer - public var type: ArrowType {self.holder.type} - public var length: UInt {self.holder.length} - public var nullCount: UInt {self.holder.nullCount} - public var array: AnyArray {self.holder.array} - public var data: ArrowData {self.holder.data} - public var getBufferData: () -> [Data] {self.holder.getBufferData} - public var getBufferDataSizes: () -> [Int] {self.holder.getBufferDataSizes} - public var getArrowColumn: (ArrowField, [ArrowArrayHolder]) throws -> ArrowColumn {self.holder.getArrowColumn} + public var type: ArrowType { holder.type } + public var length: UInt { holder.length } + public var nullCount: UInt { holder.nullCount } + public var array: AnyArray { holder.array } + public var data: ArrowData { holder.data } + public var getBufferData: () -> [Data] { holder.getBufferData } + public var getBufferDataSizes: () -> [Int] { holder.getBufferDataSizes } + public var getArrowColumn: (ArrowField, [ArrowArrayHolder]) throws -> ArrowColumn { self.holder.getArrowColumn } private let holder: ArrowArrayHolder init(_ holder: ArrowArrayHolder, cArrayPtr: UnsafePointer) { self.cArrayPtr = cArrayPtr @@ -45,7 +45,8 @@ public class ArrowCImporter { private func appendToBuffer( _ cBuffer: UnsafeRawPointer?, arrowBuffers: inout [ArrowBuffer], - length: UInt) { + length: UInt + ) { if cBuffer == nil { arrowBuffers.append(ArrowBuffer.createEmptyBuffer()) return @@ -59,7 +60,7 @@ public class ArrowCImporter { public init() {} public func importType(_ cArrow: String, name: String = "") -> - Result { + Result { do { let type = try ArrowType.fromCDataFormatId(cArrow) return .success(ArrowField(name, type: ArrowType(type.info), isNullable: true)) @@ -69,7 +70,7 @@ public class ArrowCImporter { } public func importField(_ cSchema: ArrowC.ArrowSchema) -> - Result { + Result { if cSchema.n_children > 0 { ArrowCImporter.release(cSchema) return 
.failure(.invalid("Children currently not supported")) @@ -79,11 +80,12 @@ public class ArrowCImporter { } switch importType( - String(cString: cSchema.format), name: String(cString: cSchema.name)) { - case .success(let field): + String(cString: cSchema.format), name: String(cString: cSchema.name) + ) { + case let .success(field): ArrowCImporter.release(cSchema) return .success(field) - case .failure(let err): + case let .failure(err): ArrowCImporter.release(cSchema) return .failure(err) } @@ -155,9 +157,9 @@ public class ArrowCImporter { switch makeArrayHolder(arrowField, buffers: arrowBuffers, nullCount: nullCount, children: nil, rbLength: 0) { - case .success(let holder): + case let .success(holder): return .success(ImportArrayHolder(holder, cArrayPtr: cArrayPtr)) - case .failure(let err): + case let .failure(err): ArrowCImporter.release(cArrayPtr) return .failure(err) } diff --git a/Sources/Arrow/ArrowData.swift b/Sources/Arrow/ArrowData.swift index 2728b9f..7899e9f 100644 --- a/Sources/Arrow/ArrowData.swift +++ b/Sources/Arrow/ArrowData.swift @@ -52,7 +52,7 @@ public class ArrowData { } } - self.type = arrowType + type = arrowType self.buffers = buffers self.children = children self.nullCount = nullCount diff --git a/Sources/Arrow/ArrowDecoder.swift b/Sources/Arrow/ArrowDecoder.swift index 35dd4dc..6f93438 100644 --- a/Sources/Arrow/ArrowDecoder.swift +++ b/Sources/Arrow/ArrowDecoder.swift @@ -26,58 +26,58 @@ public class ArrowDecoder: Decoder { public let nameToCol: [String: ArrowArrayHolder] public let columns: [ArrowArrayHolder] public init(_ decoder: ArrowDecoder) { - self.userInfo = decoder.userInfo - self.codingPath = decoder.codingPath - self.rb = decoder.rb - self.columns = decoder.columns - self.nameToCol = decoder.nameToCol - self.rbIndex = decoder.rbIndex + userInfo = decoder.userInfo + codingPath = decoder.codingPath + rb = decoder.rb + columns = decoder.columns + nameToCol = decoder.nameToCol + rbIndex = decoder.rbIndex } public init(_ rb: 
RecordBatch) { self.rb = rb var colMapping = [String: ArrowArrayHolder]() var columns = [ArrowArrayHolder]() - for index in 0..(_ type: [T: U].Type) throws -> [T: U] { + public func decode(_: [T: U].Type) throws -> [T: U] { var output = [T: U]() if rb.columnCount != 2 { throw ArrowError.invalid("RecordBatch column count of 2 is required to decode to map") } - for index in 0..(_ type: T.Type) throws -> [T] { var output = [T]() - for index in 0..(keyedBy type: Key.Type + public func container(keyedBy _: Key.Type ) -> KeyedDecodingContainer where Key: CodingKey { let container = ArrowKeyedDecoding(self, codingPath: codingPath) return KeyedDecodingContainer(container) @@ -92,7 +92,7 @@ public class ArrowDecoder: Decoder { } func getCol(_ name: String) throws -> AnyArray { - guard let col = self.nameToCol[name] else { + guard let col = nameToCol[name] else { throw ArrowError.invalid("Column for key \"\(name)\" not found") } @@ -100,31 +100,31 @@ public class ArrowDecoder: Decoder { } func getCol(_ index: Int) throws -> AnyArray { - if index >= self.columns.count { + if index >= columns.count { throw ArrowError.outOfBounds(index: Int64(index)) } - return self.columns[index].array + return columns[index].array } func doDecode(_ key: CodingKey) throws -> T? { - let array: AnyArray = try self.getCol(key.stringValue) - return array.asAny(self.rbIndex) as? T + let array: AnyArray = try getCol(key.stringValue) + return array.asAny(rbIndex) as? T } func doDecode(_ col: Int) throws -> T? { - let array: AnyArray = try self.getCol(col) - return array.asAny(self.rbIndex) as? T + let array: AnyArray = try getCol(col) + return array.asAny(rbIndex) as? 
T } func isNull(_ key: CodingKey) throws -> Bool { - let array: AnyArray = try self.getCol(key.stringValue) - return array.asAny(self.rbIndex) == nil + let array: AnyArray = try getCol(key.stringValue) + return array.asAny(rbIndex) == nil } func isNull(_ col: Int) throws -> Bool { - let array: AnyArray = try self.getCol(col) - return array.asAny(self.rbIndex) == nil + let array: AnyArray = try getCol(col) + return array.asAny(rbIndex) == nil } } @@ -138,17 +138,17 @@ private struct ArrowUnkeyedDecoding: UnkeyedDecodingContainer { init(_ decoder: ArrowDecoder, codingPath: [CodingKey]) { self.decoder = decoder self.codingPath = codingPath - self.count = self.decoder.columns.count + count = self.decoder.columns.count } mutating func increment() { - self.currentIndex += 1 - self.isAtEnd = self.currentIndex >= self.count! + currentIndex += 1 + isAtEnd = currentIndex >= count! } mutating func decodeNil() throws -> Bool { - defer {increment()} - return try self.decoder.isNull(self.currentIndex) + defer { increment() } + return try decoder.isNull(currentIndex) } mutating func decode(_ type: T.Type) throws -> T where T: Decodable { @@ -165,15 +165,15 @@ private struct ArrowUnkeyedDecoding: UnkeyedDecodingContainer { type == UInt32.self || type == UInt64.self || type == String.self || type == Double.self || type == Float.self || type == Date.self { - defer {increment()} - return try self.decoder.doDecode(self.currentIndex)! + defer { increment() } + return try decoder.doDecode(currentIndex)! 
} else { throw ArrowError.invalid("Type \(type) is currently not supported") } } func nestedContainer( - keyedBy type: NestedKey.Type + keyedBy _: NestedKey.Type ) throws -> KeyedDecodingContainer where NestedKey: CodingKey { throw ArrowError.invalid("Nested decoding is currently not supported.") } @@ -198,87 +198,87 @@ private struct ArrowKeyedDecoding: KeyedDecodingContainerProtoco } func contains(_ key: Key) -> Bool { - return self.decoder.nameToCol.keys.contains(key.stringValue) + return decoder.nameToCol.keys.contains(key.stringValue) } func decodeNil(forKey key: Key) throws -> Bool { - try self.decoder.isNull(key) + try decoder.isNull(key) } - func decode(_ type: Bool.Type, forKey key: Key) throws -> Bool { - return try self.decoder.doDecode(key)! + func decode(_: Bool.Type, forKey key: Key) throws -> Bool { + return try decoder.doDecode(key)! } - func decode(_ type: String.Type, forKey key: Key) throws -> String { - return try self.decoder.doDecode(key)! + func decode(_: String.Type, forKey key: Key) throws -> String { + return try decoder.doDecode(key)! } - func decode(_ type: Double.Type, forKey key: Key) throws -> Double { - return try self.decoder.doDecode(key)! + func decode(_: Double.Type, forKey key: Key) throws -> Double { + return try decoder.doDecode(key)! } - func decode(_ type: Float.Type, forKey key: Key) throws -> Float { - return try self.decoder.doDecode(key)! + func decode(_: Float.Type, forKey key: Key) throws -> Float { + return try decoder.doDecode(key)! } - func decode(_ type: Int.Type, forKey key: Key) throws -> Int { + func decode(_: Int.Type, forKey _: Key) throws -> Int { throw ArrowError.invalid( "Int type is not supported (please use Int8, Int16, Int32 or Int64)") } - func decode(_ type: Int8.Type, forKey key: Key) throws -> Int8 { - return try self.decoder.doDecode(key)! + func decode(_: Int8.Type, forKey key: Key) throws -> Int8 { + return try decoder.doDecode(key)! 
} - func decode(_ type: Int16.Type, forKey key: Key) throws -> Int16 { - return try self.decoder.doDecode(key)! + func decode(_: Int16.Type, forKey key: Key) throws -> Int16 { + return try decoder.doDecode(key)! } - func decode(_ type: Int32.Type, forKey key: Key) throws -> Int32 { - return try self.decoder.doDecode(key)! + func decode(_: Int32.Type, forKey key: Key) throws -> Int32 { + return try decoder.doDecode(key)! } - func decode(_ type: Int64.Type, forKey key: Key) throws -> Int64 { - return try self.decoder.doDecode(key)! + func decode(_: Int64.Type, forKey key: Key) throws -> Int64 { + return try decoder.doDecode(key)! } - func decode(_ type: UInt.Type, forKey key: Key) throws -> UInt { + func decode(_: UInt.Type, forKey _: Key) throws -> UInt { throw ArrowError.invalid( "UInt type is not supported (please use UInt8, UInt16, UInt32 or UInt64)") } - func decode(_ type: UInt8.Type, forKey key: Key) throws -> UInt8 { - return try self.decoder.doDecode(key)! + func decode(_: UInt8.Type, forKey key: Key) throws -> UInt8 { + return try decoder.doDecode(key)! } - func decode(_ type: UInt16.Type, forKey key: Key) throws -> UInt16 { - return try self.decoder.doDecode(key)! + func decode(_: UInt16.Type, forKey key: Key) throws -> UInt16 { + return try decoder.doDecode(key)! } - func decode(_ type: UInt32.Type, forKey key: Key) throws -> UInt32 { - return try self.decoder.doDecode(key)! + func decode(_: UInt32.Type, forKey key: Key) throws -> UInt32 { + return try decoder.doDecode(key)! } - func decode(_ type: UInt64.Type, forKey key: Key) throws -> UInt64 { - return try self.decoder.doDecode(key)! + func decode(_: UInt64.Type, forKey key: Key) throws -> UInt64 { + return try decoder.doDecode(key)! } func decode(_ type: T.Type, forKey key: Key) throws -> T where T: Decodable { if ArrowArrayBuilders.isValidBuilderType(type) || type == Date.self { - return try self.decoder.doDecode(key)! + return try decoder.doDecode(key)! 
} else { throw ArrowError.invalid("Type \(type) is currently not supported") } } func nestedContainer( - keyedBy type: NestedKey.Type, - forKey key: Key + keyedBy _: NestedKey.Type, + forKey _: Key ) throws -> KeyedDecodingContainer where NestedKey: CodingKey { throw ArrowError.invalid("Nested decoding is currently not supported.") } - func nestedUnkeyedContainer(forKey key: Key) throws -> UnkeyedDecodingContainer { + func nestedUnkeyedContainer(forKey _: Key) throws -> UnkeyedDecodingContainer { throw ArrowError.invalid("Nested decoding is currently not supported.") } @@ -286,7 +286,7 @@ private struct ArrowKeyedDecoding: KeyedDecodingContainerProtoco throw ArrowError.invalid("super decoding is currently not supported.") } - func superDecoder(forKey key: Key) throws -> Decoder { + func superDecoder(forKey _: Key) throws -> Decoder { throw ArrowError.invalid("super decoding is currently not supported.") } } @@ -302,73 +302,73 @@ private struct ArrowSingleValueDecoding: SingleValueDecodingContainer { func decodeNil() -> Bool { do { - return try self.decoder.isNull(self.decoder.singleRBCol) + return try decoder.isNull(decoder.singleRBCol) } catch { return false } } - func decode(_ type: Bool.Type) throws -> Bool { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: Bool.Type) throws -> Bool { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: String.Type) throws -> String { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: String.Type) throws -> String { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: Double.Type) throws -> Double { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: Double.Type) throws -> Double { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: Float.Type) throws -> Float { - return try self.decoder.doDecode(self.decoder.singleRBCol)! 
+ func decode(_: Float.Type) throws -> Float { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: Int.Type) throws -> Int { + func decode(_: Int.Type) throws -> Int { throw ArrowError.invalid( "Int type is not supported (please use Int8, Int16, Int32 or Int64)") } - func decode(_ type: Int8.Type) throws -> Int8 { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: Int8.Type) throws -> Int8 { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: Int16.Type) throws -> Int16 { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: Int16.Type) throws -> Int16 { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: Int32.Type) throws -> Int32 { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: Int32.Type) throws -> Int32 { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: Int64.Type) throws -> Int64 { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: Int64.Type) throws -> Int64 { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: UInt.Type) throws -> UInt { + func decode(_: UInt.Type) throws -> UInt { throw ArrowError.invalid( "UInt type is not supported (please use UInt8, UInt16, UInt32 or UInt64)") } - func decode(_ type: UInt8.Type) throws -> UInt8 { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: UInt8.Type) throws -> UInt8 { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: UInt16.Type) throws -> UInt16 { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: UInt16.Type) throws -> UInt16 { + return try decoder.doDecode(decoder.singleRBCol)! } - func decode(_ type: UInt32.Type) throws -> UInt32 { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: UInt32.Type) throws -> UInt32 { + return try decoder.doDecode(decoder.singleRBCol)! 
} - func decode(_ type: UInt64.Type) throws -> UInt64 { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + func decode(_: UInt64.Type) throws -> UInt64 { + return try decoder.doDecode(decoder.singleRBCol)! } func decode(_ type: T.Type) throws -> T where T: Decodable { if ArrowArrayBuilders.isValidBuilderType(type) || type == Date.self { - return try self.decoder.doDecode(self.decoder.singleRBCol)! + return try decoder.doDecode(decoder.singleRBCol)! } else { throw ArrowError.invalid("Type \(type) is currently not supported") } diff --git a/Sources/Arrow/ArrowEncoder.swift b/Sources/Arrow/ArrowEncoder.swift index 8c72c04..f0efae4 100644 --- a/Sources/Arrow/ArrowEncoder.swift +++ b/Sources/Arrow/ArrowEncoder.swift @@ -60,7 +60,7 @@ public class ArrowEncoder: Encoder { // this will check if T is a simple built in type // (UInt, Int, Int8, String, Date, etc...). if ArrowArrayBuilders.isValidBuilderType(T.self) { - let builders = ["col0": try ArrowArrayBuilders.loadBuilder(T.self)] + let builders = try ["col0": ArrowArrayBuilders.loadBuilder(T.self)] return ArrowEncoder(builders, byIndex: ["col0"]) } else { let encoder = ArrowEncoder() @@ -76,25 +76,25 @@ public class ArrowEncoder: Encoder { try throwIfInvalid() let batchBuilder = RecordBatch.Builder() for key in byIndex { - batchBuilder.addColumn(key, arrowArray: try builders[key]!.toHolder()) + try batchBuilder.addColumn(key, arrowArray: builders[key]!.toHolder()) } switch batchBuilder.finish() { - case .success(let rb): + case let .success(rb): return rb - case .failure(let error): + case let .failure(error): throw error } } - public func container(keyedBy type: Key.Type) -> KeyedEncodingContainer where Key: CodingKey { + public func container(keyedBy _: Key.Type) -> KeyedEncodingContainer where Key: CodingKey { var container = ArrowKeyedEncoding(self) container.codingPath = codingPath return KeyedEncodingContainer(container) } public func unkeyedContainer() -> UnkeyedEncodingContainer { - return 
ArrowUnkeyedEncoding(self, codingPath: self.codingPath) + return ArrowUnkeyedEncoding(self, codingPath: codingPath) } public func singleValueContainer() -> SingleValueEncodingContainer { @@ -115,7 +115,7 @@ public class ArrowEncoder: Encoder { // to limitations in the Swifts Mirror API (ex: it is unable to correctly // find the type for String? in [Int: String?]) @discardableResult - func ensureColumnExists(_ value: T, key: String) throws -> ArrowArrayHolderBuilder { + func ensureColumnExists(_: T, key: String) throws -> ArrowArrayHolderBuilder { try throwIfInvalid() var builder = builders[key] if builder == nil { @@ -128,12 +128,12 @@ public class ArrowEncoder: Encoder { } func getIndex(_ index: Int) -> Int { - return self.modForIndex == nil ? index : index % self.modForIndex! + return modForIndex == nil ? index : index % modForIndex! } func doEncodeNil(_ keyIndex: Int) throws { try throwIfInvalid() - let index = self.getIndex(keyIndex) + let index = getIndex(keyIndex) if index >= builders.count { throw ArrowError.outOfBounds(index: Int64(index)) } @@ -149,7 +149,7 @@ public class ArrowEncoder: Encoder { func doEncode(_ value: T, keyIndex: Int) throws { try throwIfInvalid() - let index = self.getIndex(keyIndex) + let index = getIndex(keyIndex) if index >= builders.count { if index == builders.count { try ensureColumnExists(value, key: "col\(index)") @@ -162,7 +162,7 @@ public class ArrowEncoder: Encoder { } func throwIfInvalid() throws { - if let errorMsg = self.errorMsg { + if let errorMsg = errorMsg { throw ArrowError.invalid(errorMsg) } } @@ -229,12 +229,12 @@ private struct ArrowKeyedEncoding: KeyedEncodingContainerProtoco try doEncodeIf(value, forKey: key) } - mutating func encode(_ value: Int, forKey key: Key) throws { + mutating func encode(_: Int, forKey _: Key) throws { throw ArrowError.invalid( "Int type is not supported (please use Int8, Int16, Int32 or Int64)") } - mutating func encodeIfPresent(_ value: Int?, forKey key: Key) throws { + mutating func 
encodeIfPresent(_: Int?, forKey _: Key) throws { throw ArrowError.invalid( "Int type is not supported (please use Int8, Int16, Int32 or Int64)") } @@ -271,12 +271,12 @@ private struct ArrowKeyedEncoding: KeyedEncodingContainerProtoco try doEncodeIf(value, forKey: key) } - mutating func encode(_ value: UInt, forKey key: Key) throws { + mutating func encode(_: UInt, forKey _: Key) throws { throw ArrowError.invalid( "UInt type is not supported (please use UInt8, UInt16, UInt32 or UInt64)") } - mutating func encodeIfPresent(_ value: UInt?, forKey key: Key) throws { + mutating func encodeIfPresent(_: UInt?, forKey _: Key) throws { throw ArrowError.invalid( "UInt type is not supported (please use UInt8, UInt16, UInt32 or UInt64)") } @@ -333,10 +333,11 @@ private struct ArrowKeyedEncoding: KeyedEncodingContainerProtoco // so setting an error mesg that will be throw by the encoder at the next // method call that throws mutating func nestedContainer( - keyedBy keyType: NestedKey.Type, - forKey key: Key) -> KeyedEncodingContainer { - self.encoder.errorMsg = "Nested decoding is currently not supported." - var container = ArrowKeyedEncoding(self.encoder) + keyedBy _: NestedKey.Type, + forKey _: Key + ) -> KeyedEncodingContainer { + encoder.errorMsg = "Nested decoding is currently not supported." + var container = ArrowKeyedEncoding(encoder) container.codingPath = codingPath return KeyedEncodingContainer(container) } @@ -344,25 +345,25 @@ private struct ArrowKeyedEncoding: KeyedEncodingContainerProtoco // nested container is currently not allowed. This method doesn't throw // so setting an error mesg that will be throw by the encoder at the next // method call that throws - mutating func nestedUnkeyedContainer(forKey key: Key) -> UnkeyedEncodingContainer { - self.encoder.errorMsg = "Nested decoding is currently not supported." 
- return ArrowUnkeyedEncoding(self.encoder, codingPath: self.codingPath) + mutating func nestedUnkeyedContainer(forKey _: Key) -> UnkeyedEncodingContainer { + encoder.errorMsg = "Nested decoding is currently not supported." + return ArrowUnkeyedEncoding(encoder, codingPath: codingPath) } // super encoding is currently not allowed. This method doesn't throw // so setting an error mesg that will be throw by the encoder at the next // method call that throws mutating func superEncoder() -> Encoder { - self.encoder.errorMsg = "super encoding is currently not supported." - return self.encoder + encoder.errorMsg = "super encoding is currently not supported." + return encoder } // super encoding is currently not allowed. This method doesn't throw // so setting an error mesg that will be throw by the encoder at the next // method call that throws - mutating func superEncoder(forKey key: Key) -> Encoder { - self.encoder.errorMsg = "super encoding is currently not supported." - return self.encoder + mutating func superEncoder(forKey _: Key) -> Encoder { + encoder.errorMsg = "super encoding is currently not supported." 
+ return encoder } } @@ -372,13 +373,13 @@ private struct ArrowUnkeyedEncoding: UnkeyedEncodingContainer { var currentIndex: Int var count: Int = 0 - init(_ encoder: ArrowEncoder, codingPath: [CodingKey], currentIndex: Int = 0) { + init(_ encoder: ArrowEncoder, codingPath _: [CodingKey], currentIndex: Int = 0) { self.encoder = encoder self.currentIndex = currentIndex } mutating func increment() { - self.currentIndex += 1 + currentIndex += 1 } // If this method is called on row 0 and the encoder is @@ -391,14 +392,14 @@ private struct ArrowUnkeyedEncoding: UnkeyedEncodingContainer { // nullable types at the encode func level which is currently // not allowed) mutating func encodeNil() throws { - try encoder.doEncodeNil(self.currentIndex) + try encoder.doEncodeNil(currentIndex) } mutating func encode(_ value: T) throws where T: Encodable { let type = T.self if ArrowArrayBuilders.isValidBuilderType(type) { - defer {increment()} - return try self.encoder.doEncode(value, keyIndex: self.currentIndex) + defer { increment() } + return try encoder.doEncode(value, keyIndex: currentIndex) } else { throw ArrowError.invalid("Type \(type) is currently not supported") } @@ -407,10 +408,10 @@ private struct ArrowUnkeyedEncoding: UnkeyedEncodingContainer { // nested container is currently not allowed. This method doesn't throw // so setting an error mesg that will be throw by the encoder at the next // method call that throws - mutating func nestedContainer(keyedBy keyType: NestedKey.Type + mutating func nestedContainer(keyedBy _: NestedKey.Type ) -> KeyedEncodingContainer where NestedKey: CodingKey { - self.encoder.errorMsg = "Nested decoding is currently not supported." - var container = ArrowKeyedEncoding(self.encoder) + encoder.errorMsg = "Nested decoding is currently not supported." 
+ var container = ArrowKeyedEncoding(encoder) container.codingPath = codingPath return KeyedEncodingContainer(container) } @@ -419,16 +420,16 @@ private struct ArrowUnkeyedEncoding: UnkeyedEncodingContainer { // so setting an error mesg that will be throw by the encoder at the next // method call that throws mutating func nestedUnkeyedContainer() -> UnkeyedEncodingContainer { - self.encoder.errorMsg = "Nested decoding is currently not supported." - return ArrowUnkeyedEncoding(self.encoder, codingPath: self.codingPath) + encoder.errorMsg = "Nested decoding is currently not supported." + return ArrowUnkeyedEncoding(encoder, codingPath: codingPath) } // super encoding is currently not allowed. This method doesn't throw // so setting an error mesg that will be throw by the encoder at the next // method call that throws mutating func superEncoder() -> Encoder { - self.encoder.errorMsg = "super encoding is currently not supported." - return self.encoder + encoder.errorMsg = "super encoding is currently not supported." 
+ return encoder } } @@ -442,15 +443,16 @@ private struct ArrowSingleValueEncoding: SingleValueEncodingContainer { } mutating func encodeNil() throws { - return try self.encoder.doEncodeNil(0) + return try encoder.doEncodeNil(0) } mutating func encode(_ value: T) throws { if ArrowArrayBuilders.isValidBuilderType(T.self) { - return try self.encoder.doEncode(value, keyIndex: 0) + return try encoder.doEncode(value, keyIndex: 0) } else { throw ArrowError.invalid("Type \(T.self) is currently not supported") } } } + // swiftlint:disable:this file_length diff --git a/Sources/Arrow/ArrowReader.swift b/Sources/Arrow/ArrowReader.swift index bdf45d4..725fed8 100644 --- a/Sources/Arrow/ArrowReader.swift +++ b/Sources/Arrow/ArrowReader.swift @@ -19,7 +19,7 @@ import FlatBuffers import Foundation let FILEMARKER = "ARROW1" -let CONTINUATIONMARKER = UInt32(0xFFFFFFFF) +let CONTINUATIONMARKER = UInt32(0xFFFF_FFFF) public class ArrowReader { // swiftlint:disable:this type_body_length private class RecordBatchData { @@ -35,25 +35,25 @@ public class ArrowReader { // swiftlint:disable:this type_body_length } func nextNode() -> org_apache_arrow_flatbuf_FieldNode? { - if nodeIndex >= self.recordBatch.nodesCount {return nil} - defer {nodeIndex += 1} - return self.recordBatch.nodes(at: nodeIndex) + if nodeIndex >= recordBatch.nodesCount { return nil } + defer { nodeIndex += 1 } + return recordBatch.nodes(at: nodeIndex) } func nextBuffer() -> org_apache_arrow_flatbuf_Buffer? { - if bufferIndex >= self.recordBatch.buffersCount {return nil} - defer {bufferIndex += 1} - return self.recordBatch.buffers(at: bufferIndex) + if bufferIndex >= recordBatch.buffersCount { return nil } + defer { bufferIndex += 1 } + return recordBatch.buffers(at: bufferIndex) } func nextField() -> org_apache_arrow_flatbuf_Field? 
{ - if fieldIndex >= self.schema.fieldsCount {return nil} - defer {fieldIndex += 1} - return self.schema.fields(at: fieldIndex) + if fieldIndex >= schema.fieldsCount { return nil } + defer { fieldIndex += 1 } + return schema.fields(at: fieldIndex) } func isDone() -> Bool { - return nodeIndex >= self.recordBatch.nodesCount + return nodeIndex >= recordBatch.nodesCount } } @@ -88,7 +88,7 @@ public class ArrowReader { // swiftlint:disable:this type_body_length private func loadStructData(_ loadInfo: DataLoadInfo, field: org_apache_arrow_flatbuf_Field) - -> Result { + -> Result { guard let node = loadInfo.batchData.nextNode() else { return .failure(.invalid("Node not found")) } @@ -101,12 +101,12 @@ public class ArrowReader { // swiftlint:disable:this type_body_length let arrowNullBuffer = makeBuffer(nullBuffer, fileData: loadInfo.fileData, length: nullLength, messageOffset: loadInfo.messageOffset) var children = [ArrowData]() - for index in 0.. Result { + field: org_apache_arrow_flatbuf_Field + ) + -> Result { guard let node = loadInfo.batchData.nextNode() else { return .failure(.invalid("Node not found")) } @@ -144,8 +145,9 @@ public class ArrowReader { // swiftlint:disable:this type_body_length private func loadVariableData( _ loadInfo: DataLoadInfo, - field: org_apache_arrow_flatbuf_Field) - -> Result { + field: org_apache_arrow_flatbuf_Field + ) + -> Result { guard let node = loadInfo.batchData.nextNode() else { return .failure(.invalid("Node not found")) } @@ -176,8 +178,9 @@ public class ArrowReader { // swiftlint:disable:this type_body_length private func loadField( _ loadInfo: DataLoadInfo, - field: org_apache_arrow_flatbuf_Field) - -> Result { + field: org_apache_arrow_flatbuf_Field + ) + -> Result { if isNestedType(field.typeType) { return loadStructData(loadInfo, field: field) } else if isFixedPrimitive(field.typeType) { @@ -206,9 +209,9 @@ public class ArrowReader { // swiftlint:disable:this type_body_length let result = loadField(loadInfo, field: field) 
switch result { - case .success(let holder): + case let .success(holder): columns.append(holder) - case .failure(let error): + case let .failure(error): return .failure(error) } } @@ -223,10 +226,10 @@ public class ArrowReader { // swiftlint:disable:this type_body_length */ public func readStreaming( // swiftlint:disable:this function_body_length _ input: Data, - useUnalignedBuffers: Bool = false + useUnalignedBuffers _: Bool = false ) -> Result { let result = ArrowReaderResult() - var offset: Int = 0 + var offset = 0 var length = getUInt32(input, offset: offset) var streamData = input var schemaMessage: org_apache_arrow_flatbuf_Schema? @@ -243,7 +246,8 @@ public class ArrowReader { // swiftlint:disable:this type_body_length streamData = input[offset...] let dataBuffer = ByteBuffer( data: streamData, - allowReadingUnalignedBuffers: true) + allowReadingUnalignedBuffers: true + ) let message = org_apache_arrow_flatbuf_Message.getRootAsMessage(bb: dataBuffer) switch message.headerType { case .recordbatch: @@ -253,11 +257,12 @@ public class ArrowReader { // swiftlint:disable:this type_body_length schema: schemaMessage!, arrowSchema: result.schema!, data: input, - messageEndOffset: (Int64(offset) + Int64(length))) + messageEndOffset: Int64(offset) + Int64(length) + ) switch recordBatchResult { - case .success(let recordBatch): + case let .success(recordBatch): result.batches.append(recordBatch) - case .failure(let error): + case let .failure(error): return .failure(error) } offset += Int(message.bodyLength + Int64(length)) @@ -266,9 +271,9 @@ public class ArrowReader { // swiftlint:disable:this type_body_length schemaMessage = message.header(type: org_apache_arrow_flatbuf_Schema.self)! let schemaResult = loadSchema(schemaMessage!) 
switch schemaResult { - case .success(let schema): + case let .success(schema): result.schema = schema - case .failure(let error): + case let .failure(error): return .failure(error) } offset += Int(message.bodyLength + Int64(length)) @@ -298,13 +303,14 @@ public class ArrowReader { // swiftlint:disable:this type_body_length let footerData = fileData[footerStartOffset...] let footerBuffer = ByteBuffer( data: footerData, - allowReadingUnalignedBuffers: useUnalignedBuffers) + allowReadingUnalignedBuffers: useUnalignedBuffers + ) let footer = org_apache_arrow_flatbuf_Footer.getRootAsFooter(bb: footerBuffer) let schemaResult = loadSchema(footer.schema!) switch schemaResult { - case .success(let schema): + case let .success(schema): result.schema = schema - case .failure(let error): + case let .failure(error): return .failure(error) } @@ -320,7 +326,8 @@ public class ArrowReader { // swiftlint:disable:this type_body_length messageLength = fileData.withUnsafeBytes { rawBuffer in rawBuffer.loadUnaligned( fromByteOffset: Int(recordBatch.offset + Int64(MemoryLayout.size)), - as: UInt32.self) + as: UInt32.self + ) } } @@ -329,7 +336,8 @@ public class ArrowReader { // swiftlint:disable:this type_body_length let recordBatchData = fileData[messageStartOffset ..< messageEndOffset] let mbb = ByteBuffer( data: recordBatchData, - allowReadingUnalignedBuffers: useUnalignedBuffers) + allowReadingUnalignedBuffers: useUnalignedBuffers + ) let message = org_apache_arrow_flatbuf_Message.getRootAsMessage(bb: mbb) switch message.headerType { case .recordbatch: @@ -339,11 +347,12 @@ public class ArrowReader { // swiftlint:disable:this type_body_length schema: footer.schema!, arrowSchema: result.schema!, data: fileData, - messageEndOffset: messageEndOffset) + messageEndOffset: messageEndOffset + ) switch recordBatchResult { - case .success(let recordBatch): + case let .success(recordBatch): result.batches.append(recordBatch) - case .failure(let error): + case let .failure(error): return 
.failure(error) } default: @@ -363,14 +372,14 @@ public class ArrowReader { // swiftlint:disable:this type_body_length let markerLength = FILEMARKER.utf8.count let footerLengthEnd = Int(fileData.count - markerLength) - let data = fileData[..<(footerLengthEnd)] + let data = fileData[.. ArrowReaderResult { + public static func makeArrowReaderResult() -> ArrowReaderResult { return ArrowReaderResult() } @@ -382,35 +391,37 @@ public class ArrowReader { // swiftlint:disable:this type_body_length ) -> Result { let mbb = ByteBuffer( data: dataHeader, - allowReadingUnalignedBuffers: useUnalignedBuffers) + allowReadingUnalignedBuffers: useUnalignedBuffers + ) let message = org_apache_arrow_flatbuf_Message.getRootAsMessage(bb: mbb) switch message.headerType { case .schema: let sMessage = message.header(type: org_apache_arrow_flatbuf_Schema.self)! switch loadSchema(sMessage) { - case .success(let schema): + case let .success(schema): result.schema = schema result.messageSchema = sMessage return .success(()) - case .failure(let error): + case let .failure(error): return .failure(error) } case .recordbatch: let rbMessage = message.header(type: org_apache_arrow_flatbuf_RecordBatch.self)! 
let recordBatchResult = loadRecordBatch( rbMessage, schema: result.messageSchema!, arrowSchema: result.schema!, - data: dataBody, messageEndOffset: 0) + data: dataBody, messageEndOffset: 0 + ) switch recordBatchResult { - case .success(let recordBatch): + case let .success(recordBatch): result.batches.append(recordBatch) return .success(()) - case .failure(let error): + case let .failure(error): return .failure(error) } default: return .failure(.unknownError("Unhandled header type: \(message.headerType)")) } } - } + // swiftlint:disable:this file_length diff --git a/Sources/Arrow/ArrowReaderHelper.swift b/Sources/Arrow/ArrowReaderHelper.swift index 37f4680..495367c 100644 --- a/Sources/Arrow/ArrowReaderHelper.swift +++ b/Sources/Arrow/ArrowReaderHelper.swift @@ -23,7 +23,7 @@ private func makeBinaryHolder(_ buffers: [ArrowBuffer], do { let arrowType = ArrowType(ArrowType.ArrowBinary) let arrowData = try ArrowData(arrowType, buffers: buffers, nullCount: nullCount) - return .success(ArrowArrayHolderImpl(try BinaryArray(arrowData))) + return try .success(ArrowArrayHolderImpl(BinaryArray(arrowData))) } catch let error as ArrowError { return .failure(error) } catch { @@ -36,7 +36,7 @@ private func makeStringHolder(_ buffers: [ArrowBuffer], do { let arrowType = ArrowType(ArrowType.ArrowString) let arrowData = try ArrowData(arrowType, buffers: buffers, nullCount: nullCount) - return .success(ArrowArrayHolderImpl(try StringArray(arrowData))) + return try .success(ArrowArrayHolderImpl(StringArray(arrowData))) } catch let error as ArrowError { return .failure(error) } catch { @@ -46,16 +46,15 @@ private func makeStringHolder(_ buffers: [ArrowBuffer], private func makeDateHolder(_ field: ArrowField, buffers: [ArrowBuffer], - nullCount: UInt -) -> Result { + nullCount: UInt) -> Result { do { if field.type.id == .date32 { let arrowData = try ArrowData(field.type, buffers: buffers, nullCount: nullCount) - return .success(ArrowArrayHolderImpl(try Date32Array(arrowData))) + return 
try .success(ArrowArrayHolderImpl(Date32Array(arrowData))) } let arrowData = try ArrowData(field.type, buffers: buffers, nullCount: nullCount) - return .success(ArrowArrayHolderImpl(try Date64Array(arrowData))) + return try .success(ArrowArrayHolderImpl(Date64Array(arrowData))) } catch let error as ArrowError { return .failure(error) } catch { @@ -65,13 +64,12 @@ private func makeDateHolder(_ field: ArrowField, private func makeTimeHolder(_ field: ArrowField, buffers: [ArrowBuffer], - nullCount: UInt -) -> Result { + nullCount: UInt) -> Result { do { if field.type.id == .time32 { if let arrowType = field.type as? ArrowTypeTime32 { let arrowData = try ArrowData(arrowType, buffers: buffers, nullCount: nullCount) - return .success(ArrowArrayHolderImpl(try FixedArray(arrowData))) + return try .success(ArrowArrayHolderImpl(FixedArray(arrowData))) } else { return .failure(.invalid("Incorrect field type for time: \(field.type)")) } @@ -79,7 +77,7 @@ private func makeTimeHolder(_ field: ArrowField, if let arrowType = field.type as? ArrowTypeTime64 { let arrowData = try ArrowData(arrowType, buffers: buffers, nullCount: nullCount) - return .success(ArrowArrayHolderImpl(try FixedArray(arrowData))) + return try .success(ArrowArrayHolderImpl(FixedArray(arrowData))) } else { return .failure(.invalid("Incorrect field type for time: \(field.type)")) } @@ -92,12 +90,11 @@ private func makeTimeHolder(_ field: ArrowField, private func makeTimestampHolder(_ field: ArrowField, buffers: [ArrowBuffer], - nullCount: UInt -) -> Result { + nullCount: UInt) -> Result { do { if let arrowType = field.type as? 
ArrowTypeTimestamp { let arrowData = try ArrowData(arrowType, buffers: buffers, nullCount: nullCount) - return .success(ArrowArrayHolderImpl(try TimestampArray(arrowData))) + return try .success(ArrowArrayHolderImpl(TimestampArray(arrowData))) } else { return .failure(.invalid("Incorrect field type for timestamp: \(field.type)")) } @@ -113,7 +110,7 @@ private func makeBoolHolder(_ buffers: [ArrowBuffer], do { let arrowType = ArrowType(ArrowType.ArrowBool) let arrowData = try ArrowData(arrowType, buffers: buffers, nullCount: nullCount) - return .success(ArrowArrayHolderImpl(try BoolArray(arrowData))) + return try .success(ArrowArrayHolderImpl(BoolArray(arrowData))) } catch let error as ArrowError { return .failure(error) } catch { @@ -127,7 +124,7 @@ private func makeFixedHolder( ) -> Result { do { let arrowData = try ArrowData(field.type, buffers: buffers, nullCount: nullCount) - return .success(ArrowArrayHolderImpl(try FixedArray(arrowData))) + return try .success(ArrowArrayHolderImpl(FixedArray(arrowData))) } catch let error as ArrowError { return .failure(error) } catch { @@ -146,7 +143,7 @@ func makeStructHolder( let arrowData = try ArrowData(field.type, buffers: buffers, children: children, nullCount: nullCount, length: rbLength) - return .success(ArrowArrayHolderImpl(try StructArray(arrowData))) + return try .success(ArrowArrayHolderImpl(StructArray(arrowData))) } catch let error as ArrowError { return .failure(error) } catch { @@ -240,7 +237,8 @@ func isNestedType(_ type: org_apache_arrow_flatbuf_Type_) -> Bool { } func findArrowType( // swiftlint:disable:this cyclomatic_complexity function_body_length - _ field: org_apache_arrow_flatbuf_Field) -> ArrowType { + _ field: org_apache_arrow_flatbuf_Field +) -> ArrowType { let type = field.typeType switch type { case .int: @@ -300,7 +298,7 @@ func findArrowType( // swiftlint:disable:this cyclomatic_complexity function_bod case .struct_: _ = field.type(type: org_apache_arrow_flatbuf_Struct_.self)! 
var fields = [ArrowField]() - for index in 0.. ArrowField { - return self.fields[index] + return fields[index] } public func fieldIndex(_ name: String) -> Int? { - return self.fieldLookup[name] + return fieldLookup[name] } public class Builder { diff --git a/Sources/Arrow/ArrowTable.swift b/Sources/Arrow/ArrowTable.swift index dedf90f..ea8cf03 100644 --- a/Sources/Arrow/ArrowTable.swift +++ b/Sources/Arrow/ArrowTable.swift @@ -20,30 +20,30 @@ import Foundation public class ArrowColumn { public let field: ArrowField fileprivate let dataHolder: ChunkedArrayHolder - public var type: ArrowType {return self.dataHolder.type} - public var length: UInt {return self.dataHolder.length} - public var nullCount: UInt {return self.dataHolder.nullCount} + public var type: ArrowType { return dataHolder.type } + public var length: UInt { return dataHolder.length } + public var nullCount: UInt { return dataHolder.nullCount } public func data() -> ChunkedArray { - return (self.dataHolder.holder as! ChunkedArray) // swiftlint:disable:this force_cast + return (dataHolder.holder as! 
ChunkedArray) // swiftlint:disable:this force_cast } - public var name: String {return field.name} + public var name: String { return field.name } public init(_ field: ArrowField, chunked: ChunkedArrayHolder) { self.field = field - self.dataHolder = chunked + dataHolder = chunked } } public class ArrowTable { public let schema: ArrowSchema - public var columnCount: UInt {return UInt(self.columns.count)} + public var columnCount: UInt { return UInt(columns.count) } public let rowCount: UInt public let columns: [ArrowColumn] init(_ schema: ArrowSchema, columns: [ArrowColumn]) { self.schema = schema self.columns = columns - self.rowCount = columns[0].length + rowCount = columns[0].length } public static func from(recordBatches: [RecordBatch]) -> Result { @@ -54,7 +54,7 @@ public class ArrowTable { var holders = [[ArrowArrayHolder]]() let schema = recordBatches[0].schema for recordBatch in recordBatches { - for index in 0.. Result { do { - return .success(try holders[0].getArrowColumn(field, holders)) + return try .success(holders[0].getArrowColumn(field, holders)) } catch { return .failure(.runtimeError("\(error)")) } @@ -94,54 +94,54 @@ public class ArrowTable { @discardableResult public func addColumn(_ fieldName: String, arrowArray: ArrowArray) throws -> Builder { - return self.addColumn(fieldName, chunked: try ChunkedArray([arrowArray])) + return try addColumn(fieldName, chunked: ChunkedArray([arrowArray])) } @discardableResult public func addColumn(_ fieldName: String, chunked: ChunkedArray) -> Builder { let field = ArrowField(fieldName, type: chunked.type, isNullable: chunked.nullCount != 0) - self.schemaBuilder.addField(field) - self.columns.append(ArrowColumn(field, chunked: ChunkedArrayHolder(chunked))) + schemaBuilder.addField(field) + columns.append(ArrowColumn(field, chunked: ChunkedArrayHolder(chunked))) return self } @discardableResult public func addColumn(_ field: ArrowField, arrowArray: ArrowArray) throws -> Builder { - 
self.schemaBuilder.addField(field) - let holder = ChunkedArrayHolder(try ChunkedArray([arrowArray])) - self.columns.append(ArrowColumn(field, chunked: holder)) + schemaBuilder.addField(field) + let holder = try ChunkedArrayHolder(ChunkedArray([arrowArray])) + columns.append(ArrowColumn(field, chunked: holder)) return self } @discardableResult public func addColumn(_ field: ArrowField, chunked: ChunkedArray) -> Builder { - self.schemaBuilder.addField(field) - self.columns.append(ArrowColumn(field, chunked: ChunkedArrayHolder(chunked))) + schemaBuilder.addField(field) + columns.append(ArrowColumn(field, chunked: ChunkedArrayHolder(chunked))) return self } @discardableResult public func addColumn(_ column: ArrowColumn) -> Builder { - self.schemaBuilder.addField(column.field) - self.columns.append(column) + schemaBuilder.addField(column.field) + columns.append(column) return self } public func finish() -> ArrowTable { - return ArrowTable(self.schemaBuilder.finish(), columns: self.columns) + return ArrowTable(schemaBuilder.finish(), columns: columns) } } } public class RecordBatch { public let schema: ArrowSchema - public var columnCount: UInt {return UInt(self.columns.count)} + public var columnCount: UInt { return UInt(columns.count) } public let columns: [ArrowArrayHolder] public let length: UInt public init(_ schema: ArrowSchema, columns: [ArrowArrayHolder]) { self.schema = schema self.columns = columns - self.length = columns[0].length + length = columns[0].length } public class Builder { @@ -153,15 +153,15 @@ public class RecordBatch { @discardableResult public func addColumn(_ fieldName: String, arrowArray: ArrowArrayHolder) -> Builder { let field = ArrowField(fieldName, type: arrowArray.type, isNullable: arrowArray.nullCount != 0) - self.schemaBuilder.addField(field) - self.columns.append(arrowArray) + schemaBuilder.addField(field) + columns.append(arrowArray) return self } @discardableResult public func addColumn(_ field: ArrowField, arrowArray: 
ArrowArrayHolder) -> Builder { - self.schemaBuilder.addField(field) - self.columns.append(arrowArray) + schemaBuilder.addField(field) + columns.append(arrowArray) return self } @@ -174,7 +174,7 @@ public class RecordBatch { } } } - return .success(RecordBatch(self.schemaBuilder.finish(), columns: self.columns)) + return .success(RecordBatch(schemaBuilder.finish(), columns: columns)) } } @@ -189,12 +189,12 @@ public class RecordBatch { } public func column(_ index: Int) -> ArrowArrayHolder { - return self.columns[index] + return columns[index] } public func column(_ name: String) -> ArrowArrayHolder? { - if let index = self.schema.fieldIndex(name) { - return self.columns[index] + if let index = schema.fieldIndex(name) { + return columns[index] } return nil diff --git a/Sources/Arrow/ArrowType.swift b/Sources/Arrow/ArrowType.swift index 381078f..e9b7fb2 100644 --- a/Sources/Arrow/ArrowType.swift +++ b/Sources/Arrow/ArrowType.swift @@ -93,9 +93,9 @@ public class ArrowTypeTime32: ArrowType { super.init(ArrowType.ArrowTime32) } - public override var cDataFormatId: String { + override public var cDataFormatId: String { get throws { - switch self.unit { + switch unit { case .milliseconds: return "ttm" case .seconds: @@ -112,9 +112,9 @@ public class ArrowTypeTime64: ArrowType { super.init(ArrowType.ArrowTime64) } - public override var cDataFormatId: String { + override public var cDataFormatId: String { get throws { - switch self.unit { + switch unit { case .microseconds: return "ttu" case .nanoseconds: @@ -142,21 +142,21 @@ public class ArrowTypeTimestamp: ArrowType { super.init(ArrowType.ArrowTimestamp) } - public convenience init(type: ArrowTypeId) { + public convenience init(type _: ArrowTypeId) { self.init(.milliseconds, timezone: nil) } - public override var cDataFormatId: String { + override public var cDataFormatId: String { get throws { let unitChar: String - switch self.unit { + switch unit { case .seconds: unitChar = "s" case .milliseconds: unitChar = "m" case 
.microseconds: unitChar = "u" case .nanoseconds: unitChar = "n" } - if let timezone = self.timezone { + if let timezone = timezone { return "ts\(unitChar):\(timezone)" } else { return "ts\(unitChar)" @@ -201,14 +201,14 @@ public class ArrowType { } public var id: ArrowTypeId { - switch self.info { - case .primitiveInfo(let id): + switch info { + case let .primitiveInfo(id): return id - case .timeInfo(let id): + case let .timeInfo(id): return id - case .variableInfo(let id): + case let .variableInfo(id): return id - case .complexInfo(let id): + case let .complexInfo(id): return id } } @@ -221,7 +221,8 @@ public class ArrowType { } public static func infoForType( // swiftlint:disable:this cyclomatic_complexity - _ type: Any.Type) -> ArrowType.Info { + _ type: Any.Type + ) -> ArrowType.Info { if type == String.self { return ArrowType.ArrowString } else if type == Date.self { @@ -283,7 +284,7 @@ public class ArrowType { public func getStride( // swiftlint:disable:this cyclomatic_complexity ) -> Int { - switch self.id { + switch id { case .int8: return MemoryLayout.stride case .int16: @@ -329,7 +330,7 @@ public class ArrowType { public var cDataFormatId: String { get throws { - switch self.id { + switch id { case ArrowTypeId.int8: return "c" case ArrowTypeId.int16: @@ -382,40 +383,41 @@ public class ArrowType { } public static func fromCDataFormatId( // swiftlint:disable:this cyclomatic_complexity - _ from: String) throws -> ArrowType { + _ from: String + ) throws -> ArrowType { if from == "c" { return ArrowType(ArrowType.ArrowInt8) } else if from == "s" { return ArrowType(ArrowType.ArrowInt16) } else if from == "i" { return ArrowType(ArrowType.ArrowInt32) - } else if from == "l" { + } else if from == "l" { return ArrowType(ArrowType.ArrowInt64) - } else if from == "C" { + } else if from == "C" { return ArrowType(ArrowType.ArrowUInt8) - } else if from == "S" { + } else if from == "S" { return ArrowType(ArrowType.ArrowUInt16) - } else if from == "I" { + } else if from == 
"I" { return ArrowType(ArrowType.ArrowUInt32) - } else if from == "L" { + } else if from == "L" { return ArrowType(ArrowType.ArrowUInt64) - } else if from == "f" { + } else if from == "f" { return ArrowType(ArrowType.ArrowFloat) - } else if from == "g" { + } else if from == "g" { return ArrowType(ArrowType.ArrowDouble) - } else if from == "b" { + } else if from == "b" { return ArrowType(ArrowType.ArrowBool) - } else if from == "tdD" { + } else if from == "tdD" { return ArrowType(ArrowType.ArrowDate32) - } else if from == "tdm" { + } else if from == "tdm" { return ArrowType(ArrowType.ArrowDate64) - } else if from == "tts" { + } else if from == "tts" { return ArrowTypeTime32(.seconds) - } else if from == "ttm" { + } else if from == "ttm" { return ArrowTypeTime32(.milliseconds) - } else if from == "ttu" { + } else if from == "ttu" { return ArrowTypeTime64(.microseconds) - } else if from == "ttn" { + } else if from == "ttn" { return ArrowTypeTime64(.nanoseconds) } else if from.starts(with: "ts") { let components = from.split(separator: ":", maxSplits: 1) @@ -435,9 +437,9 @@ public class ArrowType { let timezone = components.count > 1 ? 
String(components[1]) : nil return ArrowTypeTimestamp(unit, timezone: timezone) - } else if from == "z" { + } else if from == "z" { return ArrowType(ArrowType.ArrowBinary) - } else if from == "u" { + } else if from == "u" { return ArrowType(ArrowType.ArrowString) } @@ -447,14 +449,14 @@ public class ArrowType { extension ArrowType.Info: Equatable { public static func == (lhs: ArrowType.Info, rhs: ArrowType.Info) -> Bool { - switch(lhs, rhs) { - case (.primitiveInfo(let lhsId), .primitiveInfo(let rhsId)): + switch (lhs, rhs) { + case let (.primitiveInfo(lhsId), .primitiveInfo(rhsId)): return lhsId == rhsId - case (.variableInfo(let lhsId), .variableInfo(let rhsId)): + case let (.variableInfo(lhsId), .variableInfo(rhsId)): return lhsId == rhsId - case (.timeInfo(let lhsId), .timeInfo(let rhsId)): + case let (.timeInfo(lhsId), .timeInfo(rhsId)): return lhsId == rhsId - case (.complexInfo(let lhsId), .complexInfo(let rhsId)): + case let (.complexInfo(lhsId), .complexInfo(rhsId)): return lhsId == rhsId default: return false @@ -471,4 +473,5 @@ func getBytesFor(_ data: T) -> Data? { return nil } } + // swiftlint:disable:this file_length diff --git a/Sources/Arrow/ArrowWriter.swift b/Sources/Arrow/ArrowWriter.swift index 3aa25b6..67c14fa 100644 --- a/Sources/Arrow/ArrowWriter.swift +++ b/Sources/Arrow/ArrowWriter.swift @@ -15,11 +15,11 @@ // specific language governing permissions and limitations // under the License. 
-import Foundation import FlatBuffers +import Foundation public protocol DataWriter { - var count: Int {get} + var count: Int { get } func append(_ data: Data) } @@ -30,6 +30,7 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length public init(_ data: Data) { self.data = data } + convenience init() { self.init(Data()) } @@ -48,8 +49,8 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length } public func append(_ data: Data) { - self.handle.write(data) - self.currentSize += data.count + handle.write(data) + currentSize += data.count } } @@ -76,9 +77,9 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length var offsets = [Offset]() for field in nestedField.fields { switch writeField(&fbb, field: field) { - case .success(let offset): + case let .success(offset): offsets.append(offset) - case .failure(let error): + case let .failure(error): return .failure(error) } } @@ -96,17 +97,17 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length } switch toFBTypeEnum(field.type) { - case .success(let type): + case let .success(type): org_apache_arrow_flatbuf_Field.add(typeType: type, &fbb) - case .failure(let error): + case let .failure(error): return .failure(error) } switch fieldTypeOffsetResult { - case .success(let offset): + case let .success(offset): org_apache_arrow_flatbuf_Field.add(type: offset, &fbb) return .success(org_apache_arrow_flatbuf_Field.endField(&fbb, start: startOffset)) - case .failure(let error): + case let .failure(error): return .failure(error) } } @@ -115,9 +116,9 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length var fieldOffsets = [Offset]() for field in schema.fields { switch writeField(&fbb, field: field) { - case .success(let offset): + case let .success(offset): fieldOffsets.append(offset) - case .failure(let error): + case let .failure(error): return .failure(error) } } @@ -128,7 +129,6 @@ public class ArrowWriter { // swiftlint:disable:this 
type_body_length endianness: .little, fieldsVectorOffset: fieldsOffset) return .success(schemaOffset) - } private func writeRecordBatches( @@ -140,9 +140,9 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length for batch in batches { let startIndex = writer.count switch writeRecordBatch(batch: batch) { - case .success(let rbResult): - withUnsafeBytes(of: CONTINUATIONMARKER.littleEndian) {writer.append(Data($0))} - withUnsafeBytes(of: rbResult.1.o.littleEndian) {writer.append(Data($0))} + case let .success(rbResult): + withUnsafeBytes(of: CONTINUATIONMARKER.littleEndian) { writer.append(Data($0)) } + withUnsafeBytes(of: rbResult.1.o.littleEndian) { writer.append(Data($0)) } writer.append(rbResult.0) switch writeRecordBatchData(&writer, fields: batch.schema.fields, columns: batch.columns) { case .success: @@ -150,10 +150,10 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length org_apache_arrow_flatbuf_Block(offset: Int64(startIndex), metaDataLength: Int32(0), bodyLength: Int64(rbResult.1.o))) - case .failure(let error): + case let .failure(error): return .failure(error) } - case .failure(let error): + case let .failure(error): return .failure(error) } } @@ -238,8 +238,9 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length private func writeRecordBatchData( _ writer: inout DataWriter, fields: [ArrowField], - columns: [ArrowArrayHolder]) - -> Result { + columns: [ArrowArrayHolder] + ) + -> Result { for index in 0 ..< fields.count { let column = columns[index] let colBufferData = column.getBufferData() @@ -254,7 +255,7 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length switch writeRecordBatchData(&writer, fields: nestedType.fields, columns: structArray.arrowFields!) 
{ case .success: continue - case .failure(let error): + case let .failure(error): return .failure(error) } } @@ -265,11 +266,10 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length } private func writeFooter(schema: ArrowSchema, - rbBlocks: [org_apache_arrow_flatbuf_Block] - ) -> Result { - var fbb: FlatBufferBuilder = FlatBufferBuilder() + rbBlocks: [org_apache_arrow_flatbuf_Block]) -> Result { + var fbb = FlatBufferBuilder() switch writeSchema(&fbb, schema: schema) { - case .success(let schemaOffset): + case let .success(schemaOffset): fbb.startVector(rbBlocks.count, elementSize: MemoryLayout.size) for blkInfo in rbBlocks.reversed() { fbb.create(struct: blkInfo) @@ -282,25 +282,25 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length let footerOffset = org_apache_arrow_flatbuf_Footer.endFooter(&fbb, start: footerStartOffset) fbb.finish(offset: footerOffset) return .success(fbb.data) - case .failure(let error): + case let .failure(error): return .failure(error) } } private func writeFile(_ writer: inout DataWriter, info: ArrowWriter.Info) -> Result { - var fbb: FlatBufferBuilder = FlatBufferBuilder() + var fbb = FlatBufferBuilder() switch writeSchema(&fbb, schema: info.schema) { - case .success(let schemaOffset): + case let .success(schemaOffset): fbb.finish(offset: schemaOffset) writer.append(fbb.data) - case .failure(let error): + case let .failure(error): return .failure(error) } switch writeRecordBatches(&writer, batches: info.batches) { - case .success(let rbBlocks): + case let .success(rbBlocks): switch writeFooter(schema: info.schema, rbBlocks: rbBlocks) { - case .success(let footerData): + case let .success(footerData): fbb.finish(offset: Offset(offset: fbb.buffer.size)) let footerOffset = writer.count writer.append(footerData) @@ -309,10 +309,10 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length withUnsafeBytes(of: Int32(0).littleEndian) { writer.append(Data($0)) } let footerDiff = 
(UInt32(writer.count) - UInt32(footerOffset)) withUnsafeBytes(of: footerDiff.littleEndian) { writer.append(Data($0)) } - case .failure(let error): + case let .failure(error): return .failure(error) } - case .failure(let error): + case let .failure(error): return .failure(error) } @@ -322,28 +322,28 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length public func writeStreaming(_ info: ArrowWriter.Info) -> Result { let writer: any DataWriter = InMemDataWriter() switch toMessage(info.schema) { - case .success(let schemaData): - withUnsafeBytes(of: CONTINUATIONMARKER.littleEndian) {writer.append(Data($0))} - withUnsafeBytes(of: UInt32(schemaData.count).littleEndian) {writer.append(Data($0))} + case let .success(schemaData): + withUnsafeBytes(of: CONTINUATIONMARKER.littleEndian) { writer.append(Data($0)) } + withUnsafeBytes(of: UInt32(schemaData.count).littleEndian) { writer.append(Data($0)) } writer.append(schemaData) - case .failure(let error): + case let .failure(error): return .failure(error) } for batch in info.batches { switch toMessage(batch) { - case .success(let batchData): - withUnsafeBytes(of: CONTINUATIONMARKER.littleEndian) {writer.append(Data($0))} - withUnsafeBytes(of: UInt32(batchData[0].count).littleEndian) {writer.append(Data($0))} + case let .success(batchData): + withUnsafeBytes(of: CONTINUATIONMARKER.littleEndian) { writer.append(Data($0)) } + withUnsafeBytes(of: UInt32(batchData[0].count).littleEndian) { writer.append(Data($0)) } writer.append(batchData[0]) writer.append(batchData[1]) - case .failure(let error): + case let .failure(error): return .failure(error) } } - withUnsafeBytes(of: CONTINUATIONMARKER.littleEndian) {writer.append(Data($0))} - withUnsafeBytes(of: UInt32(0).littleEndian) {writer.append(Data($0))} + withUnsafeBytes(of: CONTINUATIONMARKER.littleEndian) { writer.append(Data($0)) } + withUnsafeBytes(of: UInt32(0).littleEndian) { writer.append(Data($0)) } if let memWriter = writer as? 
InMemDataWriter { return .success(memWriter.data) } else { @@ -360,7 +360,7 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length } else { return .failure(.invalid("Unable to cast writer")) } - case .failure(let error): + case let .failure(error): return .failure(error) } } @@ -383,7 +383,7 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length switch writeFile(&writer, info: info) { case .success: writer.append(FILEMARKER.data(using: .utf8)!) - case .failure(let error): + case let .failure(error): return .failure(error) } @@ -393,7 +393,7 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length public func toMessage(_ batch: RecordBatch) -> Result<[Data], ArrowError> { var writer: any DataWriter = InMemDataWriter() switch writeRecordBatch(batch: batch) { - case .success(let message): + case let .success(message): writer.append(message.0) addPadForAlignment(&writer) var dataWriter: any DataWriter = InMemDataWriter() @@ -401,12 +401,12 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length case .success: return .success([ (writer as! InMemDataWriter).data, // swiftlint:disable:this force_cast - (dataWriter as! InMemDataWriter).data // swiftlint:disable:this force_cast + (dataWriter as! 
InMemDataWriter).data, // swiftlint:disable:this force_cast ]) - case .failure(let error): + case let .failure(error): return .failure(error) } - case .failure(let error): + case let .failure(error): return .failure(error) } } @@ -415,9 +415,9 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length var schemaSize: Int32 = 0 var fbb = FlatBufferBuilder() switch writeSchema(&fbb, schema: schema) { - case .success(let schemaOffset): + case let .success(schemaOffset): schemaSize = Int32(schemaOffset.o) - case .failure(let error): + case let .failure(error): return .failure(error) } @@ -431,4 +431,5 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length return .success(fbb.data) } } + // swiftlint:disable:this file_length diff --git a/Sources/Arrow/ArrowWriterHelper.swift b/Sources/Arrow/ArrowWriterHelper.swift index 7ecb3ab..eb2a377 100644 --- a/Sources/Arrow/ArrowWriterHelper.swift +++ b/Sources/Arrow/ArrowWriterHelper.swift @@ -15,8 +15,8 @@ // specific language governing permissions and limitations // under the License. 
-import Foundation import FlatBuffers +import Foundation extension Data { func hexEncodedString() -> String { @@ -58,29 +58,36 @@ func toFBType( // swiftlint:disable:this cyclomatic_complexity function_body_len switch arrowType.id { case .int8, .uint8: return .success(org_apache_arrow_flatbuf_Int.createInt( - &fbb, bitWidth: 8, isSigned: infoType == ArrowType.ArrowInt8)) + &fbb, bitWidth: 8, isSigned: infoType == ArrowType.ArrowInt8 + )) case .int16, .uint16: return .success(org_apache_arrow_flatbuf_Int.createInt( - &fbb, bitWidth: 16, isSigned: infoType == ArrowType.ArrowInt16)) + &fbb, bitWidth: 16, isSigned: infoType == ArrowType.ArrowInt16 + )) case .int32, .uint32: return .success(org_apache_arrow_flatbuf_Int.createInt( - &fbb, bitWidth: 32, isSigned: infoType == ArrowType.ArrowInt32)) + &fbb, bitWidth: 32, isSigned: infoType == ArrowType.ArrowInt32 + )) case .int64, .uint64: return .success(org_apache_arrow_flatbuf_Int.createInt( - &fbb, bitWidth: 64, isSigned: infoType == ArrowType.ArrowInt64)) + &fbb, bitWidth: 64, isSigned: infoType == ArrowType.ArrowInt64 + )) case .float: return .success(org_apache_arrow_flatbuf_FloatingPoint.createFloatingPoint(&fbb, precision: .single)) case .double: return .success(org_apache_arrow_flatbuf_FloatingPoint.createFloatingPoint(&fbb, precision: .double)) case .string: return .success(org_apache_arrow_flatbuf_Utf8.endUtf8( - &fbb, start: org_apache_arrow_flatbuf_Utf8.startUtf8(&fbb))) + &fbb, start: org_apache_arrow_flatbuf_Utf8.startUtf8(&fbb) + )) case .binary: return .success(org_apache_arrow_flatbuf_Binary.endBinary( - &fbb, start: org_apache_arrow_flatbuf_Binary.startBinary(&fbb))) + &fbb, start: org_apache_arrow_flatbuf_Binary.startBinary(&fbb) + )) case .boolean: return .success(org_apache_arrow_flatbuf_Bool.endBool( - &fbb, start: org_apache_arrow_flatbuf_Bool.startBool(&fbb))) + &fbb, start: org_apache_arrow_flatbuf_Bool.startBool(&fbb) + )) case .date32: let startOffset = 
org_apache_arrow_flatbuf_Date.startDate(&fbb) org_apache_arrow_flatbuf_Date.add(unit: .day, &fbb) diff --git a/Sources/Arrow/BitUtility.swift b/Sources/Arrow/BitUtility.swift index 84edf98..4b91cf9 100644 --- a/Sources/Arrow/BitUtility.swift +++ b/Sources/Arrow/BitUtility.swift @@ -34,7 +34,7 @@ class BitUtility { static func clearBit(_ bit: UInt, buffer: ArrowBuffer) { let byteIndex = UInt(bit / 8) var theByte = buffer.rawPointer.load(fromByteOffset: Int(byteIndex), as: UInt8.self) - theByte &= ~(UInt8(1 << (bit % 8))) + theByte &= ~UInt8(1 << (bit % 8)) buffer.rawPointer.storeBytes(of: theByte, toByteOffset: Int(byteIndex), as: UInt8.self) } } diff --git a/Sources/Arrow/ChunkedArray.swift b/Sources/Arrow/ChunkedArray.swift index fb5734f..9b889dc 100644 --- a/Sources/Arrow/ChunkedArray.swift +++ b/Sources/Arrow/ChunkedArray.swift @@ -18,9 +18,9 @@ import Foundation public protocol AnyArray { - var arrowData: ArrowData {get} + var arrowData: ArrowData { get } func asAny(_ index: UInt) -> Any? 
- var length: UInt {get} + var length: UInt { get } } public protocol AsString { @@ -36,19 +36,19 @@ public class ChunkedArrayHolder { public let getBufferData: () -> Result<[Data], ArrowError> public let getBufferDataSizes: () -> Result<[Int], ArrowError> public init(_ chunked: ChunkedArray) { // swiftlint:disable:this cyclomatic_complexity - self.holder = chunked - self.length = chunked.length - self.type = chunked.type - self.nullCount = chunked.nullCount - self.getBufferData = {() -> Result<[Data], ArrowError> in + holder = chunked + length = chunked.length + type = chunked.type + nullCount = chunked.nullCount + getBufferData = { () -> Result<[Data], ArrowError> in var bufferData = [Data]() var numBuffers = 2 switch toFBTypeEnum(chunked.type) { - case .success(let fbType): + case let .success(fbType): if !isFixedPrimitive(fbType) { numBuffers = 3 } - case .failure(let error): + case let .failure(error): return .failure(error) } @@ -65,16 +65,16 @@ public class ChunkedArrayHolder { return .success(bufferData) } - self.getBufferDataSizes = {() -> Result<[Int], ArrowError> in + getBufferDataSizes = { () -> Result<[Int], ArrowError> in var bufferDataSizes = [Int]() var numBuffers = 2 switch toFBTypeEnum(chunked.type) { - case .success(let fbType): + case let .success(fbType): if !isFixedPrimitive(fbType) { numBuffers = 3 } - case .failure(let error): + case let .failure(error): return .failure(error) } @@ -98,14 +98,14 @@ public class ChunkedArray: AsString { public let type: ArrowType public let nullCount: UInt public let length: UInt - public var arrayCount: UInt {return UInt(self.arrays.count)} + public var arrayCount: UInt { return UInt(arrays.count) } public init(_ arrays: [ArrowArray]) throws { if arrays.count == 0 { throw ArrowError.arrayHasNoElements } - self.type = arrays[0].arrowData.type + type = arrays[0].arrowData.type var len: UInt = 0 var nullCount: UInt = 0 for array in arrays { @@ -114,7 +114,7 @@ public class ChunkedArray: AsString { } self.arrays 
= arrays - self.length = len + length = len self.nullCount = nullCount } diff --git a/Sources/Arrow/MemoryAllocator.swift b/Sources/Arrow/MemoryAllocator.swift index 0f6e54e..d759728 100644 --- a/Sources/Arrow/MemoryAllocator.swift +++ b/Sources/Arrow/MemoryAllocator.swift @@ -26,6 +26,7 @@ public class MemoryAllocator { func allocateArray(_ byteCount: Int) -> UnsafeMutableRawPointer { return UnsafeMutableRawPointer.allocate( byteCount: byteCount, - alignment: self.alignment) + alignment: alignment + ) } } diff --git a/Sources/Arrow/ProtoUtil.swift b/Sources/Arrow/ProtoUtil.swift index e91580e..7031d56 100644 --- a/Sources/Arrow/ProtoUtil.swift +++ b/Sources/Arrow/ProtoUtil.swift @@ -82,7 +82,7 @@ func fromProto( // swiftlint:disable:this cyclomatic_complexity function_body_le arrowType = ArrowTypeTimestamp(arrowUnit, timezone: timezone?.isEmpty == true ? nil : timezone) case .struct_: var children = [ArrowField]() - for index in 0.. Void - ) -> BidirectionalStreamingCall - - func listFlights( - _ request: Arrow_Flight_Protocol_Criteria, - callOptions: CallOptions?, - handler: @escaping (Arrow_Flight_Protocol_FlightInfo) -> Void - ) -> ServerStreamingCall - - func getFlightInfo( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? - ) -> UnaryCall - - func getSchema( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? 
- ) -> UnaryCall - - func doGet( - _ request: Arrow_Flight_Protocol_Ticket, - callOptions: CallOptions?, - handler: @escaping (Arrow_Flight_Protocol_FlightData) -> Void - ) -> ServerStreamingCall - - func doPut( - callOptions: CallOptions?, - handler: @escaping (Arrow_Flight_Protocol_PutResult) -> Void - ) -> BidirectionalStreamingCall - - func doExchange( - callOptions: CallOptions?, - handler: @escaping (Arrow_Flight_Protocol_FlightData) -> Void - ) -> BidirectionalStreamingCall - - func doAction( - _ request: Arrow_Flight_Protocol_Action, - callOptions: CallOptions?, - handler: @escaping (Arrow_Flight_Protocol_Result) -> Void - ) -> ServerStreamingCall - - func listActions( - _ request: Arrow_Flight_Protocol_Empty, - callOptions: CallOptions?, - handler: @escaping (Arrow_Flight_Protocol_ActionType) -> Void - ) -> ServerStreamingCall +protocol Arrow_Flight_Protocol_FlightServiceClientProtocol: GRPCClient { + var serviceName: String { get } + var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? { get } + + func handshake( + callOptions: CallOptions?, + handler: @escaping (Arrow_Flight_Protocol_HandshakeResponse) -> Void + ) -> BidirectionalStreamingCall + + func listFlights( + _ request: Arrow_Flight_Protocol_Criteria, + callOptions: CallOptions?, + handler: @escaping (Arrow_Flight_Protocol_FlightInfo) -> Void + ) -> ServerStreamingCall + + func getFlightInfo( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? + ) -> UnaryCall + + func getSchema( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? 
+ ) -> UnaryCall + + func doGet( + _ request: Arrow_Flight_Protocol_Ticket, + callOptions: CallOptions?, + handler: @escaping (Arrow_Flight_Protocol_FlightData) -> Void + ) -> ServerStreamingCall + + func doPut( + callOptions: CallOptions?, + handler: @escaping (Arrow_Flight_Protocol_PutResult) -> Void + ) -> BidirectionalStreamingCall + + func doExchange( + callOptions: CallOptions?, + handler: @escaping (Arrow_Flight_Protocol_FlightData) -> Void + ) -> BidirectionalStreamingCall + + func doAction( + _ request: Arrow_Flight_Protocol_Action, + callOptions: CallOptions?, + handler: @escaping (Arrow_Flight_Protocol_Result) -> Void + ) -> ServerStreamingCall + + func listActions( + _ request: Arrow_Flight_Protocol_Empty, + callOptions: CallOptions?, + handler: @escaping (Arrow_Flight_Protocol_ActionType) -> Void + ) -> ServerStreamingCall } extension Arrow_Flight_Protocol_FlightServiceClientProtocol { - internal var serviceName: String { - return "arrow.flight.protocol.FlightService" - } - - /// - /// Handshake between client and server. Depending on the server, the - /// handshake may be required to determine the token that should be used for - /// future operations. Both request and response are streams to allow multiple - /// round-trips depending on auth mechanism. - /// - /// Callers should use the `send` method on the returned object to send messages - /// to the server. The caller should send an `.end` after the final message has been sent. - /// - /// - Parameters: - /// - callOptions: Call options. - /// - handler: A closure called when each response is received from the server. - /// - Returns: A `ClientStreamingCall` with futures for the metadata and status. - internal func handshake( - callOptions: CallOptions? 
= nil, - handler: @escaping (Arrow_Flight_Protocol_HandshakeResponse) -> Void - ) -> BidirectionalStreamingCall { - return self.makeBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake.path, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeHandshakeInterceptors() ?? [], - handler: handler - ) - } - - /// - /// Get a list of available streams given a particular criteria. Most flight - /// services will expose one or more streams that are readily available for - /// retrieval. This api allows listing the streams available for - /// consumption. A user can also provide a criteria. The criteria can limit - /// the subset of streams that can be listed via this interface. Each flight - /// service allows its own definition of how to consume criteria. - /// - /// - Parameters: - /// - request: Request to send to ListFlights. - /// - callOptions: Call options. - /// - handler: A closure called when each response is received from the server. - /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. - internal func listFlights( - _ request: Arrow_Flight_Protocol_Criteria, - callOptions: CallOptions? = nil, - handler: @escaping (Arrow_Flight_Protocol_FlightInfo) -> Void - ) -> ServerStreamingCall { - return self.makeServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listFlights.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeListFlightsInterceptors() ?? [], - handler: handler - ) - } - - /// - /// For a given FlightDescriptor, get information about how the flight can be - /// consumed. This is a useful interface if the consumer of the interface - /// already can identify the specific flight to consume. This interface can - /// also allow a consumer to generate a flight stream through a specified - /// descriptor. 
For example, a flight descriptor might be something that - /// includes a SQL statement or a Pickled Python operation that will be - /// executed. In those cases, the descriptor will not be previously available - /// within the list of available streams provided by ListFlights but will be - /// available for consumption for the duration defined by the specific flight - /// service. - /// - /// - Parameters: - /// - request: Request to send to GetFlightInfo. - /// - callOptions: Call options. - /// - Returns: A `UnaryCall` with futures for the metadata, status and response. - internal func getFlightInfo( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? = nil - ) -> UnaryCall { - return self.makeUnaryCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getFlightInfo.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeGetFlightInfoInterceptors() ?? [] - ) - } - - /// - /// For a given FlightDescriptor, get the Schema as described in Schema.fbs::Schema - /// This is used when a consumer needs the Schema of flight stream. Similar to - /// GetFlightInfo this interface may generate a new flight that was not previously - /// available in ListFlights. - /// - /// - Parameters: - /// - request: Request to send to GetSchema. - /// - callOptions: Call options. - /// - Returns: A `UnaryCall` with futures for the metadata, status and response. - internal func getSchema( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? = nil - ) -> UnaryCall { - return self.makeUnaryCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getSchema.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeGetSchemaInterceptors() ?? [] - ) - } - - /// - /// Retrieve a single stream associated with a particular descriptor - /// associated with the referenced ticket. 
A Flight can be composed of one or - /// more streams where each stream can be retrieved using a separate opaque - /// ticket that the flight service uses for managing a collection of streams. - /// - /// - Parameters: - /// - request: Request to send to DoGet. - /// - callOptions: Call options. - /// - handler: A closure called when each response is received from the server. - /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. - internal func doGet( - _ request: Arrow_Flight_Protocol_Ticket, - callOptions: CallOptions? = nil, - handler: @escaping (Arrow_Flight_Protocol_FlightData) -> Void - ) -> ServerStreamingCall { - return self.makeServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doGet.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoGetInterceptors() ?? [], - handler: handler - ) - } - - /// - /// Push a stream to the flight service associated with a particular - /// flight stream. This allows a client of a flight service to upload a stream - /// of data. Depending on the particular flight service, a client consumer - /// could be allowed to upload a single stream per descriptor or an unlimited - /// number. In the latter, the service might implement a 'seal' action that - /// can be applied to a descriptor once all streams are uploaded. - /// - /// Callers should use the `send` method on the returned object to send messages - /// to the server. The caller should send an `.end` after the final message has been sent. - /// - /// - Parameters: - /// - callOptions: Call options. - /// - handler: A closure called when each response is received from the server. - /// - Returns: A `ClientStreamingCall` with futures for the metadata and status. - internal func doPut( - callOptions: CallOptions? 
= nil, - handler: @escaping (Arrow_Flight_Protocol_PutResult) -> Void - ) -> BidirectionalStreamingCall { - return self.makeBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut.path, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoPutInterceptors() ?? [], - handler: handler - ) - } - - /// - /// Open a bidirectional data channel for a given descriptor. This - /// allows clients to send and receive arbitrary Arrow data and - /// application-specific metadata in a single logical stream. In - /// contrast to DoGet/DoPut, this is more suited for clients - /// offloading computation (rather than storage) to a Flight service. - /// - /// Callers should use the `send` method on the returned object to send messages - /// to the server. The caller should send an `.end` after the final message has been sent. - /// - /// - Parameters: - /// - callOptions: Call options. - /// - handler: A closure called when each response is received from the server. - /// - Returns: A `ClientStreamingCall` with futures for the metadata and status. - internal func doExchange( - callOptions: CallOptions? = nil, - handler: @escaping (Arrow_Flight_Protocol_FlightData) -> Void - ) -> BidirectionalStreamingCall { - return self.makeBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange.path, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoExchangeInterceptors() ?? [], - handler: handler - ) - } - - /// - /// Flight services can support an arbitrary number of simple actions in - /// addition to the possible ListFlights, GetFlightInfo, DoGet, DoPut - /// operations that are potentially available. DoAction allows a flight client - /// to do a specific action against a flight service. An action includes - /// opaque request and response objects that are specific to the type action - /// being undertaken. 
- /// - /// - Parameters: - /// - request: Request to send to DoAction. - /// - callOptions: Call options. - /// - handler: A closure called when each response is received from the server. - /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. - internal func doAction( - _ request: Arrow_Flight_Protocol_Action, - callOptions: CallOptions? = nil, - handler: @escaping (Arrow_Flight_Protocol_Result) -> Void - ) -> ServerStreamingCall { - return self.makeServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doAction.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoActionInterceptors() ?? [], - handler: handler - ) - } - - /// - /// A flight service exposes all of the available action types that it has - /// along with descriptions. This allows different flight consumers to - /// understand the capabilities of the flight service. - /// - /// - Parameters: - /// - request: Request to send to ListActions. - /// - callOptions: Call options. - /// - handler: A closure called when each response is received from the server. - /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. - internal func listActions( - _ request: Arrow_Flight_Protocol_Empty, - callOptions: CallOptions? = nil, - handler: @escaping (Arrow_Flight_Protocol_ActionType) -> Void - ) -> ServerStreamingCall { - return self.makeServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeListActionsInterceptors() ?? [], - handler: handler - ) - } + var serviceName: String { + return "arrow.flight.protocol.FlightService" + } + + /// + /// Handshake between client and server. 
Depending on the server, the + /// handshake may be required to determine the token that should be used for + /// future operations. Both request and response are streams to allow multiple + /// round-trips depending on auth mechanism. + /// + /// Callers should use the `send` method on the returned object to send messages + /// to the server. The caller should send an `.end` after the final message has been sent. + /// + /// - Parameters: + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ClientStreamingCall` with futures for the metadata and status. + func handshake( + callOptions: CallOptions? = nil, + handler: @escaping (Arrow_Flight_Protocol_HandshakeResponse) -> Void + ) -> BidirectionalStreamingCall { + return makeBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake.path, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeHandshakeInterceptors() ?? [], + handler: handler + ) + } + + /// + /// Get a list of available streams given a particular criteria. Most flight + /// services will expose one or more streams that are readily available for + /// retrieval. This api allows listing the streams available for + /// consumption. A user can also provide a criteria. The criteria can limit + /// the subset of streams that can be listed via this interface. Each flight + /// service allows its own definition of how to consume criteria. + /// + /// - Parameters: + /// - request: Request to send to ListFlights. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + func listFlights( + _ request: Arrow_Flight_Protocol_Criteria, + callOptions: CallOptions? 
= nil, + handler: @escaping (Arrow_Flight_Protocol_FlightInfo) -> Void + ) -> ServerStreamingCall { + return makeServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listFlights.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeListFlightsInterceptors() ?? [], + handler: handler + ) + } + + /// + /// For a given FlightDescriptor, get information about how the flight can be + /// consumed. This is a useful interface if the consumer of the interface + /// already can identify the specific flight to consume. This interface can + /// also allow a consumer to generate a flight stream through a specified + /// descriptor. For example, a flight descriptor might be something that + /// includes a SQL statement or a Pickled Python operation that will be + /// executed. In those cases, the descriptor will not be previously available + /// within the list of available streams provided by ListFlights but will be + /// available for consumption for the duration defined by the specific flight + /// service. + /// + /// - Parameters: + /// - request: Request to send to GetFlightInfo. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + func getFlightInfo( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return makeUnaryCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getFlightInfo.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeGetFlightInfoInterceptors() ?? [] + ) + } + + /// + /// For a given FlightDescriptor, get the Schema as described in Schema.fbs::Schema + /// This is used when a consumer needs the Schema of flight stream. Similar to + /// GetFlightInfo this interface may generate a new flight that was not previously + /// available in ListFlights. 
+ /// + /// - Parameters: + /// - request: Request to send to GetSchema. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + func getSchema( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return makeUnaryCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getSchema.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeGetSchemaInterceptors() ?? [] + ) + } + + /// + /// Retrieve a single stream associated with a particular descriptor + /// associated with the referenced ticket. A Flight can be composed of one or + /// more streams where each stream can be retrieved using a separate opaque + /// ticket that the flight service uses for managing a collection of streams. + /// + /// - Parameters: + /// - request: Request to send to DoGet. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + func doGet( + _ request: Arrow_Flight_Protocol_Ticket, + callOptions: CallOptions? = nil, + handler: @escaping (Arrow_Flight_Protocol_FlightData) -> Void + ) -> ServerStreamingCall { + return makeServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doGet.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoGetInterceptors() ?? [], + handler: handler + ) + } + + /// + /// Push a stream to the flight service associated with a particular + /// flight stream. This allows a client of a flight service to upload a stream + /// of data. Depending on the particular flight service, a client consumer + /// could be allowed to upload a single stream per descriptor or an unlimited + /// number. 
In the latter, the service might implement a 'seal' action that + /// can be applied to a descriptor once all streams are uploaded. + /// + /// Callers should use the `send` method on the returned object to send messages + /// to the server. The caller should send an `.end` after the final message has been sent. + /// + /// - Parameters: + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ClientStreamingCall` with futures for the metadata and status. + func doPut( + callOptions: CallOptions? = nil, + handler: @escaping (Arrow_Flight_Protocol_PutResult) -> Void + ) -> BidirectionalStreamingCall { + return makeBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut.path, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoPutInterceptors() ?? [], + handler: handler + ) + } + + /// + /// Open a bidirectional data channel for a given descriptor. This + /// allows clients to send and receive arbitrary Arrow data and + /// application-specific metadata in a single logical stream. In + /// contrast to DoGet/DoPut, this is more suited for clients + /// offloading computation (rather than storage) to a Flight service. + /// + /// Callers should use the `send` method on the returned object to send messages + /// to the server. The caller should send an `.end` after the final message has been sent. + /// + /// - Parameters: + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ClientStreamingCall` with futures for the metadata and status. + func doExchange( + callOptions: CallOptions? 
= nil, + handler: @escaping (Arrow_Flight_Protocol_FlightData) -> Void + ) -> BidirectionalStreamingCall { + return makeBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange.path, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoExchangeInterceptors() ?? [], + handler: handler + ) + } + + /// + /// Flight services can support an arbitrary number of simple actions in + /// addition to the possible ListFlights, GetFlightInfo, DoGet, DoPut + /// operations that are potentially available. DoAction allows a flight client + /// to do a specific action against a flight service. An action includes + /// opaque request and response objects that are specific to the type action + /// being undertaken. + /// + /// - Parameters: + /// - request: Request to send to DoAction. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + func doAction( + _ request: Arrow_Flight_Protocol_Action, + callOptions: CallOptions? = nil, + handler: @escaping (Arrow_Flight_Protocol_Result) -> Void + ) -> ServerStreamingCall { + return makeServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doAction.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoActionInterceptors() ?? [], + handler: handler + ) + } + + /// + /// A flight service exposes all of the available action types that it has + /// along with descriptions. This allows different flight consumers to + /// understand the capabilities of the flight service. + /// + /// - Parameters: + /// - request: Request to send to ListActions. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. 
+ /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + func listActions( + _ request: Arrow_Flight_Protocol_Empty, + callOptions: CallOptions? = nil, + handler: @escaping (Arrow_Flight_Protocol_ActionType) -> Void + ) -> ServerStreamingCall { + return makeServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeListActionsInterceptors() ?? [], + handler: handler + ) + } } @available(*, deprecated) extension Arrow_Flight_Protocol_FlightServiceClient: @unchecked Sendable {} @available(*, deprecated, renamed: "Arrow_Flight_Protocol_FlightServiceNIOClient") -internal final class Arrow_Flight_Protocol_FlightServiceClient: Arrow_Flight_Protocol_FlightServiceClientProtocol { - private let lock = Lock() - private var _defaultCallOptions: CallOptions - private var _interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? - internal let channel: GRPCChannel - internal var defaultCallOptions: CallOptions { - get { self.lock.withLock { return self._defaultCallOptions } } - set { self.lock.withLockVoid { self._defaultCallOptions = newValue } } - } - internal var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? { - get { self.lock.withLock { return self._interceptors } } - set { self.lock.withLockVoid { self._interceptors = newValue } } - } - - /// Creates a client for the arrow.flight.protocol.FlightService service. - /// - /// - Parameters: - /// - channel: `GRPCChannel` to the service host. - /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. - /// - interceptors: A factory providing interceptors for each RPC. - internal init( - channel: GRPCChannel, - defaultCallOptions: CallOptions = CallOptions(), - interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? 
= nil - ) { - self.channel = channel - self._defaultCallOptions = defaultCallOptions - self._interceptors = interceptors - } +final class Arrow_Flight_Protocol_FlightServiceClient: Arrow_Flight_Protocol_FlightServiceClientProtocol { + private let lock = Lock() + private var _defaultCallOptions: CallOptions + private var _interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? + let channel: GRPCChannel + var defaultCallOptions: CallOptions { + get { lock.withLock { self._defaultCallOptions } } + set { lock.withLockVoid { self._defaultCallOptions = newValue } } + } + + var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? { + get { lock.withLock { self._interceptors } } + set { lock.withLockVoid { self._interceptors = newValue } } + } + + /// Creates a client for the arrow.flight.protocol.FlightService service. + /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. + init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + _defaultCallOptions = defaultCallOptions + _interceptors = interceptors + } } -internal struct Arrow_Flight_Protocol_FlightServiceNIOClient: Arrow_Flight_Protocol_FlightServiceClientProtocol { - internal var channel: GRPCChannel - internal var defaultCallOptions: CallOptions - internal var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? - - /// Creates a client for the arrow.flight.protocol.FlightService service. - /// - /// - Parameters: - /// - channel: `GRPCChannel` to the service host. - /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. 
- /// - interceptors: A factory providing interceptors for each RPC. - internal init( - channel: GRPCChannel, - defaultCallOptions: CallOptions = CallOptions(), - interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? = nil - ) { - self.channel = channel - self.defaultCallOptions = defaultCallOptions - self.interceptors = interceptors - } +struct Arrow_Flight_Protocol_FlightServiceNIOClient: Arrow_Flight_Protocol_FlightServiceClientProtocol { + var channel: GRPCChannel + var defaultCallOptions: CallOptions + var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? + + /// Creates a client for the arrow.flight.protocol.FlightService service. + /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. + init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } } /// @@ -388,432 +388,431 @@ internal struct Arrow_Flight_Protocol_FlightServiceNIOClient: Arrow_Flight_Proto /// accessed using the Arrow Flight Protocol. Additionally, a flight service /// can expose a set of actions that are available. @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) -internal protocol Arrow_Flight_Protocol_FlightServiceAsyncClientProtocol: GRPCClient { - static var serviceDescriptor: GRPCServiceDescriptor { get } - var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? { get } - - func makeHandshakeCall( - callOptions: CallOptions? - ) -> GRPCAsyncBidirectionalStreamingCall - - func makeListFlightsCall( - _ request: Arrow_Flight_Protocol_Criteria, - callOptions: CallOptions? 
- ) -> GRPCAsyncServerStreamingCall - - func makeGetFlightInfoCall( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? - ) -> GRPCAsyncUnaryCall - - func makeGetSchemaCall( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? - ) -> GRPCAsyncUnaryCall - - func makeDoGetCall( - _ request: Arrow_Flight_Protocol_Ticket, - callOptions: CallOptions? - ) -> GRPCAsyncServerStreamingCall - - func makeDoPutCall( - callOptions: CallOptions? - ) -> GRPCAsyncBidirectionalStreamingCall - - func makeDoExchangeCall( - callOptions: CallOptions? - ) -> GRPCAsyncBidirectionalStreamingCall - - func makeDoActionCall( - _ request: Arrow_Flight_Protocol_Action, - callOptions: CallOptions? - ) -> GRPCAsyncServerStreamingCall - - func makeListActionsCall( - _ request: Arrow_Flight_Protocol_Empty, - callOptions: CallOptions? - ) -> GRPCAsyncServerStreamingCall +protocol Arrow_Flight_Protocol_FlightServiceAsyncClientProtocol: GRPCClient { + static var serviceDescriptor: GRPCServiceDescriptor { get } + var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? { get } + + func makeHandshakeCall( + callOptions: CallOptions? + ) -> GRPCAsyncBidirectionalStreamingCall + + func makeListFlightsCall( + _ request: Arrow_Flight_Protocol_Criteria, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall + + func makeGetFlightInfoCall( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall + + func makeGetSchemaCall( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall + + func makeDoGetCall( + _ request: Arrow_Flight_Protocol_Ticket, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall + + func makeDoPutCall( + callOptions: CallOptions? + ) -> GRPCAsyncBidirectionalStreamingCall + + func makeDoExchangeCall( + callOptions: CallOptions? 
+ ) -> GRPCAsyncBidirectionalStreamingCall + + func makeDoActionCall( + _ request: Arrow_Flight_Protocol_Action, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall + + func makeListActionsCall( + _ request: Arrow_Flight_Protocol_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall } @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) extension Arrow_Flight_Protocol_FlightServiceAsyncClientProtocol { - internal static var serviceDescriptor: GRPCServiceDescriptor { - return Arrow_Flight_Protocol_FlightServiceClientMetadata.serviceDescriptor - } - - internal var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? { - return nil - } - - internal func makeHandshakeCall( - callOptions: CallOptions? = nil - ) -> GRPCAsyncBidirectionalStreamingCall { - return self.makeAsyncBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake.path, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeHandshakeInterceptors() ?? [] - ) - } - - internal func makeListFlightsCall( - _ request: Arrow_Flight_Protocol_Criteria, - callOptions: CallOptions? = nil - ) -> GRPCAsyncServerStreamingCall { - return self.makeAsyncServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listFlights.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeListFlightsInterceptors() ?? [] - ) - } - - internal func makeGetFlightInfoCall( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? = nil - ) -> GRPCAsyncUnaryCall { - return self.makeAsyncUnaryCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getFlightInfo.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeGetFlightInfoInterceptors() ?? 
[] - ) - } - - internal func makeGetSchemaCall( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? = nil - ) -> GRPCAsyncUnaryCall { - return self.makeAsyncUnaryCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getSchema.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeGetSchemaInterceptors() ?? [] - ) - } - - internal func makeDoGetCall( - _ request: Arrow_Flight_Protocol_Ticket, - callOptions: CallOptions? = nil - ) -> GRPCAsyncServerStreamingCall { - return self.makeAsyncServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doGet.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoGetInterceptors() ?? [] - ) - } - - internal func makeDoPutCall( - callOptions: CallOptions? = nil - ) -> GRPCAsyncBidirectionalStreamingCall { - return self.makeAsyncBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut.path, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoPutInterceptors() ?? [] - ) - } - - internal func makeDoExchangeCall( - callOptions: CallOptions? = nil - ) -> GRPCAsyncBidirectionalStreamingCall { - return self.makeAsyncBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange.path, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoExchangeInterceptors() ?? [] - ) - } - - internal func makeDoActionCall( - _ request: Arrow_Flight_Protocol_Action, - callOptions: CallOptions? = nil - ) -> GRPCAsyncServerStreamingCall { - return self.makeAsyncServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doAction.path, - request: request, - callOptions: callOptions ?? 
self.defaultCallOptions, - interceptors: self.interceptors?.makeDoActionInterceptors() ?? [] - ) - } - - internal func makeListActionsCall( - _ request: Arrow_Flight_Protocol_Empty, - callOptions: CallOptions? = nil - ) -> GRPCAsyncServerStreamingCall { - return self.makeAsyncServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeListActionsInterceptors() ?? [] - ) - } + static var serviceDescriptor: GRPCServiceDescriptor { + return Arrow_Flight_Protocol_FlightServiceClientMetadata.serviceDescriptor + } + + var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? { + return nil + } + + func makeHandshakeCall( + callOptions: CallOptions? = nil + ) -> GRPCAsyncBidirectionalStreamingCall { + return makeAsyncBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake.path, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeHandshakeInterceptors() ?? [] + ) + } + + func makeListFlightsCall( + _ request: Arrow_Flight_Protocol_Criteria, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return makeAsyncServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listFlights.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeListFlightsInterceptors() ?? [] + ) + } + + func makeGetFlightInfoCall( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return makeAsyncUnaryCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getFlightInfo.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeGetFlightInfoInterceptors() ?? 
[] + ) + } + + func makeGetSchemaCall( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return makeAsyncUnaryCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getSchema.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeGetSchemaInterceptors() ?? [] + ) + } + + func makeDoGetCall( + _ request: Arrow_Flight_Protocol_Ticket, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return makeAsyncServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doGet.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoGetInterceptors() ?? [] + ) + } + + func makeDoPutCall( + callOptions: CallOptions? = nil + ) -> GRPCAsyncBidirectionalStreamingCall { + return makeAsyncBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut.path, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoPutInterceptors() ?? [] + ) + } + + func makeDoExchangeCall( + callOptions: CallOptions? = nil + ) -> GRPCAsyncBidirectionalStreamingCall { + return makeAsyncBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange.path, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoExchangeInterceptors() ?? [] + ) + } + + func makeDoActionCall( + _ request: Arrow_Flight_Protocol_Action, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return makeAsyncServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doAction.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoActionInterceptors() ?? 
[] + ) + } + + func makeListActionsCall( + _ request: Arrow_Flight_Protocol_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return makeAsyncServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeListActionsInterceptors() ?? [] + ) + } } @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) extension Arrow_Flight_Protocol_FlightServiceAsyncClientProtocol { - internal func handshake( - _ requests: RequestStream, - callOptions: CallOptions? = nil - ) -> GRPCAsyncResponseStream where RequestStream: Sequence, RequestStream.Element == Arrow_Flight_Protocol_HandshakeRequest { - return self.performAsyncBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake.path, - requests: requests, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeHandshakeInterceptors() ?? [] - ) - } - - internal func handshake( - _ requests: RequestStream, - callOptions: CallOptions? = nil - ) -> GRPCAsyncResponseStream where RequestStream: AsyncSequence & Sendable, RequestStream.Element == Arrow_Flight_Protocol_HandshakeRequest { - return self.performAsyncBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake.path, - requests: requests, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeHandshakeInterceptors() ?? [] - ) - } - - internal func listFlights( - _ request: Arrow_Flight_Protocol_Criteria, - callOptions: CallOptions? = nil - ) -> GRPCAsyncResponseStream { - return self.performAsyncServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listFlights.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeListFlightsInterceptors() ?? 
[] - ) - } - - internal func getFlightInfo( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? = nil - ) async throws -> Arrow_Flight_Protocol_FlightInfo { - return try await self.performAsyncUnaryCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getFlightInfo.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeGetFlightInfoInterceptors() ?? [] - ) - } - - internal func getSchema( - _ request: Arrow_Flight_Protocol_FlightDescriptor, - callOptions: CallOptions? = nil - ) async throws -> Arrow_Flight_Protocol_SchemaResult { - return try await self.performAsyncUnaryCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getSchema.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeGetSchemaInterceptors() ?? [] - ) - } - - internal func doGet( - _ request: Arrow_Flight_Protocol_Ticket, - callOptions: CallOptions? = nil - ) -> GRPCAsyncResponseStream { - return self.performAsyncServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doGet.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoGetInterceptors() ?? [] - ) - } - - internal func doPut( - _ requests: RequestStream, - callOptions: CallOptions? = nil - ) -> GRPCAsyncResponseStream where RequestStream: Sequence, RequestStream.Element == Arrow_Flight_Protocol_FlightData { - return self.performAsyncBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut.path, - requests: requests, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoPutInterceptors() ?? [] - ) - } - - internal func doPut( - _ requests: RequestStream, - callOptions: CallOptions? 
= nil - ) -> GRPCAsyncResponseStream where RequestStream: AsyncSequence & Sendable, RequestStream.Element == Arrow_Flight_Protocol_FlightData { - return self.performAsyncBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut.path, - requests: requests, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoPutInterceptors() ?? [] - ) - } - - internal func doExchange( - _ requests: RequestStream, - callOptions: CallOptions? = nil - ) -> GRPCAsyncResponseStream where RequestStream: Sequence, RequestStream.Element == Arrow_Flight_Protocol_FlightData { - return self.performAsyncBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange.path, - requests: requests, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoExchangeInterceptors() ?? [] - ) - } - - internal func doExchange( - _ requests: RequestStream, - callOptions: CallOptions? = nil - ) -> GRPCAsyncResponseStream where RequestStream: AsyncSequence & Sendable, RequestStream.Element == Arrow_Flight_Protocol_FlightData { - return self.performAsyncBidirectionalStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange.path, - requests: requests, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoExchangeInterceptors() ?? [] - ) - } - - internal func doAction( - _ request: Arrow_Flight_Protocol_Action, - callOptions: CallOptions? = nil - ) -> GRPCAsyncResponseStream { - return self.performAsyncServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doAction.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeDoActionInterceptors() ?? [] - ) - } - - internal func listActions( - _ request: Arrow_Flight_Protocol_Empty, - callOptions: CallOptions? 
= nil - ) -> GRPCAsyncResponseStream { - return self.performAsyncServerStreamingCall( - path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions.path, - request: request, - callOptions: callOptions ?? self.defaultCallOptions, - interceptors: self.interceptors?.makeListActionsInterceptors() ?? [] - ) - } -} + func handshake( + _ requests: RequestStream, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream where RequestStream: Sequence, RequestStream.Element == Arrow_Flight_Protocol_HandshakeRequest { + return performAsyncBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake.path, + requests: requests, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeHandshakeInterceptors() ?? [] + ) + } -@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) -internal struct Arrow_Flight_Protocol_FlightServiceAsyncClient: Arrow_Flight_Protocol_FlightServiceAsyncClientProtocol { - internal var channel: GRPCChannel - internal var defaultCallOptions: CallOptions - internal var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? - - internal init( - channel: GRPCChannel, - defaultCallOptions: CallOptions = CallOptions(), - interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? = nil - ) { - self.channel = channel - self.defaultCallOptions = defaultCallOptions - self.interceptors = interceptors - } -} + func handshake( + _ requests: RequestStream, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream where RequestStream: AsyncSequence & Sendable, RequestStream.Element == Arrow_Flight_Protocol_HandshakeRequest { + return performAsyncBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake.path, + requests: requests, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeHandshakeInterceptors() ?? 
[] + ) + } -internal protocol Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol: Sendable { + func listFlights( + _ request: Arrow_Flight_Protocol_Criteria, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream { + return performAsyncServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listFlights.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeListFlightsInterceptors() ?? [] + ) + } - /// - Returns: Interceptors to use when invoking 'handshake'. - func makeHandshakeInterceptors() -> [ClientInterceptor] + func getFlightInfo( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? = nil + ) async throws -> Arrow_Flight_Protocol_FlightInfo { + return try await performAsyncUnaryCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getFlightInfo.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeGetFlightInfoInterceptors() ?? [] + ) + } - /// - Returns: Interceptors to use when invoking 'listFlights'. - func makeListFlightsInterceptors() -> [ClientInterceptor] + func getSchema( + _ request: Arrow_Flight_Protocol_FlightDescriptor, + callOptions: CallOptions? = nil + ) async throws -> Arrow_Flight_Protocol_SchemaResult { + return try await performAsyncUnaryCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getSchema.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeGetSchemaInterceptors() ?? [] + ) + } - /// - Returns: Interceptors to use when invoking 'getFlightInfo'. - func makeGetFlightInfoInterceptors() -> [ClientInterceptor] + func doGet( + _ request: Arrow_Flight_Protocol_Ticket, + callOptions: CallOptions? 
= nil + ) -> GRPCAsyncResponseStream { + return performAsyncServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doGet.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoGetInterceptors() ?? [] + ) + } - /// - Returns: Interceptors to use when invoking 'getSchema'. - func makeGetSchemaInterceptors() -> [ClientInterceptor] + func doPut( + _ requests: RequestStream, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream where RequestStream: Sequence, RequestStream.Element == Arrow_Flight_Protocol_FlightData { + return performAsyncBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut.path, + requests: requests, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoPutInterceptors() ?? [] + ) + } - /// - Returns: Interceptors to use when invoking 'doGet'. - func makeDoGetInterceptors() -> [ClientInterceptor] + func doPut( + _ requests: RequestStream, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream where RequestStream: AsyncSequence & Sendable, RequestStream.Element == Arrow_Flight_Protocol_FlightData { + return performAsyncBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut.path, + requests: requests, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoPutInterceptors() ?? [] + ) + } - /// - Returns: Interceptors to use when invoking 'doPut'. - func makeDoPutInterceptors() -> [ClientInterceptor] + func doExchange( + _ requests: RequestStream, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream where RequestStream: Sequence, RequestStream.Element == Arrow_Flight_Protocol_FlightData { + return performAsyncBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange.path, + requests: requests, + callOptions: callOptions ?? 
defaultCallOptions, + interceptors: interceptors?.makeDoExchangeInterceptors() ?? [] + ) + } - /// - Returns: Interceptors to use when invoking 'doExchange'. - func makeDoExchangeInterceptors() -> [ClientInterceptor] + func doExchange( + _ requests: RequestStream, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream where RequestStream: AsyncSequence & Sendable, RequestStream.Element == Arrow_Flight_Protocol_FlightData { + return performAsyncBidirectionalStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange.path, + requests: requests, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoExchangeInterceptors() ?? [] + ) + } - /// - Returns: Interceptors to use when invoking 'doAction'. - func makeDoActionInterceptors() -> [ClientInterceptor] + func doAction( + _ request: Arrow_Flight_Protocol_Action, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream { + return performAsyncServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doAction.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeDoActionInterceptors() ?? [] + ) + } - /// - Returns: Interceptors to use when invoking 'listActions'. - func makeListActionsInterceptors() -> [ClientInterceptor] + func listActions( + _ request: Arrow_Flight_Protocol_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream { + return performAsyncServerStreamingCall( + path: Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions.path, + request: request, + callOptions: callOptions ?? defaultCallOptions, + interceptors: interceptors?.makeListActionsInterceptors() ?? 
[] + ) + } } -internal enum Arrow_Flight_Protocol_FlightServiceClientMetadata { - internal static let serviceDescriptor = GRPCServiceDescriptor( - name: "FlightService", - fullName: "arrow.flight.protocol.FlightService", - methods: [ - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake, - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listFlights, - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getFlightInfo, - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getSchema, - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doGet, - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut, - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange, - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doAction, - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions, - ] - ) - - internal enum Methods { - internal static let handshake = GRPCMethodDescriptor( - name: "Handshake", - path: "/arrow.flight.protocol.FlightService/Handshake", - type: GRPCCallType.bidirectionalStreaming - ) +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +struct Arrow_Flight_Protocol_FlightServiceAsyncClient: Arrow_Flight_Protocol_FlightServiceAsyncClientProtocol { + var channel: GRPCChannel + var defaultCallOptions: CallOptions + var interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? + + init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol? 
= nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } +} - internal static let listFlights = GRPCMethodDescriptor( - name: "ListFlights", - path: "/arrow.flight.protocol.FlightService/ListFlights", - type: GRPCCallType.serverStreaming - ) +protocol Arrow_Flight_Protocol_FlightServiceClientInterceptorFactoryProtocol: Sendable { + /// - Returns: Interceptors to use when invoking 'handshake'. + func makeHandshakeInterceptors() -> [ClientInterceptor] - internal static let getFlightInfo = GRPCMethodDescriptor( - name: "GetFlightInfo", - path: "/arrow.flight.protocol.FlightService/GetFlightInfo", - type: GRPCCallType.unary - ) + /// - Returns: Interceptors to use when invoking 'listFlights'. + func makeListFlightsInterceptors() -> [ClientInterceptor] - internal static let getSchema = GRPCMethodDescriptor( - name: "GetSchema", - path: "/arrow.flight.protocol.FlightService/GetSchema", - type: GRPCCallType.unary - ) + /// - Returns: Interceptors to use when invoking 'getFlightInfo'. + func makeGetFlightInfoInterceptors() -> [ClientInterceptor] - internal static let doGet = GRPCMethodDescriptor( - name: "DoGet", - path: "/arrow.flight.protocol.FlightService/DoGet", - type: GRPCCallType.serverStreaming - ) + /// - Returns: Interceptors to use when invoking 'getSchema'. + func makeGetSchemaInterceptors() -> [ClientInterceptor] - internal static let doPut = GRPCMethodDescriptor( - name: "DoPut", - path: "/arrow.flight.protocol.FlightService/DoPut", - type: GRPCCallType.bidirectionalStreaming - ) + /// - Returns: Interceptors to use when invoking 'doGet'. + func makeDoGetInterceptors() -> [ClientInterceptor] - internal static let doExchange = GRPCMethodDescriptor( - name: "DoExchange", - path: "/arrow.flight.protocol.FlightService/DoExchange", - type: GRPCCallType.bidirectionalStreaming - ) + /// - Returns: Interceptors to use when invoking 'doPut'. 
+ func makeDoPutInterceptors() -> [ClientInterceptor] - internal static let doAction = GRPCMethodDescriptor( - name: "DoAction", - path: "/arrow.flight.protocol.FlightService/DoAction", - type: GRPCCallType.serverStreaming - ) + /// - Returns: Interceptors to use when invoking 'doExchange'. + func makeDoExchangeInterceptors() -> [ClientInterceptor] - internal static let listActions = GRPCMethodDescriptor( - name: "ListActions", - path: "/arrow.flight.protocol.FlightService/ListActions", - type: GRPCCallType.serverStreaming + /// - Returns: Interceptors to use when invoking 'doAction'. + func makeDoActionInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'listActions'. + func makeListActionsInterceptors() -> [ClientInterceptor] +} + +enum Arrow_Flight_Protocol_FlightServiceClientMetadata { + static let serviceDescriptor = GRPCServiceDescriptor( + name: "FlightService", + fullName: "arrow.flight.protocol.FlightService", + methods: [ + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.handshake, + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listFlights, + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getFlightInfo, + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.getSchema, + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doGet, + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut, + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange, + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doAction, + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions, + ] ) - } + + enum Methods { + static let handshake = GRPCMethodDescriptor( + name: "Handshake", + path: "/arrow.flight.protocol.FlightService/Handshake", + type: GRPCCallType.bidirectionalStreaming + ) + + static let listFlights = GRPCMethodDescriptor( + name: "ListFlights", + path: "/arrow.flight.protocol.FlightService/ListFlights", + type: GRPCCallType.serverStreaming + 
) + + static let getFlightInfo = GRPCMethodDescriptor( + name: "GetFlightInfo", + path: "/arrow.flight.protocol.FlightService/GetFlightInfo", + type: GRPCCallType.unary + ) + + static let getSchema = GRPCMethodDescriptor( + name: "GetSchema", + path: "/arrow.flight.protocol.FlightService/GetSchema", + type: GRPCCallType.unary + ) + + static let doGet = GRPCMethodDescriptor( + name: "DoGet", + path: "/arrow.flight.protocol.FlightService/DoGet", + type: GRPCCallType.serverStreaming + ) + + static let doPut = GRPCMethodDescriptor( + name: "DoPut", + path: "/arrow.flight.protocol.FlightService/DoPut", + type: GRPCCallType.bidirectionalStreaming + ) + + static let doExchange = GRPCMethodDescriptor( + name: "DoExchange", + path: "/arrow.flight.protocol.FlightService/DoExchange", + type: GRPCCallType.bidirectionalStreaming + ) + + static let doAction = GRPCMethodDescriptor( + name: "DoAction", + path: "/arrow.flight.protocol.FlightService/DoAction", + type: GRPCCallType.serverStreaming + ) + + static let listActions = GRPCMethodDescriptor( + name: "ListActions", + path: "/arrow.flight.protocol.FlightService/ListActions", + type: GRPCCallType.serverStreaming + ) + } } /// @@ -823,182 +822,182 @@ internal enum Arrow_Flight_Protocol_FlightServiceClientMetadata { /// can expose a set of actions that are available. /// /// To build a server, implement a class that conforms to this protocol. -internal protocol Arrow_Flight_Protocol_FlightServiceProvider: CallHandlerProvider { - var interceptors: Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol? { get } - - /// - /// Handshake between client and server. Depending on the server, the - /// handshake may be required to determine the token that should be used for - /// future operations. Both request and response are streams to allow multiple - /// round-trips depending on auth mechanism. 
- func handshake(context: StreamingResponseCallContext) -> EventLoopFuture<(StreamEvent) -> Void> - - /// - /// Get a list of available streams given a particular criteria. Most flight - /// services will expose one or more streams that are readily available for - /// retrieval. This api allows listing the streams available for - /// consumption. A user can also provide a criteria. The criteria can limit - /// the subset of streams that can be listed via this interface. Each flight - /// service allows its own definition of how to consume criteria. - func listFlights(request: Arrow_Flight_Protocol_Criteria, context: StreamingResponseCallContext) -> EventLoopFuture - - /// - /// For a given FlightDescriptor, get information about how the flight can be - /// consumed. This is a useful interface if the consumer of the interface - /// already can identify the specific flight to consume. This interface can - /// also allow a consumer to generate a flight stream through a specified - /// descriptor. For example, a flight descriptor might be something that - /// includes a SQL statement or a Pickled Python operation that will be - /// executed. In those cases, the descriptor will not be previously available - /// within the list of available streams provided by ListFlights but will be - /// available for consumption for the duration defined by the specific flight - /// service. - func getFlightInfo(request: Arrow_Flight_Protocol_FlightDescriptor, context: StatusOnlyCallContext) -> EventLoopFuture - - /// - /// For a given FlightDescriptor, get the Schema as described in Schema.fbs::Schema - /// This is used when a consumer needs the Schema of flight stream. Similar to - /// GetFlightInfo this interface may generate a new flight that was not previously - /// available in ListFlights. 
- func getSchema(request: Arrow_Flight_Protocol_FlightDescriptor, context: StatusOnlyCallContext) -> EventLoopFuture - - /// - /// Retrieve a single stream associated with a particular descriptor - /// associated with the referenced ticket. A Flight can be composed of one or - /// more streams where each stream can be retrieved using a separate opaque - /// ticket that the flight service uses for managing a collection of streams. - func doGet(request: Arrow_Flight_Protocol_Ticket, context: StreamingResponseCallContext) -> EventLoopFuture - - /// - /// Push a stream to the flight service associated with a particular - /// flight stream. This allows a client of a flight service to upload a stream - /// of data. Depending on the particular flight service, a client consumer - /// could be allowed to upload a single stream per descriptor or an unlimited - /// number. In the latter, the service might implement a 'seal' action that - /// can be applied to a descriptor once all streams are uploaded. - func doPut(context: StreamingResponseCallContext) -> EventLoopFuture<(StreamEvent) -> Void> - - /// - /// Open a bidirectional data channel for a given descriptor. This - /// allows clients to send and receive arbitrary Arrow data and - /// application-specific metadata in a single logical stream. In - /// contrast to DoGet/DoPut, this is more suited for clients - /// offloading computation (rather than storage) to a Flight service. - func doExchange(context: StreamingResponseCallContext) -> EventLoopFuture<(StreamEvent) -> Void> - - /// - /// Flight services can support an arbitrary number of simple actions in - /// addition to the possible ListFlights, GetFlightInfo, DoGet, DoPut - /// operations that are potentially available. DoAction allows a flight client - /// to do a specific action against a flight service. An action includes - /// opaque request and response objects that are specific to the type action - /// being undertaken. 
- func doAction(request: Arrow_Flight_Protocol_Action, context: StreamingResponseCallContext) -> EventLoopFuture - - /// - /// A flight service exposes all of the available action types that it has - /// along with descriptions. This allows different flight consumers to - /// understand the capabilities of the flight service. - func listActions(request: Arrow_Flight_Protocol_Empty, context: StreamingResponseCallContext) -> EventLoopFuture +protocol Arrow_Flight_Protocol_FlightServiceProvider: CallHandlerProvider { + var interceptors: Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol? { get } + + /// + /// Handshake between client and server. Depending on the server, the + /// handshake may be required to determine the token that should be used for + /// future operations. Both request and response are streams to allow multiple + /// round-trips depending on auth mechanism. + func handshake(context: StreamingResponseCallContext) -> EventLoopFuture<(StreamEvent) -> Void> + + /// + /// Get a list of available streams given a particular criteria. Most flight + /// services will expose one or more streams that are readily available for + /// retrieval. This api allows listing the streams available for + /// consumption. A user can also provide a criteria. The criteria can limit + /// the subset of streams that can be listed via this interface. Each flight + /// service allows its own definition of how to consume criteria. + func listFlights(request: Arrow_Flight_Protocol_Criteria, context: StreamingResponseCallContext) -> EventLoopFuture + + /// + /// For a given FlightDescriptor, get information about how the flight can be + /// consumed. This is a useful interface if the consumer of the interface + /// already can identify the specific flight to consume. This interface can + /// also allow a consumer to generate a flight stream through a specified + /// descriptor. 
For example, a flight descriptor might be something that + /// includes a SQL statement or a Pickled Python operation that will be + /// executed. In those cases, the descriptor will not be previously available + /// within the list of available streams provided by ListFlights but will be + /// available for consumption for the duration defined by the specific flight + /// service. + func getFlightInfo(request: Arrow_Flight_Protocol_FlightDescriptor, context: StatusOnlyCallContext) -> EventLoopFuture + + /// + /// For a given FlightDescriptor, get the Schema as described in Schema.fbs::Schema + /// This is used when a consumer needs the Schema of flight stream. Similar to + /// GetFlightInfo this interface may generate a new flight that was not previously + /// available in ListFlights. + func getSchema(request: Arrow_Flight_Protocol_FlightDescriptor, context: StatusOnlyCallContext) -> EventLoopFuture + + /// + /// Retrieve a single stream associated with a particular descriptor + /// associated with the referenced ticket. A Flight can be composed of one or + /// more streams where each stream can be retrieved using a separate opaque + /// ticket that the flight service uses for managing a collection of streams. + func doGet(request: Arrow_Flight_Protocol_Ticket, context: StreamingResponseCallContext) -> EventLoopFuture + + /// + /// Push a stream to the flight service associated with a particular + /// flight stream. This allows a client of a flight service to upload a stream + /// of data. Depending on the particular flight service, a client consumer + /// could be allowed to upload a single stream per descriptor or an unlimited + /// number. In the latter, the service might implement a 'seal' action that + /// can be applied to a descriptor once all streams are uploaded. + func doPut(context: StreamingResponseCallContext) -> EventLoopFuture<(StreamEvent) -> Void> + + /// + /// Open a bidirectional data channel for a given descriptor. 
This + /// allows clients to send and receive arbitrary Arrow data and + /// application-specific metadata in a single logical stream. In + /// contrast to DoGet/DoPut, this is more suited for clients + /// offloading computation (rather than storage) to a Flight service. + func doExchange(context: StreamingResponseCallContext) -> EventLoopFuture<(StreamEvent) -> Void> + + /// + /// Flight services can support an arbitrary number of simple actions in + /// addition to the possible ListFlights, GetFlightInfo, DoGet, DoPut + /// operations that are potentially available. DoAction allows a flight client + /// to do a specific action against a flight service. An action includes + /// opaque request and response objects that are specific to the type action + /// being undertaken. + func doAction(request: Arrow_Flight_Protocol_Action, context: StreamingResponseCallContext) -> EventLoopFuture + + /// + /// A flight service exposes all of the available action types that it has + /// along with descriptions. This allows different flight consumers to + /// understand the capabilities of the flight service. + func listActions(request: Arrow_Flight_Protocol_Empty, context: StreamingResponseCallContext) -> EventLoopFuture } extension Arrow_Flight_Protocol_FlightServiceProvider { - internal var serviceName: Substring { - return Arrow_Flight_Protocol_FlightServiceServerMetadata.serviceDescriptor.fullName[...] - } - - /// Determines, calls and returns the appropriate request handler, depending on the request's method. - /// Returns nil for methods not handled by this service. - internal func handle( - method name: Substring, - context: CallHandlerContext - ) -> GRPCServerHandlerProtocol? { - switch name { - case "Handshake": - return BidirectionalStreamingServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeHandshakeInterceptors() ?? 
[], - observerFactory: self.handshake(context:) - ) - - case "ListFlights": - return ServerStreamingServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeListFlightsInterceptors() ?? [], - userFunction: self.listFlights(request:context:) - ) - - case "GetFlightInfo": - return UnaryServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeGetFlightInfoInterceptors() ?? [], - userFunction: self.getFlightInfo(request:context:) - ) - - case "GetSchema": - return UnaryServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeGetSchemaInterceptors() ?? [], - userFunction: self.getSchema(request:context:) - ) - - case "DoGet": - return ServerStreamingServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeDoGetInterceptors() ?? [], - userFunction: self.doGet(request:context:) - ) - - case "DoPut": - return BidirectionalStreamingServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeDoPutInterceptors() ?? [], - observerFactory: self.doPut(context:) - ) - - case "DoExchange": - return BidirectionalStreamingServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeDoExchangeInterceptors() ?? 
[], - observerFactory: self.doExchange(context:) - ) - - case "DoAction": - return ServerStreamingServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeDoActionInterceptors() ?? [], - userFunction: self.doAction(request:context:) - ) - - case "ListActions": - return ServerStreamingServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeListActionsInterceptors() ?? [], - userFunction: self.listActions(request:context:) - ) - - default: - return nil + var serviceName: Substring { + return Arrow_Flight_Protocol_FlightServiceServerMetadata.serviceDescriptor.fullName[...] + } + + /// Determines, calls and returns the appropriate request handler, depending on the request's method. + /// Returns nil for methods not handled by this service. + func handle( + method name: Substring, + context: CallHandlerContext + ) -> GRPCServerHandlerProtocol? { + switch name { + case "Handshake": + return BidirectionalStreamingServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeHandshakeInterceptors() ?? [], + observerFactory: handshake(context:) + ) + + case "ListFlights": + return ServerStreamingServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeListFlightsInterceptors() ?? [], + userFunction: listFlights(request:context:) + ) + + case "GetFlightInfo": + return UnaryServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeGetFlightInfoInterceptors() ?? 
[], + userFunction: getFlightInfo(request:context:) + ) + + case "GetSchema": + return UnaryServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeGetSchemaInterceptors() ?? [], + userFunction: getSchema(request:context:) + ) + + case "DoGet": + return ServerStreamingServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeDoGetInterceptors() ?? [], + userFunction: doGet(request:context:) + ) + + case "DoPut": + return BidirectionalStreamingServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeDoPutInterceptors() ?? [], + observerFactory: doPut(context:) + ) + + case "DoExchange": + return BidirectionalStreamingServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeDoExchangeInterceptors() ?? [], + observerFactory: doExchange(context:) + ) + + case "DoAction": + return ServerStreamingServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeDoActionInterceptors() ?? [], + userFunction: doAction(request:context:) + ) + + case "ListActions": + return ServerStreamingServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeListActionsInterceptors() ?? [], + userFunction: listActions(request:context:) + ) + + default: + return nil + } } - } } /// @@ -1009,335 +1008,334 @@ extension Arrow_Flight_Protocol_FlightServiceProvider { /// /// To implement a server, implement an object which conforms to this protocol. 
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) -internal protocol Arrow_Flight_Protocol_FlightServiceAsyncProvider: CallHandlerProvider, Sendable { - static var serviceDescriptor: GRPCServiceDescriptor { get } - var interceptors: Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol? { get } - - /// - /// Handshake between client and server. Depending on the server, the - /// handshake may be required to determine the token that should be used for - /// future operations. Both request and response are streams to allow multiple - /// round-trips depending on auth mechanism. - func handshake( - requestStream: GRPCAsyncRequestStream, - responseStream: GRPCAsyncResponseStreamWriter, - context: GRPCAsyncServerCallContext - ) async throws - - /// - /// Get a list of available streams given a particular criteria. Most flight - /// services will expose one or more streams that are readily available for - /// retrieval. This api allows listing the streams available for - /// consumption. A user can also provide a criteria. The criteria can limit - /// the subset of streams that can be listed via this interface. Each flight - /// service allows its own definition of how to consume criteria. - func listFlights( - request: Arrow_Flight_Protocol_Criteria, - responseStream: GRPCAsyncResponseStreamWriter, - context: GRPCAsyncServerCallContext - ) async throws - - /// - /// For a given FlightDescriptor, get information about how the flight can be - /// consumed. This is a useful interface if the consumer of the interface - /// already can identify the specific flight to consume. This interface can - /// also allow a consumer to generate a flight stream through a specified - /// descriptor. For example, a flight descriptor might be something that - /// includes a SQL statement or a Pickled Python operation that will be - /// executed. 
In those cases, the descriptor will not be previously available - /// within the list of available streams provided by ListFlights but will be - /// available for consumption for the duration defined by the specific flight - /// service. - func getFlightInfo( - request: Arrow_Flight_Protocol_FlightDescriptor, - context: GRPCAsyncServerCallContext - ) async throws -> Arrow_Flight_Protocol_FlightInfo - - /// - /// For a given FlightDescriptor, get the Schema as described in Schema.fbs::Schema - /// This is used when a consumer needs the Schema of flight stream. Similar to - /// GetFlightInfo this interface may generate a new flight that was not previously - /// available in ListFlights. - func getSchema( - request: Arrow_Flight_Protocol_FlightDescriptor, - context: GRPCAsyncServerCallContext - ) async throws -> Arrow_Flight_Protocol_SchemaResult - - /// - /// Retrieve a single stream associated with a particular descriptor - /// associated with the referenced ticket. A Flight can be composed of one or - /// more streams where each stream can be retrieved using a separate opaque - /// ticket that the flight service uses for managing a collection of streams. - func doGet( - request: Arrow_Flight_Protocol_Ticket, - responseStream: GRPCAsyncResponseStreamWriter, - context: GRPCAsyncServerCallContext - ) async throws - - /// - /// Push a stream to the flight service associated with a particular - /// flight stream. This allows a client of a flight service to upload a stream - /// of data. Depending on the particular flight service, a client consumer - /// could be allowed to upload a single stream per descriptor or an unlimited - /// number. In the latter, the service might implement a 'seal' action that - /// can be applied to a descriptor once all streams are uploaded. 
- func doPut( - requestStream: GRPCAsyncRequestStream, - responseStream: GRPCAsyncResponseStreamWriter, - context: GRPCAsyncServerCallContext - ) async throws - - /// - /// Open a bidirectional data channel for a given descriptor. This - /// allows clients to send and receive arbitrary Arrow data and - /// application-specific metadata in a single logical stream. In - /// contrast to DoGet/DoPut, this is more suited for clients - /// offloading computation (rather than storage) to a Flight service. - func doExchange( - requestStream: GRPCAsyncRequestStream, - responseStream: GRPCAsyncResponseStreamWriter, - context: GRPCAsyncServerCallContext - ) async throws - - /// - /// Flight services can support an arbitrary number of simple actions in - /// addition to the possible ListFlights, GetFlightInfo, DoGet, DoPut - /// operations that are potentially available. DoAction allows a flight client - /// to do a specific action against a flight service. An action includes - /// opaque request and response objects that are specific to the type action - /// being undertaken. - func doAction( - request: Arrow_Flight_Protocol_Action, - responseStream: GRPCAsyncResponseStreamWriter, - context: GRPCAsyncServerCallContext - ) async throws - - /// - /// A flight service exposes all of the available action types that it has - /// along with descriptions. This allows different flight consumers to - /// understand the capabilities of the flight service. - func listActions( - request: Arrow_Flight_Protocol_Empty, - responseStream: GRPCAsyncResponseStreamWriter, - context: GRPCAsyncServerCallContext - ) async throws +protocol Arrow_Flight_Protocol_FlightServiceAsyncProvider: CallHandlerProvider, Sendable { + static var serviceDescriptor: GRPCServiceDescriptor { get } + var interceptors: Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol? { get } + + /// + /// Handshake between client and server. 
Depending on the server, the + /// handshake may be required to determine the token that should be used for + /// future operations. Both request and response are streams to allow multiple + /// round-trips depending on auth mechanism. + func handshake( + requestStream: GRPCAsyncRequestStream, + responseStream: GRPCAsyncResponseStreamWriter, + context: GRPCAsyncServerCallContext + ) async throws + + /// + /// Get a list of available streams given a particular criteria. Most flight + /// services will expose one or more streams that are readily available for + /// retrieval. This api allows listing the streams available for + /// consumption. A user can also provide a criteria. The criteria can limit + /// the subset of streams that can be listed via this interface. Each flight + /// service allows its own definition of how to consume criteria. + func listFlights( + request: Arrow_Flight_Protocol_Criteria, + responseStream: GRPCAsyncResponseStreamWriter, + context: GRPCAsyncServerCallContext + ) async throws + + /// + /// For a given FlightDescriptor, get information about how the flight can be + /// consumed. This is a useful interface if the consumer of the interface + /// already can identify the specific flight to consume. This interface can + /// also allow a consumer to generate a flight stream through a specified + /// descriptor. For example, a flight descriptor might be something that + /// includes a SQL statement or a Pickled Python operation that will be + /// executed. In those cases, the descriptor will not be previously available + /// within the list of available streams provided by ListFlights but will be + /// available for consumption for the duration defined by the specific flight + /// service. 
+ func getFlightInfo( + request: Arrow_Flight_Protocol_FlightDescriptor, + context: GRPCAsyncServerCallContext + ) async throws -> Arrow_Flight_Protocol_FlightInfo + + /// + /// For a given FlightDescriptor, get the Schema as described in Schema.fbs::Schema + /// This is used when a consumer needs the Schema of flight stream. Similar to + /// GetFlightInfo this interface may generate a new flight that was not previously + /// available in ListFlights. + func getSchema( + request: Arrow_Flight_Protocol_FlightDescriptor, + context: GRPCAsyncServerCallContext + ) async throws -> Arrow_Flight_Protocol_SchemaResult + + /// + /// Retrieve a single stream associated with a particular descriptor + /// associated with the referenced ticket. A Flight can be composed of one or + /// more streams where each stream can be retrieved using a separate opaque + /// ticket that the flight service uses for managing a collection of streams. + func doGet( + request: Arrow_Flight_Protocol_Ticket, + responseStream: GRPCAsyncResponseStreamWriter, + context: GRPCAsyncServerCallContext + ) async throws + + /// + /// Push a stream to the flight service associated with a particular + /// flight stream. This allows a client of a flight service to upload a stream + /// of data. Depending on the particular flight service, a client consumer + /// could be allowed to upload a single stream per descriptor or an unlimited + /// number. In the latter, the service might implement a 'seal' action that + /// can be applied to a descriptor once all streams are uploaded. + func doPut( + requestStream: GRPCAsyncRequestStream, + responseStream: GRPCAsyncResponseStreamWriter, + context: GRPCAsyncServerCallContext + ) async throws + + /// + /// Open a bidirectional data channel for a given descriptor. This + /// allows clients to send and receive arbitrary Arrow data and + /// application-specific metadata in a single logical stream. 
In + /// contrast to DoGet/DoPut, this is more suited for clients + /// offloading computation (rather than storage) to a Flight service. + func doExchange( + requestStream: GRPCAsyncRequestStream, + responseStream: GRPCAsyncResponseStreamWriter, + context: GRPCAsyncServerCallContext + ) async throws + + /// + /// Flight services can support an arbitrary number of simple actions in + /// addition to the possible ListFlights, GetFlightInfo, DoGet, DoPut + /// operations that are potentially available. DoAction allows a flight client + /// to do a specific action against a flight service. An action includes + /// opaque request and response objects that are specific to the type action + /// being undertaken. + func doAction( + request: Arrow_Flight_Protocol_Action, + responseStream: GRPCAsyncResponseStreamWriter, + context: GRPCAsyncServerCallContext + ) async throws + + /// + /// A flight service exposes all of the available action types that it has + /// along with descriptions. This allows different flight consumers to + /// understand the capabilities of the flight service. + func listActions( + request: Arrow_Flight_Protocol_Empty, + responseStream: GRPCAsyncResponseStreamWriter, + context: GRPCAsyncServerCallContext + ) async throws } @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) extension Arrow_Flight_Protocol_FlightServiceAsyncProvider { - internal static var serviceDescriptor: GRPCServiceDescriptor { - return Arrow_Flight_Protocol_FlightServiceServerMetadata.serviceDescriptor - } - - internal var serviceName: Substring { - return Arrow_Flight_Protocol_FlightServiceServerMetadata.serviceDescriptor.fullName[...] - } - - internal var interceptors: Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol? { - return nil - } - - internal func handle( - method name: Substring, - context: CallHandlerContext - ) -> GRPCServerHandlerProtocol? 
{ - switch name { - case "Handshake": - return GRPCAsyncServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeHandshakeInterceptors() ?? [], - wrapping: { try await self.handshake(requestStream: $0, responseStream: $1, context: $2) } - ) - - case "ListFlights": - return GRPCAsyncServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeListFlightsInterceptors() ?? [], - wrapping: { try await self.listFlights(request: $0, responseStream: $1, context: $2) } - ) - - case "GetFlightInfo": - return GRPCAsyncServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeGetFlightInfoInterceptors() ?? [], - wrapping: { try await self.getFlightInfo(request: $0, context: $1) } - ) - - case "GetSchema": - return GRPCAsyncServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeGetSchemaInterceptors() ?? [], - wrapping: { try await self.getSchema(request: $0, context: $1) } - ) - - case "DoGet": - return GRPCAsyncServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeDoGetInterceptors() ?? [], - wrapping: { try await self.doGet(request: $0, responseStream: $1, context: $2) } - ) - - case "DoPut": - return GRPCAsyncServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeDoPutInterceptors() ?? 
[], - wrapping: { try await self.doPut(requestStream: $0, responseStream: $1, context: $2) } - ) - - case "DoExchange": - return GRPCAsyncServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeDoExchangeInterceptors() ?? [], - wrapping: { try await self.doExchange(requestStream: $0, responseStream: $1, context: $2) } - ) - - case "DoAction": - return GRPCAsyncServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeDoActionInterceptors() ?? [], - wrapping: { try await self.doAction(request: $0, responseStream: $1, context: $2) } - ) - - case "ListActions": - return GRPCAsyncServerHandler( - context: context, - requestDeserializer: ProtobufDeserializer(), - responseSerializer: ProtobufSerializer(), - interceptors: self.interceptors?.makeListActionsInterceptors() ?? [], - wrapping: { try await self.listActions(request: $0, responseStream: $1, context: $2) } - ) - - default: - return nil + static var serviceDescriptor: GRPCServiceDescriptor { + return Arrow_Flight_Protocol_FlightServiceServerMetadata.serviceDescriptor } - } -} - -internal protocol Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol: Sendable { - - /// - Returns: Interceptors to use when handling 'handshake'. - /// Defaults to calling `self.makeInterceptors()`. - func makeHandshakeInterceptors() -> [ServerInterceptor] - - /// - Returns: Interceptors to use when handling 'listFlights'. - /// Defaults to calling `self.makeInterceptors()`. - func makeListFlightsInterceptors() -> [ServerInterceptor] - - /// - Returns: Interceptors to use when handling 'getFlightInfo'. - /// Defaults to calling `self.makeInterceptors()`. - func makeGetFlightInfoInterceptors() -> [ServerInterceptor] - /// - Returns: Interceptors to use when handling 'getSchema'. 
- /// Defaults to calling `self.makeInterceptors()`. - func makeGetSchemaInterceptors() -> [ServerInterceptor] - - /// - Returns: Interceptors to use when handling 'doGet'. - /// Defaults to calling `self.makeInterceptors()`. - func makeDoGetInterceptors() -> [ServerInterceptor] + var serviceName: Substring { + return Arrow_Flight_Protocol_FlightServiceServerMetadata.serviceDescriptor.fullName[...] + } - /// - Returns: Interceptors to use when handling 'doPut'. - /// Defaults to calling `self.makeInterceptors()`. - func makeDoPutInterceptors() -> [ServerInterceptor] + var interceptors: Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol? { + return nil + } - /// - Returns: Interceptors to use when handling 'doExchange'. - /// Defaults to calling `self.makeInterceptors()`. - func makeDoExchangeInterceptors() -> [ServerInterceptor] + func handle( + method name: Substring, + context: CallHandlerContext + ) -> GRPCServerHandlerProtocol? { + switch name { + case "Handshake": + return GRPCAsyncServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeHandshakeInterceptors() ?? [], + wrapping: { try await self.handshake(requestStream: $0, responseStream: $1, context: $2) } + ) + + case "ListFlights": + return GRPCAsyncServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeListFlightsInterceptors() ?? [], + wrapping: { try await self.listFlights(request: $0, responseStream: $1, context: $2) } + ) + + case "GetFlightInfo": + return GRPCAsyncServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeGetFlightInfoInterceptors() ?? 
[], + wrapping: { try await self.getFlightInfo(request: $0, context: $1) } + ) + + case "GetSchema": + return GRPCAsyncServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeGetSchemaInterceptors() ?? [], + wrapping: { try await self.getSchema(request: $0, context: $1) } + ) + + case "DoGet": + return GRPCAsyncServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeDoGetInterceptors() ?? [], + wrapping: { try await self.doGet(request: $0, responseStream: $1, context: $2) } + ) + + case "DoPut": + return GRPCAsyncServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeDoPutInterceptors() ?? [], + wrapping: { try await self.doPut(requestStream: $0, responseStream: $1, context: $2) } + ) + + case "DoExchange": + return GRPCAsyncServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeDoExchangeInterceptors() ?? [], + wrapping: { try await self.doExchange(requestStream: $0, responseStream: $1, context: $2) } + ) + + case "DoAction": + return GRPCAsyncServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeDoActionInterceptors() ?? [], + wrapping: { try await self.doAction(request: $0, responseStream: $1, context: $2) } + ) + + case "ListActions": + return GRPCAsyncServerHandler( + context: context, + requestDeserializer: ProtobufDeserializer(), + responseSerializer: ProtobufSerializer(), + interceptors: interceptors?.makeListActionsInterceptors() ?? 
[], + wrapping: { try await self.listActions(request: $0, responseStream: $1, context: $2) } + ) + + default: + return nil + } + } +} - /// - Returns: Interceptors to use when handling 'doAction'. - /// Defaults to calling `self.makeInterceptors()`. - func makeDoActionInterceptors() -> [ServerInterceptor] +protocol Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol: Sendable { + /// - Returns: Interceptors to use when handling 'handshake'. + /// Defaults to calling `self.makeInterceptors()`. + func makeHandshakeInterceptors() -> [ServerInterceptor] - /// - Returns: Interceptors to use when handling 'listActions'. - /// Defaults to calling `self.makeInterceptors()`. - func makeListActionsInterceptors() -> [ServerInterceptor] -} + /// - Returns: Interceptors to use when handling 'listFlights'. + /// Defaults to calling `self.makeInterceptors()`. + func makeListFlightsInterceptors() -> [ServerInterceptor] -internal enum Arrow_Flight_Protocol_FlightServiceServerMetadata { - internal static let serviceDescriptor = GRPCServiceDescriptor( - name: "FlightService", - fullName: "arrow.flight.protocol.FlightService", - methods: [ - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.handshake, - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.listFlights, - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.getFlightInfo, - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.getSchema, - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doGet, - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doPut, - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doExchange, - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doAction, - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.listActions, - ] - ) - - internal enum Methods { - internal static let handshake = GRPCMethodDescriptor( - name: "Handshake", - path: "/arrow.flight.protocol.FlightService/Handshake", - type: 
GRPCCallType.bidirectionalStreaming - ) + /// - Returns: Interceptors to use when handling 'getFlightInfo'. + /// Defaults to calling `self.makeInterceptors()`. + func makeGetFlightInfoInterceptors() -> [ServerInterceptor] - internal static let listFlights = GRPCMethodDescriptor( - name: "ListFlights", - path: "/arrow.flight.protocol.FlightService/ListFlights", - type: GRPCCallType.serverStreaming - ) + /// - Returns: Interceptors to use when handling 'getSchema'. + /// Defaults to calling `self.makeInterceptors()`. + func makeGetSchemaInterceptors() -> [ServerInterceptor] - internal static let getFlightInfo = GRPCMethodDescriptor( - name: "GetFlightInfo", - path: "/arrow.flight.protocol.FlightService/GetFlightInfo", - type: GRPCCallType.unary - ) + /// - Returns: Interceptors to use when handling 'doGet'. + /// Defaults to calling `self.makeInterceptors()`. + func makeDoGetInterceptors() -> [ServerInterceptor] - internal static let getSchema = GRPCMethodDescriptor( - name: "GetSchema", - path: "/arrow.flight.protocol.FlightService/GetSchema", - type: GRPCCallType.unary - ) + /// - Returns: Interceptors to use when handling 'doPut'. + /// Defaults to calling `self.makeInterceptors()`. + func makeDoPutInterceptors() -> [ServerInterceptor] - internal static let doGet = GRPCMethodDescriptor( - name: "DoGet", - path: "/arrow.flight.protocol.FlightService/DoGet", - type: GRPCCallType.serverStreaming - ) + /// - Returns: Interceptors to use when handling 'doExchange'. + /// Defaults to calling `self.makeInterceptors()`. + func makeDoExchangeInterceptors() -> [ServerInterceptor] - internal static let doPut = GRPCMethodDescriptor( - name: "DoPut", - path: "/arrow.flight.protocol.FlightService/DoPut", - type: GRPCCallType.bidirectionalStreaming - ) + /// - Returns: Interceptors to use when handling 'doAction'. + /// Defaults to calling `self.makeInterceptors()`. 
+ func makeDoActionInterceptors() -> [ServerInterceptor] - internal static let doExchange = GRPCMethodDescriptor( - name: "DoExchange", - path: "/arrow.flight.protocol.FlightService/DoExchange", - type: GRPCCallType.bidirectionalStreaming - ) + /// - Returns: Interceptors to use when handling 'listActions'. + /// Defaults to calling `self.makeInterceptors()`. + func makeListActionsInterceptors() -> [ServerInterceptor] +} - internal static let doAction = GRPCMethodDescriptor( - name: "DoAction", - path: "/arrow.flight.protocol.FlightService/DoAction", - type: GRPCCallType.serverStreaming +enum Arrow_Flight_Protocol_FlightServiceServerMetadata { + static let serviceDescriptor = GRPCServiceDescriptor( + name: "FlightService", + fullName: "arrow.flight.protocol.FlightService", + methods: [ + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.handshake, + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.listFlights, + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.getFlightInfo, + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.getSchema, + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doGet, + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doPut, + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doExchange, + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doAction, + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.listActions, + ] ) - internal static let listActions = GRPCMethodDescriptor( - name: "ListActions", - path: "/arrow.flight.protocol.FlightService/ListActions", - type: GRPCCallType.serverStreaming - ) - } + enum Methods { + static let handshake = GRPCMethodDescriptor( + name: "Handshake", + path: "/arrow.flight.protocol.FlightService/Handshake", + type: GRPCCallType.bidirectionalStreaming + ) + + static let listFlights = GRPCMethodDescriptor( + name: "ListFlights", + path: "/arrow.flight.protocol.FlightService/ListFlights", + type: GRPCCallType.serverStreaming + ) + + 
static let getFlightInfo = GRPCMethodDescriptor( + name: "GetFlightInfo", + path: "/arrow.flight.protocol.FlightService/GetFlightInfo", + type: GRPCCallType.unary + ) + + static let getSchema = GRPCMethodDescriptor( + name: "GetSchema", + path: "/arrow.flight.protocol.FlightService/GetSchema", + type: GRPCCallType.unary + ) + + static let doGet = GRPCMethodDescriptor( + name: "DoGet", + path: "/arrow.flight.protocol.FlightService/DoGet", + type: GRPCCallType.serverStreaming + ) + + static let doPut = GRPCMethodDescriptor( + name: "DoPut", + path: "/arrow.flight.protocol.FlightService/DoPut", + type: GRPCCallType.bidirectionalStreaming + ) + + static let doExchange = GRPCMethodDescriptor( + name: "DoExchange", + path: "/arrow.flight.protocol.FlightService/DoExchange", + type: GRPCCallType.bidirectionalStreaming + ) + + static let doAction = GRPCMethodDescriptor( + name: "DoAction", + path: "/arrow.flight.protocol.FlightService/DoAction", + type: GRPCCallType.serverStreaming + ) + + static let listActions = GRPCMethodDescriptor( + name: "ListActions", + path: "/arrow.flight.protocol.FlightService/ListActions", + type: GRPCCallType.serverStreaming + ) + } } diff --git a/Sources/ArrowFlight/Flight.pb.swift b/Sources/ArrowFlight/Flight.pb.swift index b50d406..175bd86 100644 --- a/Sources/ArrowFlight/Flight.pb.swift +++ b/Sources/ArrowFlight/Flight.pb.swift @@ -32,9 +32,9 @@ import SwiftProtobuf // incompatible with the version of SwiftProtobuf to which you are linking. // Please ensure that you are building against the same version of the API // that was used to generate this file. 
-fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { - struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} - typealias Version = _2 +private struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 } /// @@ -42,176 +42,175 @@ fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAP /// /// This is used by CancelFlightInfoResult.status. enum Arrow_Flight_Protocol_CancelStatus: SwiftProtobuf.Enum { - typealias RawValue = Int - - /// The cancellation status is unknown. Servers should avoid using - /// this value (send a NOT_FOUND error if the requested query is - /// not known). Clients can retry the request. - case unspecified // = 0 - - /// The cancellation request is complete. Subsequent requests with - /// the same payload may return CANCELLED or a NOT_FOUND error. - case cancelled // = 1 - - /// The cancellation request is in progress. The client may retry - /// the cancellation request. - case cancelling // = 2 - - /// The query is not cancellable. The client should not retry the - /// cancellation request. - case notCancellable // = 3 - case UNRECOGNIZED(Int) - - init() { - self = .unspecified - } - - init?(rawValue: Int) { - switch rawValue { - case 0: self = .unspecified - case 1: self = .cancelled - case 2: self = .cancelling - case 3: self = .notCancellable - default: self = .UNRECOGNIZED(rawValue) - } - } - - var rawValue: Int { - switch self { - case .unspecified: return 0 - case .cancelled: return 1 - case .cancelling: return 2 - case .notCancellable: return 3 - case .UNRECOGNIZED(let i): return i - } - } + typealias RawValue = Int + + /// The cancellation status is unknown. Servers should avoid using + /// this value (send a NOT_FOUND error if the requested query is + /// not known). Clients can retry the request. + case unspecified // = 0 + + /// The cancellation request is complete. 
Subsequent requests with + /// the same payload may return CANCELLED or a NOT_FOUND error. + case cancelled // = 1 + + /// The cancellation request is in progress. The client may retry + /// the cancellation request. + case cancelling // = 2 + + /// The query is not cancellable. The client should not retry the + /// cancellation request. + case notCancellable // = 3 + case UNRECOGNIZED(Int) + + init() { + self = .unspecified + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unspecified + case 1: self = .cancelled + case 2: self = .cancelling + case 3: self = .notCancellable + default: self = .UNRECOGNIZED(rawValue) + } + } + var rawValue: Int { + switch self { + case .unspecified: return 0 + case .cancelled: return 1 + case .cancelling: return 2 + case .notCancellable: return 3 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_CancelStatus: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_CancelStatus] = [ - .unspecified, - .cancelled, - .cancelling, - .notCancellable, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_CancelStatus] = [ + .unspecified, + .cancelled, + .cancelling, + .notCancellable, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) /// /// The request that a client provides to a server on handshake. struct Arrow_Flight_Protocol_HandshakeRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
- /// - /// A defined protocol version - var protocolVersion: UInt64 = 0 + /// + /// A defined protocol version + var protocolVersion: UInt64 = 0 - /// - /// Arbitrary auth/handshake info. - var payload: Data = Data() + /// + /// Arbitrary auth/handshake info. + var payload: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } struct Arrow_Flight_Protocol_HandshakeResponse { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// - /// A defined protocol version - var protocolVersion: UInt64 = 0 + /// + /// A defined protocol version + var protocolVersion: UInt64 = 0 - /// - /// Arbitrary auth/handshake info. - var payload: Data = Data() + /// + /// Arbitrary auth/handshake info. + var payload: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// A message for doing simple auth. struct Arrow_Flight_Protocol_BasicAuth { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
- var username: String = String() + var username: String = .init() - var password: String = String() + var password: String = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } struct Arrow_Flight_Protocol_Empty { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// Describes an available action, including both the name used for execution /// along with a short description of the purpose of the action. struct Arrow_Flight_Protocol_ActionType { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var type: String = String() + var type: String = .init() - var description_p: String = String() + var description_p: String = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// A service specific expression that can be used to return a limited set /// of available Arrow Flight streams. struct Arrow_Flight_Protocol_Criteria { - // SwiftProtobuf.Message conformance is added in an extension below. 
See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var expression: Data = Data() + var expression: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// An opaque action specific for the service. struct Arrow_Flight_Protocol_Action { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var type: String = String() + var type: String = .init() - var body: Data = Data() + var body: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -219,24 +218,25 @@ struct Arrow_Flight_Protocol_Action { /// /// The request should be stored in Action.body. struct Arrow_Flight_Protocol_CancelFlightInfoRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var info: Arrow_Flight_Protocol_FlightInfo { - get {return _info ?? 
Arrow_Flight_Protocol_FlightInfo()} - set {_info = newValue} - } - /// Returns true if `info` has been explicitly set. - var hasInfo: Bool {return self._info != nil} - /// Clears the value of `info`. Subsequent reads from it will return its default value. - mutating func clearInfo() {self._info = nil} + var info: Arrow_Flight_Protocol_FlightInfo { + get { return _info ?? Arrow_Flight_Protocol_FlightInfo() } + set { _info = newValue } + } + + /// Returns true if `info` has been explicitly set. + var hasInfo: Bool { return _info != nil } + /// Clears the value of `info`. Subsequent reads from it will return its default value. + mutating func clearInfo() { _info = nil } - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} - fileprivate var _info: Arrow_Flight_Protocol_FlightInfo? = nil + fileprivate var _info: Arrow_Flight_Protocol_FlightInfo? } /// @@ -244,38 +244,39 @@ struct Arrow_Flight_Protocol_CancelFlightInfoRequest { /// /// The request should be stored in Action.body. struct Arrow_Flight_Protocol_RenewFlightEndpointRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var endpoint: Arrow_Flight_Protocol_FlightEndpoint { - get {return _endpoint ?? Arrow_Flight_Protocol_FlightEndpoint()} - set {_endpoint = newValue} - } - /// Returns true if `endpoint` has been explicitly set. - var hasEndpoint: Bool {return self._endpoint != nil} - /// Clears the value of `endpoint`. Subsequent reads from it will return its default value. 
- mutating func clearEndpoint() {self._endpoint = nil} + var endpoint: Arrow_Flight_Protocol_FlightEndpoint { + get { return _endpoint ?? Arrow_Flight_Protocol_FlightEndpoint() } + set { _endpoint = newValue } + } - var unknownFields = SwiftProtobuf.UnknownStorage() + /// Returns true if `endpoint` has been explicitly set. + var hasEndpoint: Bool { return _endpoint != nil } + /// Clears the value of `endpoint`. Subsequent reads from it will return its default value. + mutating func clearEndpoint() { _endpoint = nil } - init() {} + var unknownFields = SwiftProtobuf.UnknownStorage() - fileprivate var _endpoint: Arrow_Flight_Protocol_FlightEndpoint? = nil + init() {} + + fileprivate var _endpoint: Arrow_Flight_Protocol_FlightEndpoint? } /// /// An opaque result returned after executing an action. struct Arrow_Flight_Protocol_Result { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var body: Data = Data() + var body: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -283,251 +284,253 @@ struct Arrow_Flight_Protocol_Result { /// /// The result should be stored in Result.body. struct Arrow_Flight_Protocol_CancelFlightInfoResult { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
- var status: Arrow_Flight_Protocol_CancelStatus = .unspecified + var status: Arrow_Flight_Protocol_CancelStatus = .unspecified - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// Wrap the result of a getSchema call struct Arrow_Flight_Protocol_SchemaResult { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// The schema of the dataset in its IPC form: - /// 4 bytes - an optional IPC_CONTINUATION_TOKEN prefix - /// 4 bytes - the byte length of the payload - /// a flatbuffer Message whose header is the Schema - var schema: Data = Data() + /// The schema of the dataset in its IPC form: + /// 4 bytes - an optional IPC_CONTINUATION_TOKEN prefix + /// 4 bytes - the byte length of the payload + /// a flatbuffer Message whose header is the Schema + var schema: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// The name or tag for a Flight. May be used as a way to retrieve or generate /// a flight or be used to expose a set of previously defined flights. struct Arrow_Flight_Protocol_FlightDescriptor { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - var type: Arrow_Flight_Protocol_FlightDescriptor.DescriptorType = .unknown + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// - /// Opaque value used to express a command. Should only be defined when - /// type = CMD. - var cmd: Data = Data() - - /// - /// List of strings identifying a particular dataset. Should only be defined - /// when type = PATH. - var path: [String] = [] - - var unknownFields = SwiftProtobuf.UnknownStorage() - - /// - /// Describes what type of descriptor is defined. - enum DescriptorType: SwiftProtobuf.Enum { - typealias RawValue = Int - - /// Protobuf pattern, not used. - case unknown // = 0 + var type: Arrow_Flight_Protocol_FlightDescriptor.DescriptorType = .unknown /// - /// A named path that identifies a dataset. A path is composed of a string - /// or list of strings describing a particular dataset. This is conceptually - /// similar to a path inside a filesystem. - case path // = 1 + /// Opaque value used to express a command. Should only be defined when + /// type = CMD. + var cmd: Data = .init() /// - /// An opaque command to generate a dataset. - case cmd // = 2 - case UNRECOGNIZED(Int) + /// List of strings identifying a particular dataset. Should only be defined + /// when type = PATH. + var path: [String] = [] - init() { - self = .unknown - } - - init?(rawValue: Int) { - switch rawValue { - case 0: self = .unknown - case 1: self = .path - case 2: self = .cmd - default: self = .UNRECOGNIZED(rawValue) - } - } + var unknownFields = SwiftProtobuf.UnknownStorage() - var rawValue: Int { - switch self { - case .unknown: return 0 - case .path: return 1 - case .cmd: return 2 - case .UNRECOGNIZED(let i): return i - } + /// + /// Describes what type of descriptor is defined. + enum DescriptorType: SwiftProtobuf.Enum { + typealias RawValue = Int + + /// Protobuf pattern, not used. + case unknown // = 0 + + /// + /// A named path that identifies a dataset. 
A path is composed of a string + /// or list of strings describing a particular dataset. This is conceptually + /// similar to a path inside a filesystem. + case path // = 1 + + /// + /// An opaque command to generate a dataset. + case cmd // = 2 + case UNRECOGNIZED(Int) + + init() { + self = .unknown + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unknown + case 1: self = .path + case 2: self = .cmd + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .unknown: return 0 + case .path: return 1 + case .cmd: return 2 + case let .UNRECOGNIZED(i): return i + } + } } - } - - init() {} + init() {} } #if swift(>=4.2) extension Arrow_Flight_Protocol_FlightDescriptor.DescriptorType: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_FlightDescriptor.DescriptorType] = [ - .unknown, - .path, - .cmd, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_FlightDescriptor.DescriptorType] = [ + .unknown, + .path, + .cmd, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) /// /// The access coordinates for retrieval of a dataset. With a FlightInfo, a /// consumer is able to determine how to retrieve a dataset. struct Arrow_Flight_Protocol_FlightInfo { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// The schema of the dataset in its IPC form: - /// 4 bytes - an optional IPC_CONTINUATION_TOKEN prefix - /// 4 bytes - the byte length of the payload - /// a flatbuffer Message whose header is the Schema - var schema: Data = Data() - - /// - /// The descriptor associated with this info. - var flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor { - get {return _flightDescriptor ?? 
Arrow_Flight_Protocol_FlightDescriptor()} - set {_flightDescriptor = newValue} - } - /// Returns true if `flightDescriptor` has been explicitly set. - var hasFlightDescriptor: Bool {return self._flightDescriptor != nil} - /// Clears the value of `flightDescriptor`. Subsequent reads from it will return its default value. - mutating func clearFlightDescriptor() {self._flightDescriptor = nil} - - /// - /// A list of endpoints associated with the flight. To consume the - /// whole flight, all endpoints (and hence all Tickets) must be - /// consumed. Endpoints can be consumed in any order. - /// - /// In other words, an application can use multiple endpoints to - /// represent partitioned data. - /// - /// If the returned data has an ordering, an application can use - /// "FlightInfo.ordered = true" or should return the all data in a - /// single endpoint. Otherwise, there is no ordering defined on - /// endpoints or the data within. - /// - /// A client can read ordered data by reading data from returned - /// endpoints, in order, from front to back. - /// - /// Note that a client may ignore "FlightInfo.ordered = true". If an - /// ordering is important for an application, an application must - /// choose one of them: - /// - /// * An application requires that all clients must read data in - /// returned endpoints order. - /// * An application must return the all data in a single endpoint. - var endpoint: [Arrow_Flight_Protocol_FlightEndpoint] = [] - - /// Set these to -1 if unknown. - var totalRecords: Int64 = 0 - - var totalBytes: Int64 = 0 - - /// - /// FlightEndpoints are in the same order as the data. - var ordered: Bool = false - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor? = nil + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// The schema of the dataset in its IPC form: + /// 4 bytes - an optional IPC_CONTINUATION_TOKEN prefix + /// 4 bytes - the byte length of the payload + /// a flatbuffer Message whose header is the Schema + var schema: Data = .init() + + /// + /// The descriptor associated with this info. + var flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor { + get { return _flightDescriptor ?? Arrow_Flight_Protocol_FlightDescriptor() } + set { _flightDescriptor = newValue } + } + + /// Returns true if `flightDescriptor` has been explicitly set. + var hasFlightDescriptor: Bool { return _flightDescriptor != nil } + /// Clears the value of `flightDescriptor`. Subsequent reads from it will return its default value. + mutating func clearFlightDescriptor() { _flightDescriptor = nil } + + /// + /// A list of endpoints associated with the flight. To consume the + /// whole flight, all endpoints (and hence all Tickets) must be + /// consumed. Endpoints can be consumed in any order. + /// + /// In other words, an application can use multiple endpoints to + /// represent partitioned data. + /// + /// If the returned data has an ordering, an application can use + /// "FlightInfo.ordered = true" or should return the all data in a + /// single endpoint. Otherwise, there is no ordering defined on + /// endpoints or the data within. + /// + /// A client can read ordered data by reading data from returned + /// endpoints, in order, from front to back. + /// + /// Note that a client may ignore "FlightInfo.ordered = true". If an + /// ordering is important for an application, an application must + /// choose one of them: + /// + /// * An application requires that all clients must read data in + /// returned endpoints order. + /// * An application must return the all data in a single endpoint. 
+ var endpoint: [Arrow_Flight_Protocol_FlightEndpoint] = [] + + /// Set these to -1 if unknown. + var totalRecords: Int64 = 0 + + var totalBytes: Int64 = 0 + + /// + /// FlightEndpoints are in the same order as the data. + var ordered: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor? } /// /// A particular stream or split associated with a flight. struct Arrow_Flight_Protocol_FlightEndpoint { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// - /// Token used to retrieve this stream. - var ticket: Arrow_Flight_Protocol_Ticket { - get {return _ticket ?? Arrow_Flight_Protocol_Ticket()} - set {_ticket = newValue} - } - /// Returns true if `ticket` has been explicitly set. - var hasTicket: Bool {return self._ticket != nil} - /// Clears the value of `ticket`. Subsequent reads from it will return its default value. - mutating func clearTicket() {self._ticket = nil} - - /// - /// A list of URIs where this ticket can be redeemed via DoGet(). - /// - /// If the list is empty, the expectation is that the ticket can only - /// be redeemed on the current service where the ticket was - /// generated. - /// - /// If the list is not empty, the expectation is that the ticket can - /// be redeemed at any of the locations, and that the data returned - /// will be equivalent. In this case, the ticket may only be redeemed - /// at one of the given locations, and not (necessarily) on the - /// current service. - /// - /// In other words, an application can use multiple locations to - /// represent redundant and/or load balanced services. - var location: [Arrow_Flight_Protocol_Location] = [] - - /// - /// Expiration time of this stream. If present, clients may assume - /// they can retry DoGet requests. 
Otherwise, it is - /// application-defined whether DoGet requests may be retried. - var expirationTime: SwiftProtobuf.Google_Protobuf_Timestamp { - get {return _expirationTime ?? SwiftProtobuf.Google_Protobuf_Timestamp()} - set {_expirationTime = newValue} - } - /// Returns true if `expirationTime` has been explicitly set. - var hasExpirationTime: Bool {return self._expirationTime != nil} - /// Clears the value of `expirationTime`. Subsequent reads from it will return its default value. - mutating func clearExpirationTime() {self._expirationTime = nil} - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _ticket: Arrow_Flight_Protocol_Ticket? = nil - fileprivate var _expirationTime: SwiftProtobuf.Google_Protobuf_Timestamp? = nil + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// + /// Token used to retrieve this stream. + var ticket: Arrow_Flight_Protocol_Ticket { + get { return _ticket ?? Arrow_Flight_Protocol_Ticket() } + set { _ticket = newValue } + } + + /// Returns true if `ticket` has been explicitly set. + var hasTicket: Bool { return _ticket != nil } + /// Clears the value of `ticket`. Subsequent reads from it will return its default value. + mutating func clearTicket() { _ticket = nil } + + /// + /// A list of URIs where this ticket can be redeemed via DoGet(). + /// + /// If the list is empty, the expectation is that the ticket can only + /// be redeemed on the current service where the ticket was + /// generated. + /// + /// If the list is not empty, the expectation is that the ticket can + /// be redeemed at any of the locations, and that the data returned + /// will be equivalent. In this case, the ticket may only be redeemed + /// at one of the given locations, and not (necessarily) on the + /// current service. 
+ /// + /// In other words, an application can use multiple locations to + /// represent redundant and/or load balanced services. + var location: [Arrow_Flight_Protocol_Location] = [] + + /// + /// Expiration time of this stream. If present, clients may assume + /// they can retry DoGet requests. Otherwise, it is + /// application-defined whether DoGet requests may be retried. + var expirationTime: SwiftProtobuf.Google_Protobuf_Timestamp { + get { return _expirationTime ?? SwiftProtobuf.Google_Protobuf_Timestamp() } + set { _expirationTime = newValue } + } + + /// Returns true if `expirationTime` has been explicitly set. + var hasExpirationTime: Bool { return _expirationTime != nil } + /// Clears the value of `expirationTime`. Subsequent reads from it will return its default value. + mutating func clearExpirationTime() { _expirationTime = nil } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _ticket: Arrow_Flight_Protocol_Ticket? + fileprivate var _expirationTime: SwiftProtobuf.Google_Protobuf_Timestamp? } /// /// A location where a Flight service will accept retrieval of a particular /// stream given a ticket. struct Arrow_Flight_Protocol_Location { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var uri: String = String() + var uri: String = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -537,70 +540,71 @@ struct Arrow_Flight_Protocol_Location { /// Tickets are meant to be single use. It is an error/application-defined /// behavior to reuse a ticket. 
struct Arrow_Flight_Protocol_Ticket { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var ticket: Data = Data() + var ticket: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// A batch of Arrow data as part of a stream of batches. struct Arrow_Flight_Protocol_FlightData { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// - /// The descriptor of the data. This is only relevant when a client is - /// starting a new DoPut stream. - var flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor { - get {return _flightDescriptor ?? Arrow_Flight_Protocol_FlightDescriptor()} - set {_flightDescriptor = newValue} - } - /// Returns true if `flightDescriptor` has been explicitly set. - var hasFlightDescriptor: Bool {return self._flightDescriptor != nil} - /// Clears the value of `flightDescriptor`. Subsequent reads from it will return its default value. - mutating func clearFlightDescriptor() {self._flightDescriptor = nil} - - /// - /// Header for message data as described in Message.fbs::Message. - var dataHeader: Data = Data() - - /// - /// Application-defined metadata. - var appMetadata: Data = Data() - - /// - /// The actual batch of Arrow data. 
Preferably handled with minimal-copies - /// coming last in the definition to help with sidecar patterns (it is - /// expected that some implementations will fetch this field off the wire - /// with specialized code to avoid extra memory copies). - var dataBody: Data = Data() - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor? = nil + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// + /// The descriptor of the data. This is only relevant when a client is + /// starting a new DoPut stream. + var flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor { + get { return _flightDescriptor ?? Arrow_Flight_Protocol_FlightDescriptor() } + set { _flightDescriptor = newValue } + } + + /// Returns true if `flightDescriptor` has been explicitly set. + var hasFlightDescriptor: Bool { return _flightDescriptor != nil } + /// Clears the value of `flightDescriptor`. Subsequent reads from it will return its default value. + mutating func clearFlightDescriptor() { _flightDescriptor = nil } + + /// + /// Header for message data as described in Message.fbs::Message. + var dataHeader: Data = .init() + + /// + /// Application-defined metadata. + var appMetadata: Data = .init() + + /// + /// The actual batch of Arrow data. Preferably handled with minimal-copies + /// coming last in the definition to help with sidecar patterns (it is + /// expected that some implementations will fetch this field off the wire + /// with specialized code to avoid extra memory copies). + var dataBody: Data = .init() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor? } -///* +/// * /// The response message associated with the submission of a DoPut. 
struct Arrow_Flight_Protocol_PutResult { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var appMetadata: Data = Data() + var appMetadata: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } #if swift(>=5.5) && canImport(_Concurrency) @@ -625,742 +629,741 @@ extension Arrow_Flight_Protocol_Location: @unchecked Sendable {} extension Arrow_Flight_Protocol_Ticket: @unchecked Sendable {} extension Arrow_Flight_Protocol_FlightData: @unchecked Sendable {} extension Arrow_Flight_Protocol_PutResult: @unchecked Sendable {} -#endif // swift(>=5.5) && canImport(_Concurrency) +#endif // swift(>=5.5) && canImport(_Concurrency) // MARK: - Code below here is support for the SwiftProtobuf runtime. 
-fileprivate let _protobuf_package = "arrow.flight.protocol" +private let _protobuf_package = "arrow.flight.protocol" extension Arrow_Flight_Protocol_CancelStatus: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "CANCEL_STATUS_UNSPECIFIED"), - 1: .same(proto: "CANCEL_STATUS_CANCELLED"), - 2: .same(proto: "CANCEL_STATUS_CANCELLING"), - 3: .same(proto: "CANCEL_STATUS_NOT_CANCELLABLE"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "CANCEL_STATUS_UNSPECIFIED"), + 1: .same(proto: "CANCEL_STATUS_CANCELLED"), + 2: .same(proto: "CANCEL_STATUS_CANCELLING"), + 3: .same(proto: "CANCEL_STATUS_NOT_CANCELLABLE"), + ] } extension Arrow_Flight_Protocol_HandshakeRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".HandshakeRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "protocol_version"), - 2: .same(proto: "payload"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt64Field(value: &self.protocolVersion) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self.payload) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if self.protocolVersion != 0 { - try visitor.visitSingularUInt64Field(value: self.protocolVersion, fieldNumber: 1) - } - if !self.payload.isEmpty { - try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 2) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_HandshakeRequest, rhs: Arrow_Flight_Protocol_HandshakeRequest) -> Bool { - if lhs.protocolVersion != rhs.protocolVersion {return false} - if lhs.payload != rhs.payload {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".HandshakeRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "protocol_version"), + 2: .same(proto: "payload"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularUInt64Field(value: &protocolVersion) + case 2: try decoder.decodeSingularBytesField(value: &payload) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if protocolVersion != 0 { + try visitor.visitSingularUInt64Field(value: protocolVersion, fieldNumber: 1) + } + if !payload.isEmpty { + try visitor.visitSingularBytesField(value: payload, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_HandshakeRequest, rhs: Arrow_Flight_Protocol_HandshakeRequest) -> Bool { + if lhs.protocolVersion != rhs.protocolVersion { return false } + if lhs.payload != rhs.payload { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_HandshakeResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".HandshakeResponse" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "protocol_version"), - 2: .same(proto: "payload"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt64Field(value: &self.protocolVersion) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self.payload) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if self.protocolVersion != 0 { - try visitor.visitSingularUInt64Field(value: self.protocolVersion, fieldNumber: 1) - } - if !self.payload.isEmpty { - try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 2) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_HandshakeResponse, rhs: Arrow_Flight_Protocol_HandshakeResponse) -> Bool { - if lhs.protocolVersion != rhs.protocolVersion {return false} - if lhs.payload != rhs.payload {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".HandshakeResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "protocol_version"), + 2: .same(proto: "payload"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularUInt64Field(value: &protocolVersion) + case 2: try decoder.decodeSingularBytesField(value: &payload) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if protocolVersion != 0 { + try visitor.visitSingularUInt64Field(value: protocolVersion, fieldNumber: 1) + } + if !payload.isEmpty { + try visitor.visitSingularBytesField(value: payload, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_HandshakeResponse, rhs: Arrow_Flight_Protocol_HandshakeResponse) -> Bool { + if lhs.protocolVersion != rhs.protocolVersion { return false } + if lhs.payload != rhs.payload { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_BasicAuth: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".BasicAuth" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 2: .same(proto: "username"), - 3: .same(proto: "password"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 2: try { try decoder.decodeSingularStringField(value: &self.username) }() - case 3: try { try decoder.decodeSingularStringField(value: &self.password) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.username.isEmpty { - try visitor.visitSingularStringField(value: self.username, fieldNumber: 2) - } - if !self.password.isEmpty { - try visitor.visitSingularStringField(value: self.password, fieldNumber: 3) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_BasicAuth, rhs: Arrow_Flight_Protocol_BasicAuth) -> Bool { - if lhs.username != rhs.username {return false} - if lhs.password != rhs.password {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".BasicAuth" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 2: .same(proto: "username"), + 3: .same(proto: "password"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 2: try decoder.decodeSingularStringField(value: &username) + case 3: try decoder.decodeSingularStringField(value: &password) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !username.isEmpty { + try visitor.visitSingularStringField(value: username, fieldNumber: 2) + } + if !password.isEmpty { + try visitor.visitSingularStringField(value: password, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_BasicAuth, rhs: Arrow_Flight_Protocol_BasicAuth) -> Bool { + if lhs.username != rhs.username { return false } + if lhs.password != rhs.password { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Empty: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".Empty" - static let _protobuf_nameMap = SwiftProtobuf._NameMap() + static let protoMessageName: String = _protobuf_package + ".Empty" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() - mutating func decodeMessage(decoder: inout D) throws { - while let _ = try decoder.nextFieldNumber() { + mutating func decodeMessage(decoder: inout D) throws { + while let _ = try decoder.nextFieldNumber() {} } - } - func traverse(visitor: inout V) throws { - try unknownFields.traverse(visitor: &visitor) - } + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } - static func ==(lhs: Arrow_Flight_Protocol_Empty, rhs: Arrow_Flight_Protocol_Empty) -> Bool { - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Empty, rhs: Arrow_Flight_Protocol_Empty) -> Bool { + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension 
Arrow_Flight_Protocol_ActionType: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionType" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "type"), - 2: .same(proto: "description"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self.type) }() - case 2: try { try decoder.decodeSingularStringField(value: &self.description_p) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.type.isEmpty { - try visitor.visitSingularStringField(value: self.type, fieldNumber: 1) - } - if !self.description_p.isEmpty { - try visitor.visitSingularStringField(value: self.description_p, fieldNumber: 2) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_ActionType, rhs: Arrow_Flight_Protocol_ActionType) -> Bool { - if lhs.type != rhs.type {return false} - if lhs.description_p != rhs.description_p {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".ActionType" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "type"), + 2: .same(proto: "description"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &type) + case 2: try decoder.decodeSingularStringField(value: &description_p) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !type.isEmpty { + try visitor.visitSingularStringField(value: type, fieldNumber: 1) + } + if !description_p.isEmpty { + try visitor.visitSingularStringField(value: description_p, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_ActionType, rhs: Arrow_Flight_Protocol_ActionType) -> Bool { + if lhs.type != rhs.type { return false } + if lhs.description_p != rhs.description_p { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Criteria: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".Criteria" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "expression"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.expression) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.expression.isEmpty { - try visitor.visitSingularBytesField(value: self.expression, fieldNumber: 1) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Criteria, rhs: Arrow_Flight_Protocol_Criteria) -> Bool { - if lhs.expression != rhs.expression {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".Criteria" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "expression"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &expression) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !expression.isEmpty { + try visitor.visitSingularBytesField(value: expression, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Criteria, rhs: Arrow_Flight_Protocol_Criteria) -> Bool { + if lhs.expression != rhs.expression { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Action: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".Action" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "type"), - 2: .same(proto: "body"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self.type) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self.body) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.type.isEmpty { - try visitor.visitSingularStringField(value: self.type, fieldNumber: 1) - } - if !self.body.isEmpty { - try visitor.visitSingularBytesField(value: self.body, fieldNumber: 2) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Action, rhs: Arrow_Flight_Protocol_Action) -> Bool { - if lhs.type != rhs.type {return false} - if lhs.body != rhs.body {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".Action" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "type"), + 2: .same(proto: "body"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &type) + case 2: try decoder.decodeSingularBytesField(value: &body) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !type.isEmpty { + try visitor.visitSingularStringField(value: type, fieldNumber: 1) + } + if !body.isEmpty { + try visitor.visitSingularBytesField(value: body, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Action, rhs: Arrow_Flight_Protocol_Action) -> Bool { + if lhs.type != rhs.type { return false } + if lhs.body != rhs.body { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_CancelFlightInfoRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CancelFlightInfoRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "info"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._info) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._info { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_CancelFlightInfoRequest, rhs: Arrow_Flight_Protocol_CancelFlightInfoRequest) -> Bool { - if lhs._info != rhs._info {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CancelFlightInfoRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "info"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_info) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._info { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_CancelFlightInfoRequest, rhs: Arrow_Flight_Protocol_CancelFlightInfoRequest) -> Bool { + if lhs._info != rhs._info { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_RenewFlightEndpointRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".RenewFlightEndpointRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "endpoint"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._endpoint) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._endpoint { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_RenewFlightEndpointRequest, rhs: Arrow_Flight_Protocol_RenewFlightEndpointRequest) -> Bool { - if lhs._endpoint != rhs._endpoint {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".RenewFlightEndpointRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "endpoint"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_endpoint) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._endpoint { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_RenewFlightEndpointRequest, rhs: Arrow_Flight_Protocol_RenewFlightEndpointRequest) -> Bool { + if lhs._endpoint != rhs._endpoint { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Result: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".Result" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "body"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.body) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.body.isEmpty { - try visitor.visitSingularBytesField(value: self.body, fieldNumber: 1) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Result, rhs: Arrow_Flight_Protocol_Result) -> Bool { - if lhs.body != rhs.body {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".Result" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "body"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &body) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !body.isEmpty { + try visitor.visitSingularBytesField(value: body, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Result, rhs: Arrow_Flight_Protocol_Result) -> Bool { + if lhs.body != rhs.body { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_CancelFlightInfoResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CancelFlightInfoResult" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "status"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularEnumField(value: &self.status) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if self.status != .unspecified { - try visitor.visitSingularEnumField(value: self.status, fieldNumber: 1) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_CancelFlightInfoResult, rhs: Arrow_Flight_Protocol_CancelFlightInfoResult) -> Bool { - if lhs.status != rhs.status {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CancelFlightInfoResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "status"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularEnumField(value: &status) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if status != .unspecified { + try visitor.visitSingularEnumField(value: status, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_CancelFlightInfoResult, rhs: Arrow_Flight_Protocol_CancelFlightInfoResult) -> Bool { + if lhs.status != rhs.status { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_SchemaResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".SchemaResult" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "schema"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.schema) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.schema.isEmpty { - try visitor.visitSingularBytesField(value: self.schema, fieldNumber: 1) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_SchemaResult, rhs: Arrow_Flight_Protocol_SchemaResult) -> Bool { - if lhs.schema != rhs.schema {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".SchemaResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "schema"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &schema) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !schema.isEmpty { + try visitor.visitSingularBytesField(value: schema, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_SchemaResult, rhs: Arrow_Flight_Protocol_SchemaResult) -> Bool { + if lhs.schema != rhs.schema { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_FlightDescriptor: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".FlightDescriptor" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "type"), - 2: .same(proto: "cmd"), - 3: .same(proto: "path"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularEnumField(value: &self.type) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self.cmd) }() - case 3: try { try decoder.decodeRepeatedStringField(value: &self.path) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if self.type != .unknown { - try visitor.visitSingularEnumField(value: self.type, fieldNumber: 1) - } - if !self.cmd.isEmpty { - try visitor.visitSingularBytesField(value: self.cmd, fieldNumber: 2) - } - if !self.path.isEmpty { - try visitor.visitRepeatedStringField(value: self.path, fieldNumber: 3) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_FlightDescriptor, rhs: Arrow_Flight_Protocol_FlightDescriptor) -> Bool { - if lhs.type != rhs.type {return false} - if lhs.cmd != rhs.cmd {return false} - if lhs.path != rhs.path {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".FlightDescriptor" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "type"), + 2: .same(proto: "cmd"), + 3: .same(proto: "path"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularEnumField(value: &type) + case 2: try decoder.decodeSingularBytesField(value: &cmd) + case 3: try decoder.decodeRepeatedStringField(value: &path) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if type != .unknown { + try visitor.visitSingularEnumField(value: type, fieldNumber: 1) + } + if !cmd.isEmpty { + try visitor.visitSingularBytesField(value: cmd, fieldNumber: 2) + } + if !path.isEmpty { + try visitor.visitRepeatedStringField(value: path, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_FlightDescriptor, rhs: Arrow_Flight_Protocol_FlightDescriptor) -> Bool { + if lhs.type != rhs.type { return false } + if lhs.cmd != rhs.cmd { return false } + if lhs.path != rhs.path { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_FlightDescriptor.DescriptorType: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "UNKNOWN"), - 1: .same(proto: "PATH"), - 2: .same(proto: "CMD"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "UNKNOWN"), + 1: .same(proto: "PATH"), + 2: .same(proto: "CMD"), + ] } extension Arrow_Flight_Protocol_FlightInfo: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".FlightInfo" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "schema"), - 2: .standard(proto: "flight_descriptor"), - 3: .same(proto: "endpoint"), - 4: .standard(proto: "total_records"), - 5: .standard(proto: "total_bytes"), - 6: .same(proto: "ordered"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // 
The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.schema) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._flightDescriptor) }() - case 3: try { try decoder.decodeRepeatedMessageField(value: &self.endpoint) }() - case 4: try { try decoder.decodeSingularInt64Field(value: &self.totalRecords) }() - case 5: try { try decoder.decodeSingularInt64Field(value: &self.totalBytes) }() - case 6: try { try decoder.decodeSingularBoolField(value: &self.ordered) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - if !self.schema.isEmpty { - try visitor.visitSingularBytesField(value: self.schema, fieldNumber: 1) - } - try { if let v = self._flightDescriptor { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - } }() - if !self.endpoint.isEmpty { - try visitor.visitRepeatedMessageField(value: self.endpoint, fieldNumber: 3) - } - if self.totalRecords != 0 { - try visitor.visitSingularInt64Field(value: self.totalRecords, fieldNumber: 4) - } - if self.totalBytes != 0 { - try visitor.visitSingularInt64Field(value: self.totalBytes, fieldNumber: 5) - } - if self.ordered != false { - try visitor.visitSingularBoolField(value: self.ordered, fieldNumber: 6) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_FlightInfo, rhs: Arrow_Flight_Protocol_FlightInfo) -> Bool { - if lhs.schema != rhs.schema {return false} - if lhs._flightDescriptor != 
rhs._flightDescriptor {return false} - if lhs.endpoint != rhs.endpoint {return false} - if lhs.totalRecords != rhs.totalRecords {return false} - if lhs.totalBytes != rhs.totalBytes {return false} - if lhs.ordered != rhs.ordered {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".FlightInfo" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "schema"), + 2: .standard(proto: "flight_descriptor"), + 3: .same(proto: "endpoint"), + 4: .standard(proto: "total_records"), + 5: .standard(proto: "total_bytes"), + 6: .same(proto: "ordered"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &schema) + case 2: try decoder.decodeSingularMessageField(value: &_flightDescriptor) + case 3: try decoder.decodeRepeatedMessageField(value: &endpoint) + case 4: try decoder.decodeSingularInt64Field(value: &totalRecords) + case 5: try decoder.decodeSingularInt64Field(value: &totalBytes) + case 6: try decoder.decodeSingularBoolField(value: &ordered) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !schema.isEmpty { + try visitor.visitSingularBytesField(value: schema, fieldNumber: 1) + } + try { if let v = self._flightDescriptor { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + if !endpoint.isEmpty { + try visitor.visitRepeatedMessageField(value: endpoint, fieldNumber: 3) + } + if totalRecords != 0 { + try visitor.visitSingularInt64Field(value: totalRecords, fieldNumber: 4) + } + if totalBytes != 0 { + try visitor.visitSingularInt64Field(value: totalBytes, fieldNumber: 5) + } + if ordered != false { + try visitor.visitSingularBoolField(value: ordered, fieldNumber: 6) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_FlightInfo, rhs: Arrow_Flight_Protocol_FlightInfo) -> Bool { + if lhs.schema != rhs.schema { return false } + if lhs._flightDescriptor != rhs._flightDescriptor { return false } + if lhs.endpoint != rhs.endpoint { return false } + if lhs.totalRecords != rhs.totalRecords { return false } + if lhs.totalBytes != rhs.totalBytes { return false } + if lhs.ordered != rhs.ordered { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_FlightEndpoint: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".FlightEndpoint" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "ticket"), - 2: .same(proto: "location"), - 3: .standard(proto: "expiration_time"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._ticket) }() - case 2: try { try decoder.decodeRepeatedMessageField(value: &self.location) }() - case 3: try { try decoder.decodeSingularMessageField(value: &self._expirationTime) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._ticket { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } }() - if !self.location.isEmpty { - try visitor.visitRepeatedMessageField(value: self.location, fieldNumber: 2) - } - try { if let v = self._expirationTime { - try visitor.visitSingularMessageField(value: v, fieldNumber: 3) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_FlightEndpoint, rhs: Arrow_Flight_Protocol_FlightEndpoint) -> Bool { - if lhs._ticket != rhs._ticket {return false} - if lhs.location != rhs.location {return false} - if lhs._expirationTime != rhs._expirationTime {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".FlightEndpoint" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "ticket"), + 2: .same(proto: "location"), + 3: .standard(proto: "expiration_time"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_ticket) + case 2: try decoder.decodeRepeatedMessageField(value: &location) + case 3: try decoder.decodeSingularMessageField(value: &_expirationTime) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._ticket { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !location.isEmpty { + try visitor.visitRepeatedMessageField(value: location, fieldNumber: 2) + } + try { if let v = self._expirationTime { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_FlightEndpoint, rhs: Arrow_Flight_Protocol_FlightEndpoint) -> Bool { + if lhs._ticket != rhs._ticket { return false } + if lhs.location != rhs.location { return false } + if lhs._expirationTime != rhs._expirationTime { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Location: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".Location" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "uri"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self.uri) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.uri.isEmpty { - try visitor.visitSingularStringField(value: self.uri, fieldNumber: 1) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Location, rhs: Arrow_Flight_Protocol_Location) -> Bool { - if lhs.uri != rhs.uri {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".Location" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "uri"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &uri) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !uri.isEmpty { + try visitor.visitSingularStringField(value: uri, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Location, rhs: Arrow_Flight_Protocol_Location) -> Bool { + if lhs.uri != rhs.uri { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Ticket: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".Ticket" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "ticket"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.ticket) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.ticket.isEmpty { - try visitor.visitSingularBytesField(value: self.ticket, fieldNumber: 1) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Ticket, rhs: Arrow_Flight_Protocol_Ticket) -> Bool { - if lhs.ticket != rhs.ticket {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".Ticket" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "ticket"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &ticket) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !ticket.isEmpty { + try visitor.visitSingularBytesField(value: ticket, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Ticket, rhs: Arrow_Flight_Protocol_Ticket) -> Bool { + if lhs.ticket != rhs.ticket { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_FlightData: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".FlightData" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "flight_descriptor"), - 2: .standard(proto: "data_header"), - 3: .standard(proto: "app_metadata"), - 1000: .standard(proto: "data_body"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._flightDescriptor) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self.dataHeader) }() - case 3: try { try decoder.decodeSingularBytesField(value: &self.appMetadata) }() - case 1000: try { try decoder.decodeSingularBytesField(value: &self.dataBody) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._flightDescriptor { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } }() - if !self.dataHeader.isEmpty { - try visitor.visitSingularBytesField(value: self.dataHeader, fieldNumber: 2) - } - if !self.appMetadata.isEmpty { - try visitor.visitSingularBytesField(value: self.appMetadata, fieldNumber: 3) - } - if !self.dataBody.isEmpty { - try visitor.visitSingularBytesField(value: self.dataBody, fieldNumber: 1000) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_FlightData, rhs: Arrow_Flight_Protocol_FlightData) -> Bool { - if lhs._flightDescriptor != rhs._flightDescriptor {return false} - if lhs.dataHeader != rhs.dataHeader {return false} - if lhs.appMetadata != rhs.appMetadata {return false} - if lhs.dataBody != rhs.dataBody {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".FlightData" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "flight_descriptor"), + 2: .standard(proto: "data_header"), + 3: .standard(proto: "app_metadata"), + 1000: .standard(proto: "data_body"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_flightDescriptor) + case 2: try decoder.decodeSingularBytesField(value: &dataHeader) + case 3: try decoder.decodeSingularBytesField(value: &appMetadata) + case 1000: try decoder.decodeSingularBytesField(value: &dataBody) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._flightDescriptor { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !dataHeader.isEmpty { + try visitor.visitSingularBytesField(value: dataHeader, fieldNumber: 2) + } + if !appMetadata.isEmpty { + try visitor.visitSingularBytesField(value: appMetadata, fieldNumber: 3) + } + if !dataBody.isEmpty { + try visitor.visitSingularBytesField(value: dataBody, fieldNumber: 1000) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_FlightData, rhs: Arrow_Flight_Protocol_FlightData) -> Bool { + if lhs._flightDescriptor != rhs._flightDescriptor { return false } + if lhs.dataHeader != rhs.dataHeader { return false } + if lhs.appMetadata != rhs.appMetadata { return false } + if lhs.dataBody != rhs.dataBody { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_PutResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".PutResult" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "app_metadata"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - 
while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.appMetadata) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.appMetadata.isEmpty { - try visitor.visitSingularBytesField(value: self.appMetadata, fieldNumber: 1) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_PutResult, rhs: Arrow_Flight_Protocol_PutResult) -> Bool { - if lhs.appMetadata != rhs.appMetadata {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".PutResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "app_metadata"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &appMetadata) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !appMetadata.isEmpty { + try visitor.visitSingularBytesField(value: appMetadata, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_PutResult, rhs: Arrow_Flight_Protocol_PutResult) -> Bool { + if lhs.appMetadata != rhs.appMetadata { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } diff --git a/Sources/ArrowFlight/FlightAction.swift b/Sources/ArrowFlight/FlightAction.swift index 8db12aa..60d2791 100644 --- a/Sources/ArrowFlight/FlightAction.swift +++ b/Sources/ArrowFlight/FlightAction.swift @@ -21,8 +21,8 @@ public class FlightAction { public let type: String public let body: Data init(_ action: Arrow_Flight_Protocol_Action) { - self.type = action.type - self.body = action.body + type = action.type + body = action.body } public init(_ type: String, body: Data = Data()) { @@ -32,8 +32,8 @@ public class FlightAction { func toProtocol() -> Arrow_Flight_Protocol_Action { var flightAction = Arrow_Flight_Protocol_Action() - flightAction.type = self.type - flightAction.body = self.body + flightAction.type = type + flightAction.body = body return flightAction } } diff --git a/Sources/ArrowFlight/FlightActionType.swift b/Sources/ArrowFlight/FlightActionType.swift index 0b4778c..9099cfd 100644 --- a/Sources/ArrowFlight/FlightActionType.swift +++ b/Sources/ArrowFlight/FlightActionType.swift @@ -20,10 +20,10 @@ public class FlightActionType { public let type: String public let description: String init(_ actionType: Arrow_Flight_Protocol_ActionType) { - self.type = actionType.type - self.description = actionType.description_p - + type = actionType.type + description = actionType.description_p } + public init(_ type: String, description: String) { 
self.type = type self.description = description @@ -31,8 +31,8 @@ public class FlightActionType { func toProtocol() -> Arrow_Flight_Protocol_ActionType { var actionType = Arrow_Flight_Protocol_ActionType() - actionType.type = self.type - actionType.description_p = self.description + actionType.type = type + actionType.description_p = description return actionType } } diff --git a/Sources/ArrowFlight/FlightClient.swift b/Sources/ArrowFlight/FlightClient.swift index ef3e4fa..7f061f2 100644 --- a/Sources/ArrowFlight/FlightClient.swift +++ b/Sources/ArrowFlight/FlightClient.swift @@ -15,18 +15,18 @@ // specific language governing permissions and limitations // under the License. +import Arrow import struct Foundation.Data import struct Foundation.URL import GRPC import NIOCore import NIOPosix -import Arrow public class FlightClient { let client: Arrow_Flight_Protocol_FlightServiceAsyncClient let allowReadingUnalignedBuffers: Bool - public init(channel: GRPCChannel, allowReadingUnalignedBuffers: Bool = false ) { + public init(channel: GRPCChannel, allowReadingUnalignedBuffers: Bool = false) { client = Arrow_Flight_Protocol_FlightServiceAsyncClient(channel: channel) self.allowReadingUnalignedBuffers = allowReadingUnalignedBuffers } @@ -41,10 +41,11 @@ public class FlightClient { data.dataHeader, dataBody: data.dataBody, result: arrowResult, - useUnalignedBuffers: allowReadingUnalignedBuffers) { + useUnalignedBuffers: allowReadingUnalignedBuffers + ) { case .success: continue - case .failure(let error): + case let .failure(error): throw error } } @@ -59,26 +60,28 @@ public class FlightClient { ) async throws { let writer = ArrowWriter() switch writer.toMessage(recordBatches[0].schema) { - case .success(let schemaData): + case let .success(schemaData): try await requestStream.send( FlightData( schemaData, dataBody: Data(), - flightDescriptor: descriptor).toProtocol()) + flightDescriptor: descriptor + ).toProtocol()) for recordBatch in recordBatches { switch 
writer.toMessage(recordBatch) { - case .success(let data): + case let .success(data): try await requestStream.send( FlightData( data[0], dataBody: data[1], - flightDescriptor: descriptor).toProtocol()) - case .failure(let error): + flightDescriptor: descriptor + ).toProtocol()) + case let .failure(error): throw error } } requestStream.finish() - case .failure(let error): + case let .failure(error): throw error } } @@ -92,7 +95,8 @@ public class FlightClient { public func listFlights( _ criteria: FlightCriteria, - closure: (FlightInfo) throws -> Void) async throws { + closure: (FlightInfo) throws -> Void + ) async throws { let listFlights = client.makeListFlightsCall(criteria.toProtocol()) for try await data in listFlights.responseStream { try closure(FlightInfo(data)) @@ -108,19 +112,21 @@ public class FlightClient { public func getSchema(_ descriptor: FlightDescriptor) async throws -> FlightSchemaResult { let schemaResultResponse = client.makeGetSchemaCall(descriptor.toProtocol()) - return FlightSchemaResult(try await schemaResultResponse.response) + return try FlightSchemaResult(await schemaResultResponse.response) } public func doGet( _ ticket: FlightTicket, - readerResultClosure: (ArrowReader.ArrowReaderResult) throws -> Void) async throws { + readerResultClosure: (ArrowReader.ArrowReaderResult) throws -> Void + ) async throws { let getResult = client.makeDoGetCall(ticket.toProtocol()) - try readerResultClosure(try await readMessages(getResult.responseStream)) + try readerResultClosure(await readMessages(getResult.responseStream)) } public func doGet( _ ticket: FlightTicket, - flightDataClosure: (FlightData) throws -> Void) async throws { + flightDataClosure: (FlightData) throws -> Void + ) async throws { let getResult = client.makeDoGetCall(ticket.toProtocol()) for try await data in getResult.responseStream { try flightDataClosure(FlightData(data)) @@ -130,7 +136,8 @@ public class FlightClient { public func doPut( _ descriptor: FlightDescriptor, recordBatches: 
[RecordBatch], - closure: (FlightPutResult) throws -> Void) async throws { + closure: (FlightPutResult) throws -> Void + ) async throws { if recordBatches.isEmpty { throw ArrowFlightError.emptyCollection } @@ -166,14 +173,15 @@ public class FlightClient { public func doExchange( _ descriptor: FlightDescriptor, recordBatches: [RecordBatch], - closure: (ArrowReader.ArrowReaderResult) throws -> Void) async throws { + closure: (ArrowReader.ArrowReaderResult) throws -> Void + ) async throws { if recordBatches.isEmpty { throw ArrowFlightError.emptyCollection } let exchangeCall = client.makeDoExchangeCall() try await writeBatches(exchangeCall.requestStream, descriptor: descriptor, recordBatches: recordBatches) - try closure(try await readMessages(exchangeCall.responseStream)) + try closure(await readMessages(exchangeCall.responseStream)) } public func doExchange(flightData: FlightData, closure: (FlightData) throws -> Void) async throws { diff --git a/Sources/ArrowFlight/FlightData.swift b/Sources/ArrowFlight/FlightData.swift index 84db8c5..d9533a7 100644 --- a/Sources/ArrowFlight/FlightData.swift +++ b/Sources/ArrowFlight/FlightData.swift @@ -33,17 +33,17 @@ public class FlightData { public init(_ dataHeader: Data, dataBody: Data, flightDescriptor: FlightDescriptor? 
= nil) { if flightDescriptor != nil { - self.flightData = Arrow_Flight_Protocol_FlightData.with { + flightData = Arrow_Flight_Protocol_FlightData.with { $0.dataHeader = dataHeader $0.dataBody = dataBody $0.flightDescriptor = flightDescriptor!.toProtocol() } } else { - self.flightData = Arrow_Flight_Protocol_FlightData.with { + flightData = Arrow_Flight_Protocol_FlightData.with { $0.dataBody = dataBody } } } - func toProtocol() -> Arrow_Flight_Protocol_FlightData { self.flightData } + func toProtocol() -> Arrow_Flight_Protocol_FlightData { flightData } } diff --git a/Sources/ArrowFlight/FlightDescriptor.swift b/Sources/ArrowFlight/FlightDescriptor.swift index 02712aa..0facb7c 100644 --- a/Sources/ArrowFlight/FlightDescriptor.swift +++ b/Sources/ArrowFlight/FlightDescriptor.swift @@ -29,28 +29,28 @@ public class FlightDescriptor { public let paths: [String] init(_ descriptor: Arrow_Flight_Protocol_FlightDescriptor) { - self.type = descriptor.type == .cmd ? .cmd : .path - self.cmd = descriptor.cmd - self.paths = descriptor.path + type = descriptor.type == .cmd ? .cmd : .path + cmd = descriptor.cmd + paths = descriptor.path } public init(cmd: Data) { - self.type = .cmd + type = .cmd self.cmd = cmd - self.paths = [String]() + paths = [String]() } public init(paths: [String]) { - self.type = .path - self.cmd = Data() + type = .path + cmd = Data() self.paths = paths } func toProtocol() -> Arrow_Flight_Protocol_FlightDescriptor { var descriptor = Arrow_Flight_Protocol_FlightDescriptor() - descriptor.type = self.type == .cmd ? .cmd : .path - descriptor.cmd = self.cmd - descriptor.path = self.paths + descriptor.type = type == .cmd ? 
.cmd : .path + descriptor.cmd = cmd + descriptor.path = paths return descriptor } } diff --git a/Sources/ArrowFlight/FlightEndpoint.swift b/Sources/ArrowFlight/FlightEndpoint.swift index 0493772..85aec08 100644 --- a/Sources/ArrowFlight/FlightEndpoint.swift +++ b/Sources/ArrowFlight/FlightEndpoint.swift @@ -20,8 +20,8 @@ public class FlightEndpoint { let ticket: FlightTicket let locations: [FlightLocation] init(_ endpoint: Arrow_Flight_Protocol_FlightEndpoint) { - self.ticket = FlightTicket(endpoint.ticket.ticket) - self.locations = endpoint.location.map {return FlightLocation($0)} + ticket = FlightTicket(endpoint.ticket.ticket) + locations = endpoint.location.map { FlightLocation($0) } } public init(_ ticket: FlightTicket, locations: [FlightLocation]) { @@ -31,8 +31,8 @@ public class FlightEndpoint { func toProtocol() -> Arrow_Flight_Protocol_FlightEndpoint { var endpoint = Arrow_Flight_Protocol_FlightEndpoint() - endpoint.ticket = self.ticket.toProtocol() - endpoint.location = self.locations.map { $0.toProtocol() } + endpoint.ticket = ticket.toProtocol() + endpoint.location = locations.map { $0.toProtocol() } return endpoint } } diff --git a/Sources/ArrowFlight/FlightInfo.swift b/Sources/ArrowFlight/FlightInfo.swift index eb43aa3..6929c69 100644 --- a/Sources/ArrowFlight/FlightInfo.swift +++ b/Sources/ArrowFlight/FlightInfo.swift @@ -15,8 +15,8 @@ // specific language governing permissions and limitations // under the License. -import Foundation import Arrow +import Foundation public class FlightInfo { let flightInfo: Arrow_Flight_Protocol_FlightInfo @@ -25,10 +25,11 @@ public class FlightInfo { } public var endpoints: [FlightEndpoint] { - return self.flightInfo.endpoint.map { FlightEndpoint($0) } + return flightInfo.endpoint.map { FlightEndpoint($0) } } + public var schema: ArrowSchema? 
{ - return schemaFromMessage(self.flightInfo.schema) + return schemaFromMessage(flightInfo.schema) } var endpoint: [Arrow_Flight_Protocol_FlightEndpoint] = [] @@ -38,13 +39,13 @@ public class FlightInfo { public init(_ schema: Data, endpoints: [FlightEndpoint] = [FlightEndpoint](), descriptor: FlightDescriptor? = nil) { if let localDescriptor = descriptor { - self.flightInfo = Arrow_Flight_Protocol_FlightInfo.with { + flightInfo = Arrow_Flight_Protocol_FlightInfo.with { $0.schema = schema $0.flightDescriptor = localDescriptor.toProtocol() $0.endpoint = endpoints.map { $0.toProtocol() } } } else { - self.flightInfo = Arrow_Flight_Protocol_FlightInfo.with { + flightInfo = Arrow_Flight_Protocol_FlightInfo.with { $0.schema = schema $0.endpoint = endpoints.map { $0.toProtocol() } } @@ -52,6 +53,6 @@ public class FlightInfo { } func toProtocol() -> Arrow_Flight_Protocol_FlightInfo { - return self.flightInfo + return flightInfo } } diff --git a/Sources/ArrowFlight/FlightLocation.swift b/Sources/ArrowFlight/FlightLocation.swift index 9c89d10..bbdd2da 100644 --- a/Sources/ArrowFlight/FlightLocation.swift +++ b/Sources/ArrowFlight/FlightLocation.swift @@ -21,7 +21,7 @@ public class FlightLocation { public let uri: String init(_ location: Arrow_Flight_Protocol_Location) { - self.uri = location.uri + uri = location.uri } public init(_ uri: String) { diff --git a/Sources/ArrowFlight/FlightPutResult.swift b/Sources/ArrowFlight/FlightPutResult.swift index 3b22f8f..4d6760d 100644 --- a/Sources/ArrowFlight/FlightPutResult.swift +++ b/Sources/ArrowFlight/FlightPutResult.swift @@ -24,12 +24,12 @@ public class FlightPutResult { } init(_ putResult: Arrow_Flight_Protocol_PutResult) { - self.appMetadata = putResult.appMetadata + appMetadata = putResult.appMetadata } func toProtocol() -> Arrow_Flight_Protocol_PutResult { var putResult = Arrow_Flight_Protocol_PutResult() - putResult.appMetadata = self.appMetadata + putResult.appMetadata = appMetadata return putResult } } diff --git 
a/Sources/ArrowFlight/FlightResult.swift b/Sources/ArrowFlight/FlightResult.swift index d7cf828..21be08f 100644 --- a/Sources/ArrowFlight/FlightResult.swift +++ b/Sources/ArrowFlight/FlightResult.swift @@ -20,7 +20,7 @@ import Foundation public class FlightResult { public let body: Data init(_ result: Arrow_Flight_Protocol_Result) { - self.body = result.body + body = result.body } public init(_ body: Data) { @@ -29,7 +29,7 @@ public class FlightResult { func toProtocol() -> Arrow_Flight_Protocol_Result { var result = Arrow_Flight_Protocol_Result() - result.body = self.body + result.body = body return result } } diff --git a/Sources/ArrowFlight/FlightSchemaResult.swift b/Sources/ArrowFlight/FlightSchemaResult.swift index 7dea98a..49f8c46 100644 --- a/Sources/ArrowFlight/FlightSchemaResult.swift +++ b/Sources/ArrowFlight/FlightSchemaResult.swift @@ -15,18 +15,18 @@ // specific language governing permissions and limitations // under the License. -import Foundation import Arrow +import Foundation public class FlightSchemaResult { let schemaResult: Arrow_Flight_Protocol_SchemaResult public var schema: ArrowSchema? { - return schemaFromMessage(self.schemaResult.schema) + return schemaFromMessage(schemaResult.schema) } public init(_ schema: Data) { - self.schemaResult = Arrow_Flight_Protocol_SchemaResult.with { + schemaResult = Arrow_Flight_Protocol_SchemaResult.with { $0.schema = schema } } diff --git a/Sources/ArrowFlight/FlightServer.swift b/Sources/ArrowFlight/FlightServer.swift index 8db44ab..a78439a 100644 --- a/Sources/ArrowFlight/FlightServer.swift +++ b/Sources/ArrowFlight/FlightServer.swift @@ -15,12 +15,12 @@ // specific language governing permissions and limitations // under the License. +import Arrow import Foundation import GRPC import NIO import NIOConcurrencyHelpers import SwiftProtobuf -import Arrow public enum ArrowFlightError: Error { case unknown(String?) 
@@ -32,13 +32,13 @@ public enum ArrowFlightError: Error { public func schemaToMessage(_ schema: ArrowSchema) throws -> Data { let arrowWriter = ArrowWriter() switch arrowWriter.toMessage(schema) { - case .success(let result): + case let .success(result): var outputResult = Data() - withUnsafeBytes(of: Int32(0).littleEndian) {outputResult.append(Data($0))} - withUnsafeBytes(of: Int32(result.count).littleEndian) {outputResult.append(Data($0))} + withUnsafeBytes(of: Int32(0).littleEndian) { outputResult.append(Data($0)) } + withUnsafeBytes(of: Int32(result.count).littleEndian) { outputResult.append(Data($0)) } outputResult.append(result) return outputResult - case .failure(let error): + case let .failure(error): throw error } } @@ -84,22 +84,22 @@ public func makeFlightServer(_ handler: ArrowFlightServer) -> CallHandlerProvide return InternalFlightServer(handler) } -internal final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAsyncProvider { +final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAsyncProvider { let arrowFlightServer: ArrowFlightServer? init(_ arrowFlightServer: ArrowFlightServer?) 
{ self.arrowFlightServer = arrowFlightServer } - func handshake(requestStream: GRPC.GRPCAsyncRequestStream, - responseStream: GRPC.GRPCAsyncResponseStreamWriter, - context: GRPC.GRPCAsyncServerCallContext) async throws { + func handshake(requestStream _: GRPC.GRPCAsyncRequestStream, + responseStream _: GRPC.GRPCAsyncResponseStreamWriter, + context _: GRPC.GRPCAsyncServerCallContext) async throws { throw ArrowFlightError.notImplemented() } func listFlights(request: Arrow_Flight_Protocol_Criteria, responseStream: GRPC.GRPCAsyncResponseStreamWriter, - context: GRPC.GRPCAsyncServerCallContext) async throws { + context _: GRPC.GRPCAsyncServerCallContext) async throws { if let server = arrowFlightServer { let writer = FlightInfoStreamWriter(responseStream) try await server.listFlights(FlightCriteria(request), writer: writer) @@ -110,7 +110,7 @@ internal final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAs } func getFlightInfo(request: Arrow_Flight_Protocol_FlightDescriptor, - context: GRPC.GRPCAsyncServerCallContext) async throws -> Arrow_Flight_Protocol_FlightInfo { + context _: GRPC.GRPCAsyncServerCallContext) async throws -> Arrow_Flight_Protocol_FlightInfo { if let server = arrowFlightServer { return try await server.getFlightInfo(FlightDescriptor(request)).toProtocol() } @@ -119,7 +119,7 @@ internal final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAs } func getSchema(request: Arrow_Flight_Protocol_FlightDescriptor, - context: GRPC.GRPCAsyncServerCallContext) async throws -> Arrow_Flight_Protocol_SchemaResult { + context _: GRPC.GRPCAsyncServerCallContext) async throws -> Arrow_Flight_Protocol_SchemaResult { if let server = arrowFlightServer { return try await server.getSchema(FlightDescriptor(request)).toProtocol() } @@ -129,7 +129,7 @@ internal final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAs func doGet(request: Arrow_Flight_Protocol_Ticket, responseStream: GRPC.GRPCAsyncResponseStreamWriter, - context: 
GRPC.GRPCAsyncServerCallContext) async throws { + context _: GRPC.GRPCAsyncServerCallContext) async throws { if let server = arrowFlightServer { let writer = RecordBatchStreamWriter(responseStream) let ticket = FlightTicket(request) @@ -142,7 +142,7 @@ internal final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAs func doPut(requestStream: GRPC.GRPCAsyncRequestStream, responseStream: GRPC.GRPCAsyncResponseStreamWriter, - context: GRPC.GRPCAsyncServerCallContext) async throws { + context _: GRPC.GRPCAsyncServerCallContext) async throws { if let server = arrowFlightServer { let reader = RecordBatchStreamReader(requestStream) let writer = PutResultDataStreamWriter(responseStream) @@ -155,7 +155,7 @@ internal final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAs func doExchange(requestStream: GRPC.GRPCAsyncRequestStream, responseStream: GRPC.GRPCAsyncResponseStreamWriter, - context: GRPC.GRPCAsyncServerCallContext) async throws { + context _: GRPC.GRPCAsyncServerCallContext) async throws { if let server = arrowFlightServer { let reader = RecordBatchStreamReader(requestStream) let writer = RecordBatchStreamWriter(responseStream) @@ -168,7 +168,7 @@ internal final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAs func doAction(request: Arrow_Flight_Protocol_Action, responseStream: GRPC.GRPCAsyncResponseStreamWriter, - context: GRPC.GRPCAsyncServerCallContext) async throws { + context _: GRPC.GRPCAsyncServerCallContext) async throws { if let server = arrowFlightServer { try await server.doAction(FlightAction(request), writer: ResultStreamWriter(responseStream)) return @@ -177,9 +177,9 @@ internal final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAs throw ArrowFlightError.notImplemented() } - func listActions(request: Arrow_Flight_Protocol_Empty, + func listActions(request _: Arrow_Flight_Protocol_Empty, responseStream: GRPC.GRPCAsyncResponseStreamWriter, - context: GRPC.GRPCAsyncServerCallContext) 
async throws { + context _: GRPC.GRPCAsyncServerCallContext) async throws { if let server = arrowFlightServer { let writer = ActionTypeStreamWriter(responseStream) try await server.listActions(writer) @@ -189,6 +189,5 @@ internal final class InternalFlightServer: Arrow_Flight_Protocol_FlightServiceAs throw ArrowFlightError.notImplemented() } - internal var interceptors: Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol? { return nil } - + var interceptors: Arrow_Flight_Protocol_FlightServiceServerInterceptorFactoryProtocol? { return nil } } diff --git a/Sources/ArrowFlight/FlightSql.pb.swift b/Sources/ArrowFlight/FlightSql.pb.swift index 18b839f..629a0b2 100644 --- a/Sources/ArrowFlight/FlightSql.pb.swift +++ b/Sources/ArrowFlight/FlightSql.pb.swift @@ -32,2235 +32,2213 @@ import SwiftProtobuf // incompatible with the version of SwiftProtobuf to which you are linking. // Please ensure that you are building against the same version of the API // that was used to generate this file. -fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { - struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} - typealias Version = _2 +private struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 } /// Options for CommandGetSqlInfo. enum Arrow_Flight_Protocol_Sql_SqlInfo: SwiftProtobuf.Enum { - typealias RawValue = Int - - /// Retrieves a UTF-8 string with the name of the Flight SQL Server. - case flightSqlServerName // = 0 - - /// Retrieves a UTF-8 string with the native version of the Flight SQL Server. - case flightSqlServerVersion // = 1 - - /// Retrieves a UTF-8 string with the Arrow format version of the Flight SQL Server. - case flightSqlServerArrowVersion // = 2 - - /// - /// Retrieves a boolean value indicating whether the Flight SQL Server is read only. 
- /// - /// Returns: - /// - false: if read-write - /// - true: if read only - case flightSqlServerReadOnly // = 3 - - /// - /// Retrieves a boolean value indicating whether the Flight SQL Server supports executing - /// SQL queries. - /// - /// Note that the absence of this info (as opposed to a false value) does not necessarily - /// mean that SQL is not supported, as this property was not originally defined. - case flightSqlServerSql // = 4 - - /// - /// Retrieves a boolean value indicating whether the Flight SQL Server supports executing - /// Substrait plans. - case flightSqlServerSubstrait // = 5 - - /// - /// Retrieves a string value indicating the minimum supported Substrait version, or null - /// if Substrait is not supported. - case flightSqlServerSubstraitMinVersion // = 6 - - /// - /// Retrieves a string value indicating the maximum supported Substrait version, or null - /// if Substrait is not supported. - case flightSqlServerSubstraitMaxVersion // = 7 - - /// - /// Retrieves an int32 indicating whether the Flight SQL Server supports the - /// BeginTransaction/EndTransaction/BeginSavepoint/EndSavepoint actions. - /// - /// Even if this is not supported, the database may still support explicit "BEGIN - /// TRANSACTION"/"COMMIT" SQL statements (see SQL_TRANSACTIONS_SUPPORTED); this property - /// is only about whether the server implements the Flight SQL API endpoints. - /// - /// The possible values are listed in `SqlSupportedTransaction`. - case flightSqlServerTransaction // = 8 - - /// - /// Retrieves a boolean value indicating whether the Flight SQL Server supports explicit - /// query cancellation (the CancelQuery action). - case flightSqlServerCancel // = 9 - - /// - /// Retrieves an int32 indicating the timeout (in milliseconds) for prepared statement handles. - /// - /// If 0, there is no timeout. Servers should reset the timeout when the handle is used in a command. 
- case flightSqlServerStatementTimeout // = 100 - - /// - /// Retrieves an int32 indicating the timeout (in milliseconds) for transactions, since transactions are not tied to a connection. - /// - /// If 0, there is no timeout. Servers should reset the timeout when the handle is used in a command. - case flightSqlServerTransactionTimeout // = 101 - - /// - /// Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of catalogs. - /// - /// Returns: - /// - false: if it doesn't support CREATE and DROP of catalogs. - /// - true: if it supports CREATE and DROP of catalogs. - case sqlDdlCatalog // = 500 - - /// - /// Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of schemas. - /// - /// Returns: - /// - false: if it doesn't support CREATE and DROP of schemas. - /// - true: if it supports CREATE and DROP of schemas. - case sqlDdlSchema // = 501 - - /// - /// Indicates whether the Flight SQL Server supports CREATE and DROP of tables. - /// - /// Returns: - /// - false: if it doesn't support CREATE and DROP of tables. - /// - true: if it supports CREATE and DROP of tables. - case sqlDdlTable // = 502 - - /// - /// Retrieves a int32 ordinal representing the case sensitivity of catalog, table, schema and table names. - /// - /// The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`. - case sqlIdentifierCase // = 503 - - /// Retrieves a UTF-8 string with the supported character(s) used to surround a delimited identifier. - case sqlIdentifierQuoteChar // = 504 - - /// - /// Retrieves a int32 describing the case sensitivity of quoted identifiers. - /// - /// The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`. - case sqlQuotedIdentifierCase // = 505 - - /// - /// Retrieves a boolean value indicating whether all tables are selectable. 
- /// - /// Returns: - /// - false: if not all tables are selectable or if none are; - /// - true: if all tables are selectable. - case sqlAllTablesAreSelectable // = 506 - - /// - /// Retrieves the null ordering. - /// - /// Returns a int32 ordinal for the null ordering being used, as described in - /// `arrow.flight.protocol.sql.SqlNullOrdering`. - case sqlNullOrdering // = 507 - - /// Retrieves a UTF-8 string list with values of the supported keywords. - case sqlKeywords // = 508 - - /// Retrieves a UTF-8 string list with values of the supported numeric functions. - case sqlNumericFunctions // = 509 - - /// Retrieves a UTF-8 string list with values of the supported string functions. - case sqlStringFunctions // = 510 - - /// Retrieves a UTF-8 string list with values of the supported system functions. - case sqlSystemFunctions // = 511 - - /// Retrieves a UTF-8 string list with values of the supported datetime functions. - case sqlDatetimeFunctions // = 512 - - /// - /// Retrieves the UTF-8 string that can be used to escape wildcard characters. - /// This is the string that can be used to escape '_' or '%' in the catalog search parameters that are a pattern - /// (and therefore use one of the wildcard characters). - /// The '_' character represents any single character; the '%' character represents any sequence of zero or more - /// characters. - case sqlSearchStringEscape // = 513 - - /// - /// Retrieves a UTF-8 string with all the "extra" characters that can be used in unquoted identifier names - /// (those beyond a-z, A-Z, 0-9 and _). - case sqlExtraNameCharacters // = 514 - - /// - /// Retrieves a boolean value indicating whether column aliasing is supported. - /// If so, the SQL AS clause can be used to provide names for computed columns or to provide alias names for columns - /// as required. - /// - /// Returns: - /// - false: if column aliasing is unsupported; - /// - true: if column aliasing is supported. 
- case sqlSupportsColumnAliasing // = 515 - - /// - /// Retrieves a boolean value indicating whether concatenations between null and non-null values being - /// null are supported. - /// - /// - Returns: - /// - false: if concatenations between null and non-null values being null are unsupported; - /// - true: if concatenations between null and non-null values being null are supported. - case sqlNullPlusNullIsNull // = 516 - - /// - /// Retrieves a map where the key is the type to convert from and the value is a list with the types to convert to, - /// indicating the supported conversions. Each key and each item on the list value is a value to a predefined type on - /// SqlSupportsConvert enum. - /// The returned map will be: map> - case sqlSupportsConvert // = 517 - - /// - /// Retrieves a boolean value indicating whether, when table correlation names are supported, - /// they are restricted to being different from the names of the tables. - /// - /// Returns: - /// - false: if table correlation names are unsupported; - /// - true: if table correlation names are supported. - case sqlSupportsTableCorrelationNames // = 518 - - /// - /// Retrieves a boolean value indicating whether, when table correlation names are supported, - /// they are restricted to being different from the names of the tables. - /// - /// Returns: - /// - false: if different table correlation names are unsupported; - /// - true: if different table correlation names are supported - case sqlSupportsDifferentTableCorrelationNames // = 519 - - /// - /// Retrieves a boolean value indicating whether expressions in ORDER BY lists are supported. - /// - /// Returns: - /// - false: if expressions in ORDER BY are unsupported; - /// - true: if expressions in ORDER BY are supported; - case sqlSupportsExpressionsInOrderBy // = 520 - - /// - /// Retrieves a boolean value indicating whether using a column that is not in the SELECT statement in a GROUP BY - /// clause is supported. 
- /// - /// Returns: - /// - false: if using a column that is not in the SELECT statement in a GROUP BY clause is unsupported; - /// - true: if using a column that is not in the SELECT statement in a GROUP BY clause is supported. - case sqlSupportsOrderByUnrelated // = 521 - - /// - /// Retrieves the supported GROUP BY commands; - /// - /// Returns an int32 bitmask value representing the supported commands. - /// The returned bitmask should be parsed in order to retrieve the supported commands. - /// - /// For instance: - /// - return 0 (\b0) => [] (GROUP BY is unsupported); - /// - return 1 (\b1) => [SQL_GROUP_BY_UNRELATED]; - /// - return 2 (\b10) => [SQL_GROUP_BY_BEYOND_SELECT]; - /// - return 3 (\b11) => [SQL_GROUP_BY_UNRELATED, SQL_GROUP_BY_BEYOND_SELECT]. - /// Valid GROUP BY types are described under `arrow.flight.protocol.sql.SqlSupportedGroupBy`. - case sqlSupportedGroupBy // = 522 - - /// - /// Retrieves a boolean value indicating whether specifying a LIKE escape clause is supported. - /// - /// Returns: - /// - false: if specifying a LIKE escape clause is unsupported; - /// - true: if specifying a LIKE escape clause is supported. - case sqlSupportsLikeEscapeClause // = 523 - - /// - /// Retrieves a boolean value indicating whether columns may be defined as non-nullable. - /// - /// Returns: - /// - false: if columns cannot be defined as non-nullable; - /// - true: if columns may be defined as non-nullable. - case sqlSupportsNonNullableColumns // = 524 - - /// - /// Retrieves the supported SQL grammar level as per the ODBC specification. - /// - /// Returns an int32 bitmask value representing the supported SQL grammar level. - /// The returned bitmask should be parsed in order to retrieve the supported grammar levels. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (SQL grammar is unsupported); - /// - return 1 (\b1) => [SQL_MINIMUM_GRAMMAR]; - /// - return 2 (\b10) => [SQL_CORE_GRAMMAR]; - /// - return 3 (\b11) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR]; - /// - return 4 (\b100) => [SQL_EXTENDED_GRAMMAR]; - /// - return 5 (\b101) => [SQL_MINIMUM_GRAMMAR, SQL_EXTENDED_GRAMMAR]; - /// - return 6 (\b110) => [SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]; - /// - return 7 (\b111) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]. - /// Valid SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedSqlGrammar`. - case sqlSupportedGrammar // = 525 - - /// - /// Retrieves the supported ANSI92 SQL grammar level. - /// - /// Returns an int32 bitmask value representing the supported ANSI92 SQL grammar level. - /// The returned bitmask should be parsed in order to retrieve the supported commands. - /// - /// For instance: - /// - return 0 (\b0) => [] (ANSI92 SQL grammar is unsupported); - /// - return 1 (\b1) => [ANSI92_ENTRY_SQL]; - /// - return 2 (\b10) => [ANSI92_INTERMEDIATE_SQL]; - /// - return 3 (\b11) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL]; - /// - return 4 (\b100) => [ANSI92_FULL_SQL]; - /// - return 5 (\b101) => [ANSI92_ENTRY_SQL, ANSI92_FULL_SQL]; - /// - return 6 (\b110) => [ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL]; - /// - return 7 (\b111) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL]. - /// Valid ANSI92 SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedAnsi92SqlGrammarLevel`. - case sqlAnsi92SupportedLevel // = 526 - - /// - /// Retrieves a boolean value indicating whether the SQL Integrity Enhancement Facility is supported. - /// - /// Returns: - /// - false: if the SQL Integrity Enhancement Facility is supported; - /// - true: if the SQL Integrity Enhancement Facility is supported. 
- case sqlSupportsIntegrityEnhancementFacility // = 527 - - /// - /// Retrieves the support level for SQL OUTER JOINs. - /// - /// Returns a int32 ordinal for the SQL ordering being used, as described in - /// `arrow.flight.protocol.sql.SqlOuterJoinsSupportLevel`. - case sqlOuterJoinsSupportLevel // = 528 - - /// Retrieves a UTF-8 string with the preferred term for "schema". - case sqlSchemaTerm // = 529 - - /// Retrieves a UTF-8 string with the preferred term for "procedure". - case sqlProcedureTerm // = 530 - - /// - /// Retrieves a UTF-8 string with the preferred term for "catalog". - /// If a empty string is returned its assumed that the server does NOT supports catalogs. - case sqlCatalogTerm // = 531 - - /// - /// Retrieves a boolean value indicating whether a catalog appears at the start of a fully qualified table name. - /// - /// - false: if a catalog does not appear at the start of a fully qualified table name; - /// - true: if a catalog appears at the start of a fully qualified table name. - case sqlCatalogAtStart // = 532 - - /// - /// Retrieves the supported actions for a SQL schema. - /// - /// Returns an int32 bitmask value representing the supported actions for a SQL schema. - /// The returned bitmask should be parsed in order to retrieve the supported actions for a SQL schema. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported actions for SQL schema); - /// - return 1 (\b1) => [SQL_ELEMENT_IN_PROCEDURE_CALLS]; - /// - return 2 (\b10) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS]; - /// - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS]; - /// - return 4 (\b100) => [SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]. - /// Valid actions for a SQL schema described under `arrow.flight.protocol.sql.SqlSupportedElementActions`. - case sqlSchemasSupportedActions // = 533 - - /// - /// Retrieves the supported actions for a SQL schema. - /// - /// Returns an int32 bitmask value representing the supported actions for a SQL catalog. - /// The returned bitmask should be parsed in order to retrieve the supported actions for a SQL catalog. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported actions for SQL catalog); - /// - return 1 (\b1) => [SQL_ELEMENT_IN_PROCEDURE_CALLS]; - /// - return 2 (\b10) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS]; - /// - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS]; - /// - return 4 (\b100) => [SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]. - /// Valid actions for a SQL catalog are described under `arrow.flight.protocol.sql.SqlSupportedElementActions`. 
- case sqlCatalogsSupportedActions // = 534 - - /// - /// Retrieves the supported SQL positioned commands. - /// - /// Returns an int32 bitmask value representing the supported SQL positioned commands. - /// The returned bitmask should be parsed in order to retrieve the supported SQL positioned commands. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported SQL positioned commands); - /// - return 1 (\b1) => [SQL_POSITIONED_DELETE]; - /// - return 2 (\b10) => [SQL_POSITIONED_UPDATE]; - /// - return 3 (\b11) => [SQL_POSITIONED_DELETE, SQL_POSITIONED_UPDATE]. - /// Valid SQL positioned commands are described under `arrow.flight.protocol.sql.SqlSupportedPositionedCommands`. - case sqlSupportedPositionedCommands // = 535 - - /// - /// Retrieves a boolean value indicating whether SELECT FOR UPDATE statements are supported. - /// - /// Returns: - /// - false: if SELECT FOR UPDATE statements are unsupported; - /// - true: if SELECT FOR UPDATE statements are supported. - case sqlSelectForUpdateSupported // = 536 - - /// - /// Retrieves a boolean value indicating whether stored procedure calls that use the stored procedure escape syntax - /// are supported. - /// - /// Returns: - /// - false: if stored procedure calls that use the stored procedure escape syntax are unsupported; - /// - true: if stored procedure calls that use the stored procedure escape syntax are supported. - case sqlStoredProceduresSupported // = 537 - - /// - /// Retrieves the supported SQL subqueries. - /// - /// Returns an int32 bitmask value representing the supported SQL subqueries. - /// The returned bitmask should be parsed in order to retrieve the supported SQL subqueries. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported SQL subqueries); - /// - return 1 (\b1) => [SQL_SUBQUERIES_IN_COMPARISONS]; - /// - return 2 (\b10) => [SQL_SUBQUERIES_IN_EXISTS]; - /// - return 3 (\b11) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS]; - /// - return 4 (\b100) => [SQL_SUBQUERIES_IN_INS]; - /// - return 5 (\b101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS]; - /// - return 6 (\b110) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_EXISTS]; - /// - return 7 (\b111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS]; - /// - return 8 (\b1000) => [SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 9 (\b1001) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 10 (\b1010) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 11 (\b1011) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 12 (\b1100) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 13 (\b1101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 14 (\b1110) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 15 (\b1111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - ... - /// Valid SQL subqueries are described under `arrow.flight.protocol.sql.SqlSupportedSubqueries`. - case sqlSupportedSubqueries // = 538 - - /// - /// Retrieves a boolean value indicating whether correlated subqueries are supported. - /// - /// Returns: - /// - false: if correlated subqueries are unsupported; - /// - true: if correlated subqueries are supported. - case sqlCorrelatedSubqueriesSupported // = 539 - - /// - /// Retrieves the supported SQL UNIONs. - /// - /// Returns an int32 bitmask value representing the supported SQL UNIONs. 
- /// The returned bitmask should be parsed in order to retrieve the supported SQL UNIONs. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported SQL positioned commands); - /// - return 1 (\b1) => [SQL_UNION]; - /// - return 2 (\b10) => [SQL_UNION_ALL]; - /// - return 3 (\b11) => [SQL_UNION, SQL_UNION_ALL]. - /// Valid SQL positioned commands are described under `arrow.flight.protocol.sql.SqlSupportedUnions`. - case sqlSupportedUnions // = 540 - - /// Retrieves a int64 value representing the maximum number of hex characters allowed in an inline binary literal. - case sqlMaxBinaryLiteralLength // = 541 - - /// Retrieves a int64 value representing the maximum number of characters allowed for a character literal. - case sqlMaxCharLiteralLength // = 542 - - /// Retrieves a int64 value representing the maximum number of characters allowed for a column name. - case sqlMaxColumnNameLength // = 543 - - /// Retrieves a int64 value representing the maximum number of columns allowed in a GROUP BY clause. - case sqlMaxColumnsInGroupBy // = 544 - - /// Retrieves a int64 value representing the maximum number of columns allowed in an index. - case sqlMaxColumnsInIndex // = 545 - - /// Retrieves a int64 value representing the maximum number of columns allowed in an ORDER BY clause. - case sqlMaxColumnsInOrderBy // = 546 - - /// Retrieves a int64 value representing the maximum number of columns allowed in a SELECT list. - case sqlMaxColumnsInSelect // = 547 - - /// Retrieves a int64 value representing the maximum number of columns allowed in a table. - case sqlMaxColumnsInTable // = 548 - - /// Retrieves a int64 value representing the maximum number of concurrent connections possible. - case sqlMaxConnections // = 549 - - /// Retrieves a int64 value the maximum number of characters allowed in a cursor name. 
- case sqlMaxCursorNameLength // = 550 - - /// - /// Retrieves a int64 value representing the maximum number of bytes allowed for an index, - /// including all of the parts of the index. - case sqlMaxIndexLength // = 551 - - /// Retrieves a int64 value representing the maximum number of characters allowed in a schema name. - case sqlDbSchemaNameLength // = 552 - - /// Retrieves a int64 value representing the maximum number of characters allowed in a procedure name. - case sqlMaxProcedureNameLength // = 553 - - /// Retrieves a int64 value representing the maximum number of characters allowed in a catalog name. - case sqlMaxCatalogNameLength // = 554 - - /// Retrieves a int64 value representing the maximum number of bytes allowed in a single row. - case sqlMaxRowSize // = 555 - - /// - /// Retrieves a boolean indicating whether the return value for the JDBC method getMaxRowSize includes the SQL - /// data types LONGVARCHAR and LONGVARBINARY. - /// - /// Returns: - /// - false: if return value for the JDBC method getMaxRowSize does - /// not include the SQL data types LONGVARCHAR and LONGVARBINARY; - /// - true: if return value for the JDBC method getMaxRowSize includes - /// the SQL data types LONGVARCHAR and LONGVARBINARY. - case sqlMaxRowSizeIncludesBlobs // = 556 - - /// - /// Retrieves a int64 value representing the maximum number of characters allowed for an SQL statement; - /// a result of 0 (zero) means that there is no limit or the limit is not known. - case sqlMaxStatementLength // = 557 - - /// Retrieves a int64 value representing the maximum number of active statements that can be open at the same time. - case sqlMaxStatements // = 558 - - /// Retrieves a int64 value representing the maximum number of characters allowed in a table name. - case sqlMaxTableNameLength // = 559 - - /// Retrieves a int64 value representing the maximum number of tables allowed in a SELECT statement. 
- case sqlMaxTablesInSelect // = 560 - - /// Retrieves a int64 value representing the maximum number of characters allowed in a user name. - case sqlMaxUsernameLength // = 561 - - /// - /// Retrieves this database's default transaction isolation level as described in - /// `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`. - /// - /// Returns a int32 ordinal for the SQL transaction isolation level. - case sqlDefaultTransactionIsolation // = 562 - - /// - /// Retrieves a boolean value indicating whether transactions are supported. If not, invoking the method commit is a - /// noop, and the isolation level is `arrow.flight.protocol.sql.SqlTransactionIsolationLevel.TRANSACTION_NONE`. - /// - /// Returns: - /// - false: if transactions are unsupported; - /// - true: if transactions are supported. - case sqlTransactionsSupported // = 563 - - /// - /// Retrieves the supported transactions isolation levels. - /// - /// Returns an int32 bitmask value representing the supported transactions isolation levels. - /// The returned bitmask should be parsed in order to retrieve the supported transactions isolation levels. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported SQL transactions isolation levels); - /// - return 1 (\b1) => [SQL_TRANSACTION_NONE]; - /// - return 2 (\b10) => [SQL_TRANSACTION_READ_UNCOMMITTED]; - /// - return 3 (\b11) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED]; - /// - return 4 (\b100) => [SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 5 (\b101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 6 (\b110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 7 (\b111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 8 (\b1000) => [SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 9 (\b1001) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 10 (\b1010) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 11 (\b1011) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 12 (\b1100) => [SQL_TRANSACTION_REPEATABLE_READ, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 13 (\b1101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_REPEATABLE_READ, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 14 (\b1110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 15 (\b1111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 16 (\b10000) => [SQL_TRANSACTION_SERIALIZABLE]; - /// - ... - /// Valid SQL positioned commands are described under `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`. - case sqlSupportedTransactionsIsolationLevels // = 564 - - /// - /// Retrieves a boolean value indicating whether a data definition statement within a transaction forces - /// the transaction to commit. 
- /// - /// Returns: - /// - false: if a data definition statement within a transaction does not force the transaction to commit; - /// - true: if a data definition statement within a transaction forces the transaction to commit. - case sqlDataDefinitionCausesTransactionCommit // = 565 - - /// - /// Retrieves a boolean value indicating whether a data definition statement within a transaction is ignored. - /// - /// Returns: - /// - false: if a data definition statement within a transaction is taken into account; - /// - true: a data definition statement within a transaction is ignored. - case sqlDataDefinitionsInTransactionsIgnored // = 566 - - /// - /// Retrieves an int32 bitmask value representing the supported result set types. - /// The returned bitmask should be parsed in order to retrieve the supported result set types. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported result set types); - /// - return 1 (\b1) => [SQL_RESULT_SET_TYPE_UNSPECIFIED]; - /// - return 2 (\b10) => [SQL_RESULT_SET_TYPE_FORWARD_ONLY]; - /// - return 3 (\b11) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY]; - /// - return 4 (\b100) => [SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; - /// - return 5 (\b101) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; - /// - return 6 (\b110) => [SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; - /// - return 7 (\b111) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; - /// - return 8 (\b1000) => [SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE]; - /// - ... - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetType`. - case sqlSupportedResultSetTypes // = 567 - - /// - /// Returns an int32 bitmask value concurrency types supported for - /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_UNSPECIFIED`. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) - /// - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED] - /// - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. - case sqlSupportedConcurrenciesForResultSetUnspecified // = 568 - - /// - /// Returns an int32 bitmask value concurrency types supported for - /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_FORWARD_ONLY`. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) - /// - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED] - /// - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. - case sqlSupportedConcurrenciesForResultSetForwardOnly // = 569 - - /// - /// Returns an int32 bitmask value concurrency types supported for - /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE`. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) - /// - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED] - /// - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. - case sqlSupportedConcurrenciesForResultSetScrollSensitive // = 570 - - /// - /// Returns an int32 bitmask value concurrency types supported for - /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE`. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) - /// - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED] - /// - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. - case sqlSupportedConcurrenciesForResultSetScrollInsensitive // = 571 - - /// - /// Retrieves a boolean value indicating whether this database supports batch updates. - /// - /// - false: if this database does not support batch updates; - /// - true: if this database supports batch updates. - case sqlBatchUpdatesSupported // = 572 - - /// - /// Retrieves a boolean value indicating whether this database supports savepoints. - /// - /// Returns: - /// - false: if this database does not support savepoints; - /// - true: if this database supports savepoints. - case sqlSavepointsSupported // = 573 - - /// - /// Retrieves a boolean value indicating whether named parameters are supported in callable statements. - /// - /// Returns: - /// - false: if named parameters in callable statements are unsupported; - /// - true: if named parameters in callable statements are supported. - case sqlNamedParametersSupported // = 574 - - /// - /// Retrieves a boolean value indicating whether updates made to a LOB are made on a copy or directly to the LOB. 
- /// - /// Returns: - /// - false: if updates made to a LOB are made directly to the LOB; - /// - true: if updates made to a LOB are made on a copy. - case sqlLocatorsUpdateCopy // = 575 - - /// - /// Retrieves a boolean value indicating whether invoking user-defined or vendor functions - /// using the stored procedure escape syntax is supported. - /// - /// Returns: - /// - false: if invoking user-defined or vendor functions using the stored procedure escape syntax is unsupported; - /// - true: if invoking user-defined or vendor functions using the stored procedure escape syntax is supported. - case sqlStoredFunctionsUsingCallSyntaxSupported // = 576 - case UNRECOGNIZED(Int) - - init() { - self = .flightSqlServerName - } - - init?(rawValue: Int) { - switch rawValue { - case 0: self = .flightSqlServerName - case 1: self = .flightSqlServerVersion - case 2: self = .flightSqlServerArrowVersion - case 3: self = .flightSqlServerReadOnly - case 4: self = .flightSqlServerSql - case 5: self = .flightSqlServerSubstrait - case 6: self = .flightSqlServerSubstraitMinVersion - case 7: self = .flightSqlServerSubstraitMaxVersion - case 8: self = .flightSqlServerTransaction - case 9: self = .flightSqlServerCancel - case 100: self = .flightSqlServerStatementTimeout - case 101: self = .flightSqlServerTransactionTimeout - case 500: self = .sqlDdlCatalog - case 501: self = .sqlDdlSchema - case 502: self = .sqlDdlTable - case 503: self = .sqlIdentifierCase - case 504: self = .sqlIdentifierQuoteChar - case 505: self = .sqlQuotedIdentifierCase - case 506: self = .sqlAllTablesAreSelectable - case 507: self = .sqlNullOrdering - case 508: self = .sqlKeywords - case 509: self = .sqlNumericFunctions - case 510: self = .sqlStringFunctions - case 511: self = .sqlSystemFunctions - case 512: self = .sqlDatetimeFunctions - case 513: self = .sqlSearchStringEscape - case 514: self = .sqlExtraNameCharacters - case 515: self = .sqlSupportsColumnAliasing - case 516: self = .sqlNullPlusNullIsNull - 
case 517: self = .sqlSupportsConvert - case 518: self = .sqlSupportsTableCorrelationNames - case 519: self = .sqlSupportsDifferentTableCorrelationNames - case 520: self = .sqlSupportsExpressionsInOrderBy - case 521: self = .sqlSupportsOrderByUnrelated - case 522: self = .sqlSupportedGroupBy - case 523: self = .sqlSupportsLikeEscapeClause - case 524: self = .sqlSupportsNonNullableColumns - case 525: self = .sqlSupportedGrammar - case 526: self = .sqlAnsi92SupportedLevel - case 527: self = .sqlSupportsIntegrityEnhancementFacility - case 528: self = .sqlOuterJoinsSupportLevel - case 529: self = .sqlSchemaTerm - case 530: self = .sqlProcedureTerm - case 531: self = .sqlCatalogTerm - case 532: self = .sqlCatalogAtStart - case 533: self = .sqlSchemasSupportedActions - case 534: self = .sqlCatalogsSupportedActions - case 535: self = .sqlSupportedPositionedCommands - case 536: self = .sqlSelectForUpdateSupported - case 537: self = .sqlStoredProceduresSupported - case 538: self = .sqlSupportedSubqueries - case 539: self = .sqlCorrelatedSubqueriesSupported - case 540: self = .sqlSupportedUnions - case 541: self = .sqlMaxBinaryLiteralLength - case 542: self = .sqlMaxCharLiteralLength - case 543: self = .sqlMaxColumnNameLength - case 544: self = .sqlMaxColumnsInGroupBy - case 545: self = .sqlMaxColumnsInIndex - case 546: self = .sqlMaxColumnsInOrderBy - case 547: self = .sqlMaxColumnsInSelect - case 548: self = .sqlMaxColumnsInTable - case 549: self = .sqlMaxConnections - case 550: self = .sqlMaxCursorNameLength - case 551: self = .sqlMaxIndexLength - case 552: self = .sqlDbSchemaNameLength - case 553: self = .sqlMaxProcedureNameLength - case 554: self = .sqlMaxCatalogNameLength - case 555: self = .sqlMaxRowSize - case 556: self = .sqlMaxRowSizeIncludesBlobs - case 557: self = .sqlMaxStatementLength - case 558: self = .sqlMaxStatements - case 559: self = .sqlMaxTableNameLength - case 560: self = .sqlMaxTablesInSelect - case 561: self = .sqlMaxUsernameLength - case 562: self = 
.sqlDefaultTransactionIsolation - case 563: self = .sqlTransactionsSupported - case 564: self = .sqlSupportedTransactionsIsolationLevels - case 565: self = .sqlDataDefinitionCausesTransactionCommit - case 566: self = .sqlDataDefinitionsInTransactionsIgnored - case 567: self = .sqlSupportedResultSetTypes - case 568: self = .sqlSupportedConcurrenciesForResultSetUnspecified - case 569: self = .sqlSupportedConcurrenciesForResultSetForwardOnly - case 570: self = .sqlSupportedConcurrenciesForResultSetScrollSensitive - case 571: self = .sqlSupportedConcurrenciesForResultSetScrollInsensitive - case 572: self = .sqlBatchUpdatesSupported - case 573: self = .sqlSavepointsSupported - case 574: self = .sqlNamedParametersSupported - case 575: self = .sqlLocatorsUpdateCopy - case 576: self = .sqlStoredFunctionsUsingCallSyntaxSupported - default: self = .UNRECOGNIZED(rawValue) - } - } - - var rawValue: Int { - switch self { - case .flightSqlServerName: return 0 - case .flightSqlServerVersion: return 1 - case .flightSqlServerArrowVersion: return 2 - case .flightSqlServerReadOnly: return 3 - case .flightSqlServerSql: return 4 - case .flightSqlServerSubstrait: return 5 - case .flightSqlServerSubstraitMinVersion: return 6 - case .flightSqlServerSubstraitMaxVersion: return 7 - case .flightSqlServerTransaction: return 8 - case .flightSqlServerCancel: return 9 - case .flightSqlServerStatementTimeout: return 100 - case .flightSqlServerTransactionTimeout: return 101 - case .sqlDdlCatalog: return 500 - case .sqlDdlSchema: return 501 - case .sqlDdlTable: return 502 - case .sqlIdentifierCase: return 503 - case .sqlIdentifierQuoteChar: return 504 - case .sqlQuotedIdentifierCase: return 505 - case .sqlAllTablesAreSelectable: return 506 - case .sqlNullOrdering: return 507 - case .sqlKeywords: return 508 - case .sqlNumericFunctions: return 509 - case .sqlStringFunctions: return 510 - case .sqlSystemFunctions: return 511 - case .sqlDatetimeFunctions: return 512 - case .sqlSearchStringEscape: 
return 513 - case .sqlExtraNameCharacters: return 514 - case .sqlSupportsColumnAliasing: return 515 - case .sqlNullPlusNullIsNull: return 516 - case .sqlSupportsConvert: return 517 - case .sqlSupportsTableCorrelationNames: return 518 - case .sqlSupportsDifferentTableCorrelationNames: return 519 - case .sqlSupportsExpressionsInOrderBy: return 520 - case .sqlSupportsOrderByUnrelated: return 521 - case .sqlSupportedGroupBy: return 522 - case .sqlSupportsLikeEscapeClause: return 523 - case .sqlSupportsNonNullableColumns: return 524 - case .sqlSupportedGrammar: return 525 - case .sqlAnsi92SupportedLevel: return 526 - case .sqlSupportsIntegrityEnhancementFacility: return 527 - case .sqlOuterJoinsSupportLevel: return 528 - case .sqlSchemaTerm: return 529 - case .sqlProcedureTerm: return 530 - case .sqlCatalogTerm: return 531 - case .sqlCatalogAtStart: return 532 - case .sqlSchemasSupportedActions: return 533 - case .sqlCatalogsSupportedActions: return 534 - case .sqlSupportedPositionedCommands: return 535 - case .sqlSelectForUpdateSupported: return 536 - case .sqlStoredProceduresSupported: return 537 - case .sqlSupportedSubqueries: return 538 - case .sqlCorrelatedSubqueriesSupported: return 539 - case .sqlSupportedUnions: return 540 - case .sqlMaxBinaryLiteralLength: return 541 - case .sqlMaxCharLiteralLength: return 542 - case .sqlMaxColumnNameLength: return 543 - case .sqlMaxColumnsInGroupBy: return 544 - case .sqlMaxColumnsInIndex: return 545 - case .sqlMaxColumnsInOrderBy: return 546 - case .sqlMaxColumnsInSelect: return 547 - case .sqlMaxColumnsInTable: return 548 - case .sqlMaxConnections: return 549 - case .sqlMaxCursorNameLength: return 550 - case .sqlMaxIndexLength: return 551 - case .sqlDbSchemaNameLength: return 552 - case .sqlMaxProcedureNameLength: return 553 - case .sqlMaxCatalogNameLength: return 554 - case .sqlMaxRowSize: return 555 - case .sqlMaxRowSizeIncludesBlobs: return 556 - case .sqlMaxStatementLength: return 557 - case .sqlMaxStatements: return 558 
- case .sqlMaxTableNameLength: return 559 - case .sqlMaxTablesInSelect: return 560 - case .sqlMaxUsernameLength: return 561 - case .sqlDefaultTransactionIsolation: return 562 - case .sqlTransactionsSupported: return 563 - case .sqlSupportedTransactionsIsolationLevels: return 564 - case .sqlDataDefinitionCausesTransactionCommit: return 565 - case .sqlDataDefinitionsInTransactionsIgnored: return 566 - case .sqlSupportedResultSetTypes: return 567 - case .sqlSupportedConcurrenciesForResultSetUnspecified: return 568 - case .sqlSupportedConcurrenciesForResultSetForwardOnly: return 569 - case .sqlSupportedConcurrenciesForResultSetScrollSensitive: return 570 - case .sqlSupportedConcurrenciesForResultSetScrollInsensitive: return 571 - case .sqlBatchUpdatesSupported: return 572 - case .sqlSavepointsSupported: return 573 - case .sqlNamedParametersSupported: return 574 - case .sqlLocatorsUpdateCopy: return 575 - case .sqlStoredFunctionsUsingCallSyntaxSupported: return 576 - case .UNRECOGNIZED(let i): return i - } - } + typealias RawValue = Int + + /// Retrieves a UTF-8 string with the name of the Flight SQL Server. + case flightSqlServerName // = 0 + + /// Retrieves a UTF-8 string with the native version of the Flight SQL Server. + case flightSqlServerVersion // = 1 + + /// Retrieves a UTF-8 string with the Arrow format version of the Flight SQL Server. + case flightSqlServerArrowVersion // = 2 + + /// + /// Retrieves a boolean value indicating whether the Flight SQL Server is read only. + /// + /// Returns: + /// - false: if read-write + /// - true: if read only + case flightSqlServerReadOnly // = 3 + + /// + /// Retrieves a boolean value indicating whether the Flight SQL Server supports executing + /// SQL queries. + /// + /// Note that the absence of this info (as opposed to a false value) does not necessarily + /// mean that SQL is not supported, as this property was not originally defined. 
+ case flightSqlServerSql // = 4 + + /// + /// Retrieves a boolean value indicating whether the Flight SQL Server supports executing + /// Substrait plans. + case flightSqlServerSubstrait // = 5 + + /// + /// Retrieves a string value indicating the minimum supported Substrait version, or null + /// if Substrait is not supported. + case flightSqlServerSubstraitMinVersion // = 6 + + /// + /// Retrieves a string value indicating the maximum supported Substrait version, or null + /// if Substrait is not supported. + case flightSqlServerSubstraitMaxVersion // = 7 + + /// + /// Retrieves an int32 indicating whether the Flight SQL Server supports the + /// BeginTransaction/EndTransaction/BeginSavepoint/EndSavepoint actions. + /// + /// Even if this is not supported, the database may still support explicit "BEGIN + /// TRANSACTION"/"COMMIT" SQL statements (see SQL_TRANSACTIONS_SUPPORTED); this property + /// is only about whether the server implements the Flight SQL API endpoints. + /// + /// The possible values are listed in `SqlSupportedTransaction`. + case flightSqlServerTransaction // = 8 + + /// + /// Retrieves a boolean value indicating whether the Flight SQL Server supports explicit + /// query cancellation (the CancelQuery action). + case flightSqlServerCancel // = 9 + + /// + /// Retrieves an int32 indicating the timeout (in milliseconds) for prepared statement handles. + /// + /// If 0, there is no timeout. Servers should reset the timeout when the handle is used in a command. + case flightSqlServerStatementTimeout // = 100 + + /// + /// Retrieves an int32 indicating the timeout (in milliseconds) for transactions, since transactions are not tied to a connection. + /// + /// If 0, there is no timeout. Servers should reset the timeout when the handle is used in a command. + case flightSqlServerTransactionTimeout // = 101 + + /// + /// Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of catalogs. 
+ /// + /// Returns: + /// - false: if it doesn't support CREATE and DROP of catalogs. + /// - true: if it supports CREATE and DROP of catalogs. + case sqlDdlCatalog // = 500 + + /// + /// Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of schemas. + /// + /// Returns: + /// - false: if it doesn't support CREATE and DROP of schemas. + /// - true: if it supports CREATE and DROP of schemas. + case sqlDdlSchema // = 501 + + /// + /// Indicates whether the Flight SQL Server supports CREATE and DROP of tables. + /// + /// Returns: + /// - false: if it doesn't support CREATE and DROP of tables. + /// - true: if it supports CREATE and DROP of tables. + case sqlDdlTable // = 502 + + /// + /// Retrieves a int32 ordinal representing the case sensitivity of catalog, table, schema and table names. + /// + /// The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`. + case sqlIdentifierCase // = 503 + + /// Retrieves a UTF-8 string with the supported character(s) used to surround a delimited identifier. + case sqlIdentifierQuoteChar // = 504 + + /// + /// Retrieves a int32 describing the case sensitivity of quoted identifiers. + /// + /// The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`. + case sqlQuotedIdentifierCase // = 505 + + /// + /// Retrieves a boolean value indicating whether all tables are selectable. + /// + /// Returns: + /// - false: if not all tables are selectable or if none are; + /// - true: if all tables are selectable. + case sqlAllTablesAreSelectable // = 506 + + /// + /// Retrieves the null ordering. + /// + /// Returns a int32 ordinal for the null ordering being used, as described in + /// `arrow.flight.protocol.sql.SqlNullOrdering`. + case sqlNullOrdering // = 507 + + /// Retrieves a UTF-8 string list with values of the supported keywords. 
+ case sqlKeywords // = 508 + + /// Retrieves a UTF-8 string list with values of the supported numeric functions. + case sqlNumericFunctions // = 509 + + /// Retrieves a UTF-8 string list with values of the supported string functions. + case sqlStringFunctions // = 510 + + /// Retrieves a UTF-8 string list with values of the supported system functions. + case sqlSystemFunctions // = 511 + + /// Retrieves a UTF-8 string list with values of the supported datetime functions. + case sqlDatetimeFunctions // = 512 + + /// + /// Retrieves the UTF-8 string that can be used to escape wildcard characters. + /// This is the string that can be used to escape '_' or '%' in the catalog search parameters that are a pattern + /// (and therefore use one of the wildcard characters). + /// The '_' character represents any single character; the '%' character represents any sequence of zero or more + /// characters. + case sqlSearchStringEscape // = 513 + + /// + /// Retrieves a UTF-8 string with all the "extra" characters that can be used in unquoted identifier names + /// (those beyond a-z, A-Z, 0-9 and _). + case sqlExtraNameCharacters // = 514 + + /// + /// Retrieves a boolean value indicating whether column aliasing is supported. + /// If so, the SQL AS clause can be used to provide names for computed columns or to provide alias names for columns + /// as required. + /// + /// Returns: + /// - false: if column aliasing is unsupported; + /// - true: if column aliasing is supported. + case sqlSupportsColumnAliasing // = 515 + + /// + /// Retrieves a boolean value indicating whether concatenations between null and non-null values being + /// null are supported. + /// + /// - Returns: + /// - false: if concatenations between null and non-null values being null are unsupported; + /// - true: if concatenations between null and non-null values being null are supported. 
+ case sqlNullPlusNullIsNull // = 516 + + /// + /// Retrieves a map where the key is the type to convert from and the value is a list with the types to convert to, + /// indicating the supported conversions. Each key and each item on the list value is a value to a predefined type on + /// SqlSupportsConvert enum. + /// The returned map will be: map> + case sqlSupportsConvert // = 517 + + /// + /// Retrieves a boolean value indicating whether table correlation names are + /// supported. + /// + /// Returns: + /// - false: if table correlation names are unsupported; + /// - true: if table correlation names are supported. + case sqlSupportsTableCorrelationNames // = 518 + + /// + /// Retrieves a boolean value indicating whether, when table correlation names are supported, + /// they are restricted to being different from the names of the tables. + /// + /// Returns: + /// - false: if different table correlation names are unsupported; + /// - true: if different table correlation names are supported + case sqlSupportsDifferentTableCorrelationNames // = 519 + + /// + /// Retrieves a boolean value indicating whether expressions in ORDER BY lists are supported. + /// + /// Returns: + /// - false: if expressions in ORDER BY are unsupported; + /// - true: if expressions in ORDER BY are supported; + case sqlSupportsExpressionsInOrderBy // = 520 + + /// + /// Retrieves a boolean value indicating whether using a column that is not in the SELECT statement in a GROUP BY + /// clause is supported. + /// + /// Returns: + /// - false: if using a column that is not in the SELECT statement in a GROUP BY clause is unsupported; + /// - true: if using a column that is not in the SELECT statement in a GROUP BY clause is supported. + case sqlSupportsOrderByUnrelated // = 521 + + /// + /// Retrieves the supported GROUP BY commands; + /// + /// Returns an int32 bitmask value representing the supported commands. 
+ /// The returned bitmask should be parsed in order to retrieve the supported commands. + /// + /// For instance: + /// - return 0 (\b0) => [] (GROUP BY is unsupported); + /// - return 1 (\b1) => [SQL_GROUP_BY_UNRELATED]; + /// - return 2 (\b10) => [SQL_GROUP_BY_BEYOND_SELECT]; + /// - return 3 (\b11) => [SQL_GROUP_BY_UNRELATED, SQL_GROUP_BY_BEYOND_SELECT]. + /// Valid GROUP BY types are described under `arrow.flight.protocol.sql.SqlSupportedGroupBy`. + case sqlSupportedGroupBy // = 522 + + /// + /// Retrieves a boolean value indicating whether specifying a LIKE escape clause is supported. + /// + /// Returns: + /// - false: if specifying a LIKE escape clause is unsupported; + /// - true: if specifying a LIKE escape clause is supported. + case sqlSupportsLikeEscapeClause // = 523 + + /// + /// Retrieves a boolean value indicating whether columns may be defined as non-nullable. + /// + /// Returns: + /// - false: if columns cannot be defined as non-nullable; + /// - true: if columns may be defined as non-nullable. + case sqlSupportsNonNullableColumns // = 524 + + /// + /// Retrieves the supported SQL grammar level as per the ODBC specification. + /// + /// Returns an int32 bitmask value representing the supported SQL grammar level. + /// The returned bitmask should be parsed in order to retrieve the supported grammar levels. + /// + /// For instance: + /// - return 0 (\b0) => [] (SQL grammar is unsupported); + /// - return 1 (\b1) => [SQL_MINIMUM_GRAMMAR]; + /// - return 2 (\b10) => [SQL_CORE_GRAMMAR]; + /// - return 3 (\b11) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR]; + /// - return 4 (\b100) => [SQL_EXTENDED_GRAMMAR]; + /// - return 5 (\b101) => [SQL_MINIMUM_GRAMMAR, SQL_EXTENDED_GRAMMAR]; + /// - return 6 (\b110) => [SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]; + /// - return 7 (\b111) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]. + /// Valid SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedSqlGrammar`. 
+ case sqlSupportedGrammar // = 525 + + /// + /// Retrieves the supported ANSI92 SQL grammar level. + /// + /// Returns an int32 bitmask value representing the supported ANSI92 SQL grammar level. + /// The returned bitmask should be parsed in order to retrieve the supported commands. + /// + /// For instance: + /// - return 0 (\b0) => [] (ANSI92 SQL grammar is unsupported); + /// - return 1 (\b1) => [ANSI92_ENTRY_SQL]; + /// - return 2 (\b10) => [ANSI92_INTERMEDIATE_SQL]; + /// - return 3 (\b11) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL]; + /// - return 4 (\b100) => [ANSI92_FULL_SQL]; + /// - return 5 (\b101) => [ANSI92_ENTRY_SQL, ANSI92_FULL_SQL]; + /// - return 6 (\b110) => [ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL]; + /// - return 7 (\b111) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL]. + /// Valid ANSI92 SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedAnsi92SqlGrammarLevel`. + case sqlAnsi92SupportedLevel // = 526 + + /// + /// Retrieves a boolean value indicating whether the SQL Integrity Enhancement Facility is supported. + /// + /// Returns: + /// - false: if the SQL Integrity Enhancement Facility is unsupported; + /// - true: if the SQL Integrity Enhancement Facility is supported. + case sqlSupportsIntegrityEnhancementFacility // = 527 + + /// + /// Retrieves the support level for SQL OUTER JOINs. + /// + /// Returns a int32 ordinal for the SQL ordering being used, as described in + /// `arrow.flight.protocol.sql.SqlOuterJoinsSupportLevel`. + case sqlOuterJoinsSupportLevel // = 528 + + /// Retrieves a UTF-8 string with the preferred term for "schema". + case sqlSchemaTerm // = 529 + + /// Retrieves a UTF-8 string with the preferred term for "procedure". + case sqlProcedureTerm // = 530 + + /// + /// Retrieves a UTF-8 string with the preferred term for "catalog". + /// If an empty string is returned it is assumed that the server does NOT support catalogs. 
+ case sqlCatalogTerm // = 531 + + /// + /// Retrieves a boolean value indicating whether a catalog appears at the start of a fully qualified table name. + /// + /// - false: if a catalog does not appear at the start of a fully qualified table name; + /// - true: if a catalog appears at the start of a fully qualified table name. + case sqlCatalogAtStart // = 532 + + /// + /// Retrieves the supported actions for a SQL schema. + /// + /// Returns an int32 bitmask value representing the supported actions for a SQL schema. + /// The returned bitmask should be parsed in order to retrieve the supported actions for a SQL schema. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported actions for SQL schema); + /// - return 1 (\b1) => [SQL_ELEMENT_IN_PROCEDURE_CALLS]; + /// - return 2 (\b10) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS]; + /// - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS]; + /// - return 4 (\b100) => [SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]. + /// Valid actions for a SQL schema described under `arrow.flight.protocol.sql.SqlSupportedElementActions`. + case sqlSchemasSupportedActions // = 533 + + /// + /// Retrieves the supported actions for a SQL schema. + /// + /// Returns an int32 bitmask value representing the supported actions for a SQL catalog. + /// The returned bitmask should be parsed in order to retrieve the supported actions for a SQL catalog. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported actions for SQL catalog); + /// - return 1 (\b1) => [SQL_ELEMENT_IN_PROCEDURE_CALLS]; + /// - return 2 (\b10) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS]; + /// - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS]; + /// - return 4 (\b100) => [SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]. + /// Valid actions for a SQL catalog are described under `arrow.flight.protocol.sql.SqlSupportedElementActions`. + case sqlCatalogsSupportedActions // = 534 + + /// + /// Retrieves the supported SQL positioned commands. + /// + /// Returns an int32 bitmask value representing the supported SQL positioned commands. + /// The returned bitmask should be parsed in order to retrieve the supported SQL positioned commands. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported SQL positioned commands); + /// - return 1 (\b1) => [SQL_POSITIONED_DELETE]; + /// - return 2 (\b10) => [SQL_POSITIONED_UPDATE]; + /// - return 3 (\b11) => [SQL_POSITIONED_DELETE, SQL_POSITIONED_UPDATE]. + /// Valid SQL positioned commands are described under `arrow.flight.protocol.sql.SqlSupportedPositionedCommands`. + case sqlSupportedPositionedCommands // = 535 + + /// + /// Retrieves a boolean value indicating whether SELECT FOR UPDATE statements are supported. + /// + /// Returns: + /// - false: if SELECT FOR UPDATE statements are unsupported; + /// - true: if SELECT FOR UPDATE statements are supported. 
+ case sqlSelectForUpdateSupported // = 536 + + /// + /// Retrieves a boolean value indicating whether stored procedure calls that use the stored procedure escape syntax + /// are supported. + /// + /// Returns: + /// - false: if stored procedure calls that use the stored procedure escape syntax are unsupported; + /// - true: if stored procedure calls that use the stored procedure escape syntax are supported. + case sqlStoredProceduresSupported // = 537 + + /// + /// Retrieves the supported SQL subqueries. + /// + /// Returns an int32 bitmask value representing the supported SQL subqueries. + /// The returned bitmask should be parsed in order to retrieve the supported SQL subqueries. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported SQL subqueries); + /// - return 1 (\b1) => [SQL_SUBQUERIES_IN_COMPARISONS]; + /// - return 2 (\b10) => [SQL_SUBQUERIES_IN_EXISTS]; + /// - return 3 (\b11) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS]; + /// - return 4 (\b100) => [SQL_SUBQUERIES_IN_INS]; + /// - return 5 (\b101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS]; + /// - return 6 (\b110) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_EXISTS]; + /// - return 7 (\b111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS]; + /// - return 8 (\b1000) => [SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 9 (\b1001) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 10 (\b1010) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 11 (\b1011) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 12 (\b1100) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 13 (\b1101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 14 (\b1110) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 
15 (\b1111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - ... + /// Valid SQL subqueries are described under `arrow.flight.protocol.sql.SqlSupportedSubqueries`. + case sqlSupportedSubqueries // = 538 + + /// + /// Retrieves a boolean value indicating whether correlated subqueries are supported. + /// + /// Returns: + /// - false: if correlated subqueries are unsupported; + /// - true: if correlated subqueries are supported. + case sqlCorrelatedSubqueriesSupported // = 539 + + /// + /// Retrieves the supported SQL UNIONs. + /// + /// Returns an int32 bitmask value representing the supported SQL UNIONs. + /// The returned bitmask should be parsed in order to retrieve the supported SQL UNIONs. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported SQL positioned commands); + /// - return 1 (\b1) => [SQL_UNION]; + /// - return 2 (\b10) => [SQL_UNION_ALL]; + /// - return 3 (\b11) => [SQL_UNION, SQL_UNION_ALL]. + /// Valid SQL positioned commands are described under `arrow.flight.protocol.sql.SqlSupportedUnions`. + case sqlSupportedUnions // = 540 + + /// Retrieves a int64 value representing the maximum number of hex characters allowed in an inline binary literal. + case sqlMaxBinaryLiteralLength // = 541 + + /// Retrieves a int64 value representing the maximum number of characters allowed for a character literal. + case sqlMaxCharLiteralLength // = 542 + + /// Retrieves a int64 value representing the maximum number of characters allowed for a column name. + case sqlMaxColumnNameLength // = 543 + + /// Retrieves a int64 value representing the maximum number of columns allowed in a GROUP BY clause. + case sqlMaxColumnsInGroupBy // = 544 + + /// Retrieves a int64 value representing the maximum number of columns allowed in an index. + case sqlMaxColumnsInIndex // = 545 + + /// Retrieves a int64 value representing the maximum number of columns allowed in an ORDER BY clause. 
+ case sqlMaxColumnsInOrderBy // = 546 + + /// Retrieves a int64 value representing the maximum number of columns allowed in a SELECT list. + case sqlMaxColumnsInSelect // = 547 + + /// Retrieves a int64 value representing the maximum number of columns allowed in a table. + case sqlMaxColumnsInTable // = 548 + + /// Retrieves a int64 value representing the maximum number of concurrent connections possible. + case sqlMaxConnections // = 549 + + /// Retrieves a int64 value the maximum number of characters allowed in a cursor name. + case sqlMaxCursorNameLength // = 550 + + /// + /// Retrieves a int64 value representing the maximum number of bytes allowed for an index, + /// including all of the parts of the index. + case sqlMaxIndexLength // = 551 + + /// Retrieves a int64 value representing the maximum number of characters allowed in a schema name. + case sqlDbSchemaNameLength // = 552 + + /// Retrieves a int64 value representing the maximum number of characters allowed in a procedure name. + case sqlMaxProcedureNameLength // = 553 + + /// Retrieves a int64 value representing the maximum number of characters allowed in a catalog name. + case sqlMaxCatalogNameLength // = 554 + + /// Retrieves a int64 value representing the maximum number of bytes allowed in a single row. + case sqlMaxRowSize // = 555 + + /// + /// Retrieves a boolean indicating whether the return value for the JDBC method getMaxRowSize includes the SQL + /// data types LONGVARCHAR and LONGVARBINARY. + /// + /// Returns: + /// - false: if return value for the JDBC method getMaxRowSize does + /// not include the SQL data types LONGVARCHAR and LONGVARBINARY; + /// - true: if return value for the JDBC method getMaxRowSize includes + /// the SQL data types LONGVARCHAR and LONGVARBINARY. 
+ case sqlMaxRowSizeIncludesBlobs // = 556 + + /// + /// Retrieves a int64 value representing the maximum number of characters allowed for an SQL statement; + /// a result of 0 (zero) means that there is no limit or the limit is not known. + case sqlMaxStatementLength // = 557 + + /// Retrieves a int64 value representing the maximum number of active statements that can be open at the same time. + case sqlMaxStatements // = 558 + + /// Retrieves a int64 value representing the maximum number of characters allowed in a table name. + case sqlMaxTableNameLength // = 559 + + /// Retrieves a int64 value representing the maximum number of tables allowed in a SELECT statement. + case sqlMaxTablesInSelect // = 560 + + /// Retrieves a int64 value representing the maximum number of characters allowed in a user name. + case sqlMaxUsernameLength // = 561 + + /// + /// Retrieves this database's default transaction isolation level as described in + /// `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`. + /// + /// Returns a int32 ordinal for the SQL transaction isolation level. + case sqlDefaultTransactionIsolation // = 562 + + /// + /// Retrieves a boolean value indicating whether transactions are supported. If not, invoking the method commit is a + /// noop, and the isolation level is `arrow.flight.protocol.sql.SqlTransactionIsolationLevel.TRANSACTION_NONE`. + /// + /// Returns: + /// - false: if transactions are unsupported; + /// - true: if transactions are supported. + case sqlTransactionsSupported // = 563 + + /// + /// Retrieves the supported transactions isolation levels. + /// + /// Returns an int32 bitmask value representing the supported transactions isolation levels. + /// The returned bitmask should be parsed in order to retrieve the supported transactions isolation levels. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported SQL transactions isolation levels); + /// - return 1 (\b1) => [SQL_TRANSACTION_NONE]; + /// - return 2 (\b10) => [SQL_TRANSACTION_READ_UNCOMMITTED]; + /// - return 3 (\b11) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED]; + /// - return 4 (\b100) => [SQL_TRANSACTION_READ_COMMITTED]; + /// - return 5 (\b101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_COMMITTED]; + /// - return 6 (\b110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED]; + /// - return 7 (\b111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED]; + /// - return 8 (\b1000) => [SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 9 (\b1001) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 10 (\b1010) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 11 (\b1011) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 12 (\b1100) => [SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 13 (\b1101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 14 (\b1110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 15 (\b1111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 16 (\b10000) => [SQL_TRANSACTION_SERIALIZABLE]; + /// - ... + /// Valid SQL transaction isolation levels are described under `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`. + case sqlSupportedTransactionsIsolationLevels // = 564 + + /// + /// Retrieves a boolean value indicating whether a data definition statement within a transaction forces + /// the transaction to commit. 
+ /// + /// Returns: + /// - false: if a data definition statement within a transaction does not force the transaction to commit; + /// - true: if a data definition statement within a transaction forces the transaction to commit. + case sqlDataDefinitionCausesTransactionCommit // = 565 + + /// + /// Retrieves a boolean value indicating whether a data definition statement within a transaction is ignored. + /// + /// Returns: + /// - false: if a data definition statement within a transaction is taken into account; + /// - true: a data definition statement within a transaction is ignored. + case sqlDataDefinitionsInTransactionsIgnored // = 566 + + /// + /// Retrieves an int32 bitmask value representing the supported result set types. + /// The returned bitmask should be parsed in order to retrieve the supported result set types. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported result set types); + /// - return 1 (\b1) => [SQL_RESULT_SET_TYPE_UNSPECIFIED]; + /// - return 2 (\b10) => [SQL_RESULT_SET_TYPE_FORWARD_ONLY]; + /// - return 3 (\b11) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY]; + /// - return 4 (\b100) => [SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; + /// - return 5 (\b101) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; + /// - return 6 (\b110) => [SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; + /// - return 7 (\b111) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; + /// - return 8 (\b1000) => [SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE]; + /// - ... + /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetType`. + case sqlSupportedResultSetTypes // = 567 + + /// + /// Returns an int32 bitmask value concurrency types supported for + /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_UNSPECIFIED`. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) + /// - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED] + /// - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. + case sqlSupportedConcurrenciesForResultSetUnspecified // = 568 + + /// + /// Returns an int32 bitmask value concurrency types supported for + /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_FORWARD_ONLY`. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) + /// - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED] + /// - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. + case sqlSupportedConcurrenciesForResultSetForwardOnly // = 569 + + /// + /// Returns an int32 bitmask value concurrency types supported for + /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE`. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) + /// - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED] + /// - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. + case sqlSupportedConcurrenciesForResultSetScrollSensitive // = 570 + + /// + /// Returns an int32 bitmask value concurrency types supported for + /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE`. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) + /// - return 1 (\b1) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED] + /// - return 2 (\b10) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 4 (\b100) => [SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. + case sqlSupportedConcurrenciesForResultSetScrollInsensitive // = 571 + + /// + /// Retrieves a boolean value indicating whether this database supports batch updates. + /// + /// - false: if this database does not support batch updates; + /// - true: if this database supports batch updates. + case sqlBatchUpdatesSupported // = 572 + + /// + /// Retrieves a boolean value indicating whether this database supports savepoints. + /// + /// Returns: + /// - false: if this database does not support savepoints; + /// - true: if this database supports savepoints. + case sqlSavepointsSupported // = 573 + + /// + /// Retrieves a boolean value indicating whether named parameters are supported in callable statements. + /// + /// Returns: + /// - false: if named parameters in callable statements are unsupported; + /// - true: if named parameters in callable statements are supported. + case sqlNamedParametersSupported // = 574 + + /// + /// Retrieves a boolean value indicating whether updates made to a LOB are made on a copy or directly to the LOB. 
+ /// + /// Returns: + /// - false: if updates made to a LOB are made directly to the LOB; + /// - true: if updates made to a LOB are made on a copy. + case sqlLocatorsUpdateCopy // = 575 + + /// + /// Retrieves a boolean value indicating whether invoking user-defined or vendor functions + /// using the stored procedure escape syntax is supported. + /// + /// Returns: + /// - false: if invoking user-defined or vendor functions using the stored procedure escape syntax is unsupported; + /// - true: if invoking user-defined or vendor functions using the stored procedure escape syntax is supported. + case sqlStoredFunctionsUsingCallSyntaxSupported // = 576 + case UNRECOGNIZED(Int) + + init() { + self = .flightSqlServerName + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .flightSqlServerName + case 1: self = .flightSqlServerVersion + case 2: self = .flightSqlServerArrowVersion + case 3: self = .flightSqlServerReadOnly + case 4: self = .flightSqlServerSql + case 5: self = .flightSqlServerSubstrait + case 6: self = .flightSqlServerSubstraitMinVersion + case 7: self = .flightSqlServerSubstraitMaxVersion + case 8: self = .flightSqlServerTransaction + case 9: self = .flightSqlServerCancel + case 100: self = .flightSqlServerStatementTimeout + case 101: self = .flightSqlServerTransactionTimeout + case 500: self = .sqlDdlCatalog + case 501: self = .sqlDdlSchema + case 502: self = .sqlDdlTable + case 503: self = .sqlIdentifierCase + case 504: self = .sqlIdentifierQuoteChar + case 505: self = .sqlQuotedIdentifierCase + case 506: self = .sqlAllTablesAreSelectable + case 507: self = .sqlNullOrdering + case 508: self = .sqlKeywords + case 509: self = .sqlNumericFunctions + case 510: self = .sqlStringFunctions + case 511: self = .sqlSystemFunctions + case 512: self = .sqlDatetimeFunctions + case 513: self = .sqlSearchStringEscape + case 514: self = .sqlExtraNameCharacters + case 515: self = .sqlSupportsColumnAliasing + case 516: self = .sqlNullPlusNullIsNull + 
case 517: self = .sqlSupportsConvert + case 518: self = .sqlSupportsTableCorrelationNames + case 519: self = .sqlSupportsDifferentTableCorrelationNames + case 520: self = .sqlSupportsExpressionsInOrderBy + case 521: self = .sqlSupportsOrderByUnrelated + case 522: self = .sqlSupportedGroupBy + case 523: self = .sqlSupportsLikeEscapeClause + case 524: self = .sqlSupportsNonNullableColumns + case 525: self = .sqlSupportedGrammar + case 526: self = .sqlAnsi92SupportedLevel + case 527: self = .sqlSupportsIntegrityEnhancementFacility + case 528: self = .sqlOuterJoinsSupportLevel + case 529: self = .sqlSchemaTerm + case 530: self = .sqlProcedureTerm + case 531: self = .sqlCatalogTerm + case 532: self = .sqlCatalogAtStart + case 533: self = .sqlSchemasSupportedActions + case 534: self = .sqlCatalogsSupportedActions + case 535: self = .sqlSupportedPositionedCommands + case 536: self = .sqlSelectForUpdateSupported + case 537: self = .sqlStoredProceduresSupported + case 538: self = .sqlSupportedSubqueries + case 539: self = .sqlCorrelatedSubqueriesSupported + case 540: self = .sqlSupportedUnions + case 541: self = .sqlMaxBinaryLiteralLength + case 542: self = .sqlMaxCharLiteralLength + case 543: self = .sqlMaxColumnNameLength + case 544: self = .sqlMaxColumnsInGroupBy + case 545: self = .sqlMaxColumnsInIndex + case 546: self = .sqlMaxColumnsInOrderBy + case 547: self = .sqlMaxColumnsInSelect + case 548: self = .sqlMaxColumnsInTable + case 549: self = .sqlMaxConnections + case 550: self = .sqlMaxCursorNameLength + case 551: self = .sqlMaxIndexLength + case 552: self = .sqlDbSchemaNameLength + case 553: self = .sqlMaxProcedureNameLength + case 554: self = .sqlMaxCatalogNameLength + case 555: self = .sqlMaxRowSize + case 556: self = .sqlMaxRowSizeIncludesBlobs + case 557: self = .sqlMaxStatementLength + case 558: self = .sqlMaxStatements + case 559: self = .sqlMaxTableNameLength + case 560: self = .sqlMaxTablesInSelect + case 561: self = .sqlMaxUsernameLength + case 562: self = 
.sqlDefaultTransactionIsolation + case 563: self = .sqlTransactionsSupported + case 564: self = .sqlSupportedTransactionsIsolationLevels + case 565: self = .sqlDataDefinitionCausesTransactionCommit + case 566: self = .sqlDataDefinitionsInTransactionsIgnored + case 567: self = .sqlSupportedResultSetTypes + case 568: self = .sqlSupportedConcurrenciesForResultSetUnspecified + case 569: self = .sqlSupportedConcurrenciesForResultSetForwardOnly + case 570: self = .sqlSupportedConcurrenciesForResultSetScrollSensitive + case 571: self = .sqlSupportedConcurrenciesForResultSetScrollInsensitive + case 572: self = .sqlBatchUpdatesSupported + case 573: self = .sqlSavepointsSupported + case 574: self = .sqlNamedParametersSupported + case 575: self = .sqlLocatorsUpdateCopy + case 576: self = .sqlStoredFunctionsUsingCallSyntaxSupported + default: self = .UNRECOGNIZED(rawValue) + } + } + var rawValue: Int { + switch self { + case .flightSqlServerName: return 0 + case .flightSqlServerVersion: return 1 + case .flightSqlServerArrowVersion: return 2 + case .flightSqlServerReadOnly: return 3 + case .flightSqlServerSql: return 4 + case .flightSqlServerSubstrait: return 5 + case .flightSqlServerSubstraitMinVersion: return 6 + case .flightSqlServerSubstraitMaxVersion: return 7 + case .flightSqlServerTransaction: return 8 + case .flightSqlServerCancel: return 9 + case .flightSqlServerStatementTimeout: return 100 + case .flightSqlServerTransactionTimeout: return 101 + case .sqlDdlCatalog: return 500 + case .sqlDdlSchema: return 501 + case .sqlDdlTable: return 502 + case .sqlIdentifierCase: return 503 + case .sqlIdentifierQuoteChar: return 504 + case .sqlQuotedIdentifierCase: return 505 + case .sqlAllTablesAreSelectable: return 506 + case .sqlNullOrdering: return 507 + case .sqlKeywords: return 508 + case .sqlNumericFunctions: return 509 + case .sqlStringFunctions: return 510 + case .sqlSystemFunctions: return 511 + case .sqlDatetimeFunctions: return 512 + case .sqlSearchStringEscape: return 
513 + case .sqlExtraNameCharacters: return 514 + case .sqlSupportsColumnAliasing: return 515 + case .sqlNullPlusNullIsNull: return 516 + case .sqlSupportsConvert: return 517 + case .sqlSupportsTableCorrelationNames: return 518 + case .sqlSupportsDifferentTableCorrelationNames: return 519 + case .sqlSupportsExpressionsInOrderBy: return 520 + case .sqlSupportsOrderByUnrelated: return 521 + case .sqlSupportedGroupBy: return 522 + case .sqlSupportsLikeEscapeClause: return 523 + case .sqlSupportsNonNullableColumns: return 524 + case .sqlSupportedGrammar: return 525 + case .sqlAnsi92SupportedLevel: return 526 + case .sqlSupportsIntegrityEnhancementFacility: return 527 + case .sqlOuterJoinsSupportLevel: return 528 + case .sqlSchemaTerm: return 529 + case .sqlProcedureTerm: return 530 + case .sqlCatalogTerm: return 531 + case .sqlCatalogAtStart: return 532 + case .sqlSchemasSupportedActions: return 533 + case .sqlCatalogsSupportedActions: return 534 + case .sqlSupportedPositionedCommands: return 535 + case .sqlSelectForUpdateSupported: return 536 + case .sqlStoredProceduresSupported: return 537 + case .sqlSupportedSubqueries: return 538 + case .sqlCorrelatedSubqueriesSupported: return 539 + case .sqlSupportedUnions: return 540 + case .sqlMaxBinaryLiteralLength: return 541 + case .sqlMaxCharLiteralLength: return 542 + case .sqlMaxColumnNameLength: return 543 + case .sqlMaxColumnsInGroupBy: return 544 + case .sqlMaxColumnsInIndex: return 545 + case .sqlMaxColumnsInOrderBy: return 546 + case .sqlMaxColumnsInSelect: return 547 + case .sqlMaxColumnsInTable: return 548 + case .sqlMaxConnections: return 549 + case .sqlMaxCursorNameLength: return 550 + case .sqlMaxIndexLength: return 551 + case .sqlDbSchemaNameLength: return 552 + case .sqlMaxProcedureNameLength: return 553 + case .sqlMaxCatalogNameLength: return 554 + case .sqlMaxRowSize: return 555 + case .sqlMaxRowSizeIncludesBlobs: return 556 + case .sqlMaxStatementLength: return 557 + case .sqlMaxStatements: return 558 + case 
.sqlMaxTableNameLength: return 559 + case .sqlMaxTablesInSelect: return 560 + case .sqlMaxUsernameLength: return 561 + case .sqlDefaultTransactionIsolation: return 562 + case .sqlTransactionsSupported: return 563 + case .sqlSupportedTransactionsIsolationLevels: return 564 + case .sqlDataDefinitionCausesTransactionCommit: return 565 + case .sqlDataDefinitionsInTransactionsIgnored: return 566 + case .sqlSupportedResultSetTypes: return 567 + case .sqlSupportedConcurrenciesForResultSetUnspecified: return 568 + case .sqlSupportedConcurrenciesForResultSetForwardOnly: return 569 + case .sqlSupportedConcurrenciesForResultSetScrollSensitive: return 570 + case .sqlSupportedConcurrenciesForResultSetScrollInsensitive: return 571 + case .sqlBatchUpdatesSupported: return 572 + case .sqlSavepointsSupported: return 573 + case .sqlNamedParametersSupported: return 574 + case .sqlLocatorsUpdateCopy: return 575 + case .sqlStoredFunctionsUsingCallSyntaxSupported: return 576 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlInfo: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. 
- static var allCases: [Arrow_Flight_Protocol_Sql_SqlInfo] = [ - .flightSqlServerName, - .flightSqlServerVersion, - .flightSqlServerArrowVersion, - .flightSqlServerReadOnly, - .flightSqlServerSql, - .flightSqlServerSubstrait, - .flightSqlServerSubstraitMinVersion, - .flightSqlServerSubstraitMaxVersion, - .flightSqlServerTransaction, - .flightSqlServerCancel, - .flightSqlServerStatementTimeout, - .flightSqlServerTransactionTimeout, - .sqlDdlCatalog, - .sqlDdlSchema, - .sqlDdlTable, - .sqlIdentifierCase, - .sqlIdentifierQuoteChar, - .sqlQuotedIdentifierCase, - .sqlAllTablesAreSelectable, - .sqlNullOrdering, - .sqlKeywords, - .sqlNumericFunctions, - .sqlStringFunctions, - .sqlSystemFunctions, - .sqlDatetimeFunctions, - .sqlSearchStringEscape, - .sqlExtraNameCharacters, - .sqlSupportsColumnAliasing, - .sqlNullPlusNullIsNull, - .sqlSupportsConvert, - .sqlSupportsTableCorrelationNames, - .sqlSupportsDifferentTableCorrelationNames, - .sqlSupportsExpressionsInOrderBy, - .sqlSupportsOrderByUnrelated, - .sqlSupportedGroupBy, - .sqlSupportsLikeEscapeClause, - .sqlSupportsNonNullableColumns, - .sqlSupportedGrammar, - .sqlAnsi92SupportedLevel, - .sqlSupportsIntegrityEnhancementFacility, - .sqlOuterJoinsSupportLevel, - .sqlSchemaTerm, - .sqlProcedureTerm, - .sqlCatalogTerm, - .sqlCatalogAtStart, - .sqlSchemasSupportedActions, - .sqlCatalogsSupportedActions, - .sqlSupportedPositionedCommands, - .sqlSelectForUpdateSupported, - .sqlStoredProceduresSupported, - .sqlSupportedSubqueries, - .sqlCorrelatedSubqueriesSupported, - .sqlSupportedUnions, - .sqlMaxBinaryLiteralLength, - .sqlMaxCharLiteralLength, - .sqlMaxColumnNameLength, - .sqlMaxColumnsInGroupBy, - .sqlMaxColumnsInIndex, - .sqlMaxColumnsInOrderBy, - .sqlMaxColumnsInSelect, - .sqlMaxColumnsInTable, - .sqlMaxConnections, - .sqlMaxCursorNameLength, - .sqlMaxIndexLength, - .sqlDbSchemaNameLength, - .sqlMaxProcedureNameLength, - .sqlMaxCatalogNameLength, - .sqlMaxRowSize, - .sqlMaxRowSizeIncludesBlobs, - .sqlMaxStatementLength, - 
.sqlMaxStatements, - .sqlMaxTableNameLength, - .sqlMaxTablesInSelect, - .sqlMaxUsernameLength, - .sqlDefaultTransactionIsolation, - .sqlTransactionsSupported, - .sqlSupportedTransactionsIsolationLevels, - .sqlDataDefinitionCausesTransactionCommit, - .sqlDataDefinitionsInTransactionsIgnored, - .sqlSupportedResultSetTypes, - .sqlSupportedConcurrenciesForResultSetUnspecified, - .sqlSupportedConcurrenciesForResultSetForwardOnly, - .sqlSupportedConcurrenciesForResultSetScrollSensitive, - .sqlSupportedConcurrenciesForResultSetScrollInsensitive, - .sqlBatchUpdatesSupported, - .sqlSavepointsSupported, - .sqlNamedParametersSupported, - .sqlLocatorsUpdateCopy, - .sqlStoredFunctionsUsingCallSyntaxSupported, - ] -} - -#endif // swift(>=4.2) + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlInfo] = [ + .flightSqlServerName, + .flightSqlServerVersion, + .flightSqlServerArrowVersion, + .flightSqlServerReadOnly, + .flightSqlServerSql, + .flightSqlServerSubstrait, + .flightSqlServerSubstraitMinVersion, + .flightSqlServerSubstraitMaxVersion, + .flightSqlServerTransaction, + .flightSqlServerCancel, + .flightSqlServerStatementTimeout, + .flightSqlServerTransactionTimeout, + .sqlDdlCatalog, + .sqlDdlSchema, + .sqlDdlTable, + .sqlIdentifierCase, + .sqlIdentifierQuoteChar, + .sqlQuotedIdentifierCase, + .sqlAllTablesAreSelectable, + .sqlNullOrdering, + .sqlKeywords, + .sqlNumericFunctions, + .sqlStringFunctions, + .sqlSystemFunctions, + .sqlDatetimeFunctions, + .sqlSearchStringEscape, + .sqlExtraNameCharacters, + .sqlSupportsColumnAliasing, + .sqlNullPlusNullIsNull, + .sqlSupportsConvert, + .sqlSupportsTableCorrelationNames, + .sqlSupportsDifferentTableCorrelationNames, + .sqlSupportsExpressionsInOrderBy, + .sqlSupportsOrderByUnrelated, + .sqlSupportedGroupBy, + .sqlSupportsLikeEscapeClause, + .sqlSupportsNonNullableColumns, + .sqlSupportedGrammar, + .sqlAnsi92SupportedLevel, + 
.sqlSupportsIntegrityEnhancementFacility, + .sqlOuterJoinsSupportLevel, + .sqlSchemaTerm, + .sqlProcedureTerm, + .sqlCatalogTerm, + .sqlCatalogAtStart, + .sqlSchemasSupportedActions, + .sqlCatalogsSupportedActions, + .sqlSupportedPositionedCommands, + .sqlSelectForUpdateSupported, + .sqlStoredProceduresSupported, + .sqlSupportedSubqueries, + .sqlCorrelatedSubqueriesSupported, + .sqlSupportedUnions, + .sqlMaxBinaryLiteralLength, + .sqlMaxCharLiteralLength, + .sqlMaxColumnNameLength, + .sqlMaxColumnsInGroupBy, + .sqlMaxColumnsInIndex, + .sqlMaxColumnsInOrderBy, + .sqlMaxColumnsInSelect, + .sqlMaxColumnsInTable, + .sqlMaxConnections, + .sqlMaxCursorNameLength, + .sqlMaxIndexLength, + .sqlDbSchemaNameLength, + .sqlMaxProcedureNameLength, + .sqlMaxCatalogNameLength, + .sqlMaxRowSize, + .sqlMaxRowSizeIncludesBlobs, + .sqlMaxStatementLength, + .sqlMaxStatements, + .sqlMaxTableNameLength, + .sqlMaxTablesInSelect, + .sqlMaxUsernameLength, + .sqlDefaultTransactionIsolation, + .sqlTransactionsSupported, + .sqlSupportedTransactionsIsolationLevels, + .sqlDataDefinitionCausesTransactionCommit, + .sqlDataDefinitionsInTransactionsIgnored, + .sqlSupportedResultSetTypes, + .sqlSupportedConcurrenciesForResultSetUnspecified, + .sqlSupportedConcurrenciesForResultSetForwardOnly, + .sqlSupportedConcurrenciesForResultSetScrollSensitive, + .sqlSupportedConcurrenciesForResultSetScrollInsensitive, + .sqlBatchUpdatesSupported, + .sqlSavepointsSupported, + .sqlNamedParametersSupported, + .sqlLocatorsUpdateCopy, + .sqlStoredFunctionsUsingCallSyntaxSupported, + ] +} + +#endif // swift(>=4.2) /// The level of support for Flight SQL transaction RPCs. enum Arrow_Flight_Protocol_Sql_SqlSupportedTransaction: SwiftProtobuf.Enum { - typealias RawValue = Int - - /// Unknown/not indicated/no support - case none // = 0 + typealias RawValue = Int - /// Transactions, but not savepoints. - /// A savepoint is a mark within a transaction that can be individually - /// rolled back to. 
Not all databases support savepoints. - case transaction // = 1 + /// Unknown/not indicated/no support + case none // = 0 - /// Transactions and savepoints - case savepoint // = 2 - case UNRECOGNIZED(Int) + /// Transactions, but not savepoints. + /// A savepoint is a mark within a transaction that can be individually + /// rolled back to. Not all databases support savepoints. + case transaction // = 1 - init() { - self = .none - } + /// Transactions and savepoints + case savepoint // = 2 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .none - case 1: self = .transaction - case 2: self = .savepoint - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .none } - } - var rawValue: Int { - switch self { - case .none: return 0 - case .transaction: return 1 - case .savepoint: return 2 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .none + case 1: self = .transaction + case 2: self = .savepoint + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .none: return 0 + case .transaction: return 1 + case .savepoint: return 2 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedTransaction: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedTransaction] = [ - .none, - .transaction, - .savepoint, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. 
+ static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedTransaction] = [ + .none, + .transaction, + .savepoint, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportedCaseSensitivity: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlCaseSensitivityUnknown // = 0 - case sqlCaseSensitivityCaseInsensitive // = 1 - case sqlCaseSensitivityUppercase // = 2 - case sqlCaseSensitivityLowercase // = 3 - case UNRECOGNIZED(Int) - - init() { - self = .sqlCaseSensitivityUnknown - } + typealias RawValue = Int + case sqlCaseSensitivityUnknown // = 0 + case sqlCaseSensitivityCaseInsensitive // = 1 + case sqlCaseSensitivityUppercase // = 2 + case sqlCaseSensitivityLowercase // = 3 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlCaseSensitivityUnknown - case 1: self = .sqlCaseSensitivityCaseInsensitive - case 2: self = .sqlCaseSensitivityUppercase - case 3: self = .sqlCaseSensitivityLowercase - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlCaseSensitivityUnknown } - } - var rawValue: Int { - switch self { - case .sqlCaseSensitivityUnknown: return 0 - case .sqlCaseSensitivityCaseInsensitive: return 1 - case .sqlCaseSensitivityUppercase: return 2 - case .sqlCaseSensitivityLowercase: return 3 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlCaseSensitivityUnknown + case 1: self = .sqlCaseSensitivityCaseInsensitive + case 2: self = .sqlCaseSensitivityUppercase + case 3: self = .sqlCaseSensitivityLowercase + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlCaseSensitivityUnknown: return 0 + case .sqlCaseSensitivityCaseInsensitive: return 1 + case .sqlCaseSensitivityUppercase: return 2 + case .sqlCaseSensitivityLowercase: return 3 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedCaseSensitivity: CaseIterable 
{ - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedCaseSensitivity] = [ - .sqlCaseSensitivityUnknown, - .sqlCaseSensitivityCaseInsensitive, - .sqlCaseSensitivityUppercase, - .sqlCaseSensitivityLowercase, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedCaseSensitivity] = [ + .sqlCaseSensitivityUnknown, + .sqlCaseSensitivityCaseInsensitive, + .sqlCaseSensitivityUppercase, + .sqlCaseSensitivityLowercase, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlNullOrdering: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlNullsSortedHigh // = 0 - case sqlNullsSortedLow // = 1 - case sqlNullsSortedAtStart // = 2 - case sqlNullsSortedAtEnd // = 3 - case UNRECOGNIZED(Int) - - init() { - self = .sqlNullsSortedHigh - } + typealias RawValue = Int + case sqlNullsSortedHigh // = 0 + case sqlNullsSortedLow // = 1 + case sqlNullsSortedAtStart // = 2 + case sqlNullsSortedAtEnd // = 3 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlNullsSortedHigh - case 1: self = .sqlNullsSortedLow - case 2: self = .sqlNullsSortedAtStart - case 3: self = .sqlNullsSortedAtEnd - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlNullsSortedHigh } - } - var rawValue: Int { - switch self { - case .sqlNullsSortedHigh: return 0 - case .sqlNullsSortedLow: return 1 - case .sqlNullsSortedAtStart: return 2 - case .sqlNullsSortedAtEnd: return 3 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlNullsSortedHigh + case 1: self = .sqlNullsSortedLow + case 2: self = .sqlNullsSortedAtStart + case 3: self = .sqlNullsSortedAtEnd + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlNullsSortedHigh: return 0 + case .sqlNullsSortedLow: return 1 + case 
.sqlNullsSortedAtStart: return 2 + case .sqlNullsSortedAtEnd: return 3 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlNullOrdering: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlNullOrdering] = [ - .sqlNullsSortedHigh, - .sqlNullsSortedLow, - .sqlNullsSortedAtStart, - .sqlNullsSortedAtEnd, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlNullOrdering] = [ + .sqlNullsSortedHigh, + .sqlNullsSortedLow, + .sqlNullsSortedAtStart, + .sqlNullsSortedAtEnd, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SupportedSqlGrammar: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlMinimumGrammar // = 0 - case sqlCoreGrammar // = 1 - case sqlExtendedGrammar // = 2 - case UNRECOGNIZED(Int) - - init() { - self = .sqlMinimumGrammar - } + typealias RawValue = Int + case sqlMinimumGrammar // = 0 + case sqlCoreGrammar // = 1 + case sqlExtendedGrammar // = 2 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlMinimumGrammar - case 1: self = .sqlCoreGrammar - case 2: self = .sqlExtendedGrammar - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlMinimumGrammar } - } - var rawValue: Int { - switch self { - case .sqlMinimumGrammar: return 0 - case .sqlCoreGrammar: return 1 - case .sqlExtendedGrammar: return 2 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlMinimumGrammar + case 1: self = .sqlCoreGrammar + case 2: self = .sqlExtendedGrammar + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlMinimumGrammar: return 0 + case .sqlCoreGrammar: return 1 + case .sqlExtendedGrammar: return 2 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension 
Arrow_Flight_Protocol_Sql_SupportedSqlGrammar: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SupportedSqlGrammar] = [ - .sqlMinimumGrammar, - .sqlCoreGrammar, - .sqlExtendedGrammar, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SupportedSqlGrammar] = [ + .sqlMinimumGrammar, + .sqlCoreGrammar, + .sqlExtendedGrammar, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SupportedAnsi92SqlGrammarLevel: SwiftProtobuf.Enum { - typealias RawValue = Int - case ansi92EntrySql // = 0 - case ansi92IntermediateSql // = 1 - case ansi92FullSql // = 2 - case UNRECOGNIZED(Int) - - init() { - self = .ansi92EntrySql - } + typealias RawValue = Int + case ansi92EntrySql // = 0 + case ansi92IntermediateSql // = 1 + case ansi92FullSql // = 2 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .ansi92EntrySql - case 1: self = .ansi92IntermediateSql - case 2: self = .ansi92FullSql - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .ansi92EntrySql } - } - var rawValue: Int { - switch self { - case .ansi92EntrySql: return 0 - case .ansi92IntermediateSql: return 1 - case .ansi92FullSql: return 2 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .ansi92EntrySql + case 1: self = .ansi92IntermediateSql + case 2: self = .ansi92FullSql + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .ansi92EntrySql: return 0 + case .ansi92IntermediateSql: return 1 + case .ansi92FullSql: return 2 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SupportedAnsi92SqlGrammarLevel: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. 
- static var allCases: [Arrow_Flight_Protocol_Sql_SupportedAnsi92SqlGrammarLevel] = [ - .ansi92EntrySql, - .ansi92IntermediateSql, - .ansi92FullSql, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SupportedAnsi92SqlGrammarLevel] = [ + .ansi92EntrySql, + .ansi92IntermediateSql, + .ansi92FullSql, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlOuterJoinsSupportLevel: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlJoinsUnsupported // = 0 - case sqlLimitedOuterJoins // = 1 - case sqlFullOuterJoins // = 2 - case UNRECOGNIZED(Int) - - init() { - self = .sqlJoinsUnsupported - } + typealias RawValue = Int + case sqlJoinsUnsupported // = 0 + case sqlLimitedOuterJoins // = 1 + case sqlFullOuterJoins // = 2 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlJoinsUnsupported - case 1: self = .sqlLimitedOuterJoins - case 2: self = .sqlFullOuterJoins - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlJoinsUnsupported } - } - var rawValue: Int { - switch self { - case .sqlJoinsUnsupported: return 0 - case .sqlLimitedOuterJoins: return 1 - case .sqlFullOuterJoins: return 2 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlJoinsUnsupported + case 1: self = .sqlLimitedOuterJoins + case 2: self = .sqlFullOuterJoins + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlJoinsUnsupported: return 0 + case .sqlLimitedOuterJoins: return 1 + case .sqlFullOuterJoins: return 2 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlOuterJoinsSupportLevel: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. 
- static var allCases: [Arrow_Flight_Protocol_Sql_SqlOuterJoinsSupportLevel] = [ - .sqlJoinsUnsupported, - .sqlLimitedOuterJoins, - .sqlFullOuterJoins, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlOuterJoinsSupportLevel] = [ + .sqlJoinsUnsupported, + .sqlLimitedOuterJoins, + .sqlFullOuterJoins, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportedGroupBy: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlGroupByUnrelated // = 0 - case sqlGroupByBeyondSelect // = 1 - case UNRECOGNIZED(Int) - - init() { - self = .sqlGroupByUnrelated - } + typealias RawValue = Int + case sqlGroupByUnrelated // = 0 + case sqlGroupByBeyondSelect // = 1 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlGroupByUnrelated - case 1: self = .sqlGroupByBeyondSelect - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlGroupByUnrelated } - } - var rawValue: Int { - switch self { - case .sqlGroupByUnrelated: return 0 - case .sqlGroupByBeyondSelect: return 1 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlGroupByUnrelated + case 1: self = .sqlGroupByBeyondSelect + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlGroupByUnrelated: return 0 + case .sqlGroupByBeyondSelect: return 1 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedGroupBy: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedGroupBy] = [ - .sqlGroupByUnrelated, - .sqlGroupByBeyondSelect, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. 
+ static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedGroupBy] = [ + .sqlGroupByUnrelated, + .sqlGroupByBeyondSelect, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportedElementActions: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlElementInProcedureCalls // = 0 - case sqlElementInIndexDefinitions // = 1 - case sqlElementInPrivilegeDefinitions // = 2 - case UNRECOGNIZED(Int) - - init() { - self = .sqlElementInProcedureCalls - } + typealias RawValue = Int + case sqlElementInProcedureCalls // = 0 + case sqlElementInIndexDefinitions // = 1 + case sqlElementInPrivilegeDefinitions // = 2 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlElementInProcedureCalls - case 1: self = .sqlElementInIndexDefinitions - case 2: self = .sqlElementInPrivilegeDefinitions - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlElementInProcedureCalls } - } - var rawValue: Int { - switch self { - case .sqlElementInProcedureCalls: return 0 - case .sqlElementInIndexDefinitions: return 1 - case .sqlElementInPrivilegeDefinitions: return 2 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlElementInProcedureCalls + case 1: self = .sqlElementInIndexDefinitions + case 2: self = .sqlElementInPrivilegeDefinitions + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlElementInProcedureCalls: return 0 + case .sqlElementInIndexDefinitions: return 1 + case .sqlElementInPrivilegeDefinitions: return 2 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedElementActions: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. 
- static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedElementActions] = [ - .sqlElementInProcedureCalls, - .sqlElementInIndexDefinitions, - .sqlElementInPrivilegeDefinitions, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedElementActions] = [ + .sqlElementInProcedureCalls, + .sqlElementInIndexDefinitions, + .sqlElementInPrivilegeDefinitions, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportedPositionedCommands: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlPositionedDelete // = 0 - case sqlPositionedUpdate // = 1 - case UNRECOGNIZED(Int) - - init() { - self = .sqlPositionedDelete - } + typealias RawValue = Int + case sqlPositionedDelete // = 0 + case sqlPositionedUpdate // = 1 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlPositionedDelete - case 1: self = .sqlPositionedUpdate - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlPositionedDelete } - } - var rawValue: Int { - switch self { - case .sqlPositionedDelete: return 0 - case .sqlPositionedUpdate: return 1 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlPositionedDelete + case 1: self = .sqlPositionedUpdate + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlPositionedDelete: return 0 + case .sqlPositionedUpdate: return 1 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedPositionedCommands: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedPositionedCommands] = [ - .sqlPositionedDelete, - .sqlPositionedUpdate, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. 
+ static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedPositionedCommands] = [ + .sqlPositionedDelete, + .sqlPositionedUpdate, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportedSubqueries: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlSubqueriesInComparisons // = 0 - case sqlSubqueriesInExists // = 1 - case sqlSubqueriesInIns // = 2 - case sqlSubqueriesInQuantifieds // = 3 - case UNRECOGNIZED(Int) - - init() { - self = .sqlSubqueriesInComparisons - } + typealias RawValue = Int + case sqlSubqueriesInComparisons // = 0 + case sqlSubqueriesInExists // = 1 + case sqlSubqueriesInIns // = 2 + case sqlSubqueriesInQuantifieds // = 3 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlSubqueriesInComparisons - case 1: self = .sqlSubqueriesInExists - case 2: self = .sqlSubqueriesInIns - case 3: self = .sqlSubqueriesInQuantifieds - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlSubqueriesInComparisons } - } - var rawValue: Int { - switch self { - case .sqlSubqueriesInComparisons: return 0 - case .sqlSubqueriesInExists: return 1 - case .sqlSubqueriesInIns: return 2 - case .sqlSubqueriesInQuantifieds: return 3 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlSubqueriesInComparisons + case 1: self = .sqlSubqueriesInExists + case 2: self = .sqlSubqueriesInIns + case 3: self = .sqlSubqueriesInQuantifieds + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlSubqueriesInComparisons: return 0 + case .sqlSubqueriesInExists: return 1 + case .sqlSubqueriesInIns: return 2 + case .sqlSubqueriesInQuantifieds: return 3 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedSubqueries: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. 
- static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedSubqueries] = [ - .sqlSubqueriesInComparisons, - .sqlSubqueriesInExists, - .sqlSubqueriesInIns, - .sqlSubqueriesInQuantifieds, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedSubqueries] = [ + .sqlSubqueriesInComparisons, + .sqlSubqueriesInExists, + .sqlSubqueriesInIns, + .sqlSubqueriesInQuantifieds, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportedUnions: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlUnion // = 0 - case sqlUnionAll // = 1 - case UNRECOGNIZED(Int) - - init() { - self = .sqlUnion - } + typealias RawValue = Int + case sqlUnion // = 0 + case sqlUnionAll // = 1 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlUnion - case 1: self = .sqlUnionAll - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlUnion } - } - var rawValue: Int { - switch self { - case .sqlUnion: return 0 - case .sqlUnionAll: return 1 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlUnion + case 1: self = .sqlUnionAll + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlUnion: return 0 + case .sqlUnionAll: return 1 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedUnions: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedUnions] = [ - .sqlUnion, - .sqlUnionAll, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. 
+ static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedUnions] = [ + .sqlUnion, + .sqlUnionAll, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlTransactionIsolationLevel: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlTransactionNone // = 0 - case sqlTransactionReadUncommitted // = 1 - case sqlTransactionReadCommitted // = 2 - case sqlTransactionRepeatableRead // = 3 - case sqlTransactionSerializable // = 4 - case UNRECOGNIZED(Int) - - init() { - self = .sqlTransactionNone - } - - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlTransactionNone - case 1: self = .sqlTransactionReadUncommitted - case 2: self = .sqlTransactionReadCommitted - case 3: self = .sqlTransactionRepeatableRead - case 4: self = .sqlTransactionSerializable - default: self = .UNRECOGNIZED(rawValue) - } - } - - var rawValue: Int { - switch self { - case .sqlTransactionNone: return 0 - case .sqlTransactionReadUncommitted: return 1 - case .sqlTransactionReadCommitted: return 2 - case .sqlTransactionRepeatableRead: return 3 - case .sqlTransactionSerializable: return 4 - case .UNRECOGNIZED(let i): return i - } - } + typealias RawValue = Int + case sqlTransactionNone // = 0 + case sqlTransactionReadUncommitted // = 1 + case sqlTransactionReadCommitted // = 2 + case sqlTransactionRepeatableRead // = 3 + case sqlTransactionSerializable // = 4 + case UNRECOGNIZED(Int) + + init() { + self = .sqlTransactionNone + } + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlTransactionNone + case 1: self = .sqlTransactionReadUncommitted + case 2: self = .sqlTransactionReadCommitted + case 3: self = .sqlTransactionRepeatableRead + case 4: self = .sqlTransactionSerializable + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .sqlTransactionNone: return 0 + case .sqlTransactionReadUncommitted: return 1 + case .sqlTransactionReadCommitted: return 2 + case .sqlTransactionRepeatableRead: 
return 3 + case .sqlTransactionSerializable: return 4 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlTransactionIsolationLevel: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlTransactionIsolationLevel] = [ - .sqlTransactionNone, - .sqlTransactionReadUncommitted, - .sqlTransactionReadCommitted, - .sqlTransactionRepeatableRead, - .sqlTransactionSerializable, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlTransactionIsolationLevel] = [ + .sqlTransactionNone, + .sqlTransactionReadUncommitted, + .sqlTransactionReadCommitted, + .sqlTransactionRepeatableRead, + .sqlTransactionSerializable, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportedTransactions: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlTransactionUnspecified // = 0 - case sqlDataDefinitionTransactions // = 1 - case sqlDataManipulationTransactions // = 2 - case UNRECOGNIZED(Int) - - init() { - self = .sqlTransactionUnspecified - } + typealias RawValue = Int + case sqlTransactionUnspecified // = 0 + case sqlDataDefinitionTransactions // = 1 + case sqlDataManipulationTransactions // = 2 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlTransactionUnspecified - case 1: self = .sqlDataDefinitionTransactions - case 2: self = .sqlDataManipulationTransactions - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlTransactionUnspecified } - } - var rawValue: Int { - switch self { - case .sqlTransactionUnspecified: return 0 - case .sqlDataDefinitionTransactions: return 1 - case .sqlDataManipulationTransactions: return 2 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlTransactionUnspecified + case 1: self = 
.sqlDataDefinitionTransactions + case 2: self = .sqlDataManipulationTransactions + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlTransactionUnspecified: return 0 + case .sqlDataDefinitionTransactions: return 1 + case .sqlDataManipulationTransactions: return 2 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedTransactions: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedTransactions] = [ - .sqlTransactionUnspecified, - .sqlDataDefinitionTransactions, - .sqlDataManipulationTransactions, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedTransactions] = [ + .sqlTransactionUnspecified, + .sqlDataDefinitionTransactions, + .sqlDataManipulationTransactions, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportedResultSetType: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlResultSetTypeUnspecified // = 0 - case sqlResultSetTypeForwardOnly // = 1 - case sqlResultSetTypeScrollInsensitive // = 2 - case sqlResultSetTypeScrollSensitive // = 3 - case UNRECOGNIZED(Int) - - init() { - self = .sqlResultSetTypeUnspecified - } + typealias RawValue = Int + case sqlResultSetTypeUnspecified // = 0 + case sqlResultSetTypeForwardOnly // = 1 + case sqlResultSetTypeScrollInsensitive // = 2 + case sqlResultSetTypeScrollSensitive // = 3 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlResultSetTypeUnspecified - case 1: self = .sqlResultSetTypeForwardOnly - case 2: self = .sqlResultSetTypeScrollInsensitive - case 3: self = .sqlResultSetTypeScrollSensitive - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlResultSetTypeUnspecified } - } - var rawValue: Int { - switch self { - case 
.sqlResultSetTypeUnspecified: return 0 - case .sqlResultSetTypeForwardOnly: return 1 - case .sqlResultSetTypeScrollInsensitive: return 2 - case .sqlResultSetTypeScrollSensitive: return 3 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlResultSetTypeUnspecified + case 1: self = .sqlResultSetTypeForwardOnly + case 2: self = .sqlResultSetTypeScrollInsensitive + case 3: self = .sqlResultSetTypeScrollSensitive + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlResultSetTypeUnspecified: return 0 + case .sqlResultSetTypeForwardOnly: return 1 + case .sqlResultSetTypeScrollInsensitive: return 2 + case .sqlResultSetTypeScrollSensitive: return 3 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedResultSetType: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedResultSetType] = [ - .sqlResultSetTypeUnspecified, - .sqlResultSetTypeForwardOnly, - .sqlResultSetTypeScrollInsensitive, - .sqlResultSetTypeScrollSensitive, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. 
+ static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedResultSetType] = [ + .sqlResultSetTypeUnspecified, + .sqlResultSetTypeForwardOnly, + .sqlResultSetTypeScrollInsensitive, + .sqlResultSetTypeScrollSensitive, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportedResultSetConcurrency: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlResultSetConcurrencyUnspecified // = 0 - case sqlResultSetConcurrencyReadOnly // = 1 - case sqlResultSetConcurrencyUpdatable // = 2 - case UNRECOGNIZED(Int) - - init() { - self = .sqlResultSetConcurrencyUnspecified - } + typealias RawValue = Int + case sqlResultSetConcurrencyUnspecified // = 0 + case sqlResultSetConcurrencyReadOnly // = 1 + case sqlResultSetConcurrencyUpdatable // = 2 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlResultSetConcurrencyUnspecified - case 1: self = .sqlResultSetConcurrencyReadOnly - case 2: self = .sqlResultSetConcurrencyUpdatable - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .sqlResultSetConcurrencyUnspecified } - } - var rawValue: Int { - switch self { - case .sqlResultSetConcurrencyUnspecified: return 0 - case .sqlResultSetConcurrencyReadOnly: return 1 - case .sqlResultSetConcurrencyUpdatable: return 2 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlResultSetConcurrencyUnspecified + case 1: self = .sqlResultSetConcurrencyReadOnly + case 2: self = .sqlResultSetConcurrencyUpdatable + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .sqlResultSetConcurrencyUnspecified: return 0 + case .sqlResultSetConcurrencyReadOnly: return 1 + case .sqlResultSetConcurrencyUpdatable: return 2 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportedResultSetConcurrency: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED 
case. - static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedResultSetConcurrency] = [ - .sqlResultSetConcurrencyUnspecified, - .sqlResultSetConcurrencyReadOnly, - .sqlResultSetConcurrencyUpdatable, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedResultSetConcurrency] = [ + .sqlResultSetConcurrencyUnspecified, + .sqlResultSetConcurrencyReadOnly, + .sqlResultSetConcurrencyUpdatable, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_SqlSupportsConvert: SwiftProtobuf.Enum { - typealias RawValue = Int - case sqlConvertBigint // = 0 - case sqlConvertBinary // = 1 - case sqlConvertBit // = 2 - case sqlConvertChar // = 3 - case sqlConvertDate // = 4 - case sqlConvertDecimal // = 5 - case sqlConvertFloat // = 6 - case sqlConvertInteger // = 7 - case sqlConvertIntervalDayTime // = 8 - case sqlConvertIntervalYearMonth // = 9 - case sqlConvertLongvarbinary // = 10 - case sqlConvertLongvarchar // = 11 - case sqlConvertNumeric // = 12 - case sqlConvertReal // = 13 - case sqlConvertSmallint // = 14 - case sqlConvertTime // = 15 - case sqlConvertTimestamp // = 16 - case sqlConvertTinyint // = 17 - case sqlConvertVarbinary // = 18 - case sqlConvertVarchar // = 19 - case UNRECOGNIZED(Int) - - init() { - self = .sqlConvertBigint - } - - init?(rawValue: Int) { - switch rawValue { - case 0: self = .sqlConvertBigint - case 1: self = .sqlConvertBinary - case 2: self = .sqlConvertBit - case 3: self = .sqlConvertChar - case 4: self = .sqlConvertDate - case 5: self = .sqlConvertDecimal - case 6: self = .sqlConvertFloat - case 7: self = .sqlConvertInteger - case 8: self = .sqlConvertIntervalDayTime - case 9: self = .sqlConvertIntervalYearMonth - case 10: self = .sqlConvertLongvarbinary - case 11: self = .sqlConvertLongvarchar - case 12: self = .sqlConvertNumeric - case 13: self = .sqlConvertReal - case 14: self = .sqlConvertSmallint - case 15: self = 
.sqlConvertTime - case 16: self = .sqlConvertTimestamp - case 17: self = .sqlConvertTinyint - case 18: self = .sqlConvertVarbinary - case 19: self = .sqlConvertVarchar - default: self = .UNRECOGNIZED(rawValue) - } - } - - var rawValue: Int { - switch self { - case .sqlConvertBigint: return 0 - case .sqlConvertBinary: return 1 - case .sqlConvertBit: return 2 - case .sqlConvertChar: return 3 - case .sqlConvertDate: return 4 - case .sqlConvertDecimal: return 5 - case .sqlConvertFloat: return 6 - case .sqlConvertInteger: return 7 - case .sqlConvertIntervalDayTime: return 8 - case .sqlConvertIntervalYearMonth: return 9 - case .sqlConvertLongvarbinary: return 10 - case .sqlConvertLongvarchar: return 11 - case .sqlConvertNumeric: return 12 - case .sqlConvertReal: return 13 - case .sqlConvertSmallint: return 14 - case .sqlConvertTime: return 15 - case .sqlConvertTimestamp: return 16 - case .sqlConvertTinyint: return 17 - case .sqlConvertVarbinary: return 18 - case .sqlConvertVarchar: return 19 - case .UNRECOGNIZED(let i): return i - } - } + typealias RawValue = Int + case sqlConvertBigint // = 0 + case sqlConvertBinary // = 1 + case sqlConvertBit // = 2 + case sqlConvertChar // = 3 + case sqlConvertDate // = 4 + case sqlConvertDecimal // = 5 + case sqlConvertFloat // = 6 + case sqlConvertInteger // = 7 + case sqlConvertIntervalDayTime // = 8 + case sqlConvertIntervalYearMonth // = 9 + case sqlConvertLongvarbinary // = 10 + case sqlConvertLongvarchar // = 11 + case sqlConvertNumeric // = 12 + case sqlConvertReal // = 13 + case sqlConvertSmallint // = 14 + case sqlConvertTime // = 15 + case sqlConvertTimestamp // = 16 + case sqlConvertTinyint // = 17 + case sqlConvertVarbinary // = 18 + case sqlConvertVarchar // = 19 + case UNRECOGNIZED(Int) + + init() { + self = .sqlConvertBigint + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .sqlConvertBigint + case 1: self = .sqlConvertBinary + case 2: self = .sqlConvertBit + case 3: self = .sqlConvertChar + case 4: 
self = .sqlConvertDate + case 5: self = .sqlConvertDecimal + case 6: self = .sqlConvertFloat + case 7: self = .sqlConvertInteger + case 8: self = .sqlConvertIntervalDayTime + case 9: self = .sqlConvertIntervalYearMonth + case 10: self = .sqlConvertLongvarbinary + case 11: self = .sqlConvertLongvarchar + case 12: self = .sqlConvertNumeric + case 13: self = .sqlConvertReal + case 14: self = .sqlConvertSmallint + case 15: self = .sqlConvertTime + case 16: self = .sqlConvertTimestamp + case 17: self = .sqlConvertTinyint + case 18: self = .sqlConvertVarbinary + case 19: self = .sqlConvertVarchar + default: self = .UNRECOGNIZED(rawValue) + } + } + var rawValue: Int { + switch self { + case .sqlConvertBigint: return 0 + case .sqlConvertBinary: return 1 + case .sqlConvertBit: return 2 + case .sqlConvertChar: return 3 + case .sqlConvertDate: return 4 + case .sqlConvertDecimal: return 5 + case .sqlConvertFloat: return 6 + case .sqlConvertInteger: return 7 + case .sqlConvertIntervalDayTime: return 8 + case .sqlConvertIntervalYearMonth: return 9 + case .sqlConvertLongvarbinary: return 10 + case .sqlConvertLongvarchar: return 11 + case .sqlConvertNumeric: return 12 + case .sqlConvertReal: return 13 + case .sqlConvertSmallint: return 14 + case .sqlConvertTime: return 15 + case .sqlConvertTimestamp: return 16 + case .sqlConvertTinyint: return 17 + case .sqlConvertVarbinary: return 18 + case .sqlConvertVarchar: return 19 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_SqlSupportsConvert: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. 
- static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportsConvert] = [ - .sqlConvertBigint, - .sqlConvertBinary, - .sqlConvertBit, - .sqlConvertChar, - .sqlConvertDate, - .sqlConvertDecimal, - .sqlConvertFloat, - .sqlConvertInteger, - .sqlConvertIntervalDayTime, - .sqlConvertIntervalYearMonth, - .sqlConvertLongvarbinary, - .sqlConvertLongvarchar, - .sqlConvertNumeric, - .sqlConvertReal, - .sqlConvertSmallint, - .sqlConvertTime, - .sqlConvertTimestamp, - .sqlConvertTinyint, - .sqlConvertVarbinary, - .sqlConvertVarchar, - ] -} - -#endif // swift(>=4.2) - -///* + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportsConvert] = [ + .sqlConvertBigint, + .sqlConvertBinary, + .sqlConvertBit, + .sqlConvertChar, + .sqlConvertDate, + .sqlConvertDecimal, + .sqlConvertFloat, + .sqlConvertInteger, + .sqlConvertIntervalDayTime, + .sqlConvertIntervalYearMonth, + .sqlConvertLongvarbinary, + .sqlConvertLongvarchar, + .sqlConvertNumeric, + .sqlConvertReal, + .sqlConvertSmallint, + .sqlConvertTime, + .sqlConvertTimestamp, + .sqlConvertTinyint, + .sqlConvertVarbinary, + .sqlConvertVarchar, + ] +} + +#endif // swift(>=4.2) + +/// * /// The JDBC/ODBC-defined type of any object. /// All the values here are the same as in the JDBC and ODBC specs. 
enum Arrow_Flight_Protocol_Sql_XdbcDataType: SwiftProtobuf.Enum { - typealias RawValue = Int - case xdbcUnknownType // = 0 - case xdbcChar // = 1 - case xdbcNumeric // = 2 - case xdbcDecimal // = 3 - case xdbcInteger // = 4 - case xdbcSmallint // = 5 - case xdbcFloat // = 6 - case xdbcReal // = 7 - case xdbcDouble // = 8 - case xdbcDatetime // = 9 - case xdbcInterval // = 10 - case xdbcVarchar // = 12 - case xdbcDate // = 91 - case xdbcTime // = 92 - case xdbcTimestamp // = 93 - case xdbcLongvarchar // = -1 - case xdbcBinary // = -2 - case xdbcVarbinary // = -3 - case xdbcLongvarbinary // = -4 - case xdbcBigint // = -5 - case xdbcTinyint // = -6 - case xdbcBit // = -7 - case xdbcWchar // = -8 - case xdbcWvarchar // = -9 - case UNRECOGNIZED(Int) - - init() { - self = .xdbcUnknownType - } - - init?(rawValue: Int) { - switch rawValue { - case -9: self = .xdbcWvarchar - case -8: self = .xdbcWchar - case -7: self = .xdbcBit - case -6: self = .xdbcTinyint - case -5: self = .xdbcBigint - case -4: self = .xdbcLongvarbinary - case -3: self = .xdbcVarbinary - case -2: self = .xdbcBinary - case -1: self = .xdbcLongvarchar - case 0: self = .xdbcUnknownType - case 1: self = .xdbcChar - case 2: self = .xdbcNumeric - case 3: self = .xdbcDecimal - case 4: self = .xdbcInteger - case 5: self = .xdbcSmallint - case 6: self = .xdbcFloat - case 7: self = .xdbcReal - case 8: self = .xdbcDouble - case 9: self = .xdbcDatetime - case 10: self = .xdbcInterval - case 12: self = .xdbcVarchar - case 91: self = .xdbcDate - case 92: self = .xdbcTime - case 93: self = .xdbcTimestamp - default: self = .UNRECOGNIZED(rawValue) - } - } - - var rawValue: Int { - switch self { - case .xdbcWvarchar: return -9 - case .xdbcWchar: return -8 - case .xdbcBit: return -7 - case .xdbcTinyint: return -6 - case .xdbcBigint: return -5 - case .xdbcLongvarbinary: return -4 - case .xdbcVarbinary: return -3 - case .xdbcBinary: return -2 - case .xdbcLongvarchar: return -1 - case .xdbcUnknownType: return 0 - case 
.xdbcChar: return 1 - case .xdbcNumeric: return 2 - case .xdbcDecimal: return 3 - case .xdbcInteger: return 4 - case .xdbcSmallint: return 5 - case .xdbcFloat: return 6 - case .xdbcReal: return 7 - case .xdbcDouble: return 8 - case .xdbcDatetime: return 9 - case .xdbcInterval: return 10 - case .xdbcVarchar: return 12 - case .xdbcDate: return 91 - case .xdbcTime: return 92 - case .xdbcTimestamp: return 93 - case .UNRECOGNIZED(let i): return i - } - } + typealias RawValue = Int + case xdbcUnknownType // = 0 + case xdbcChar // = 1 + case xdbcNumeric // = 2 + case xdbcDecimal // = 3 + case xdbcInteger // = 4 + case xdbcSmallint // = 5 + case xdbcFloat // = 6 + case xdbcReal // = 7 + case xdbcDouble // = 8 + case xdbcDatetime // = 9 + case xdbcInterval // = 10 + case xdbcVarchar // = 12 + case xdbcDate // = 91 + case xdbcTime // = 92 + case xdbcTimestamp // = 93 + case xdbcLongvarchar // = -1 + case xdbcBinary // = -2 + case xdbcVarbinary // = -3 + case xdbcLongvarbinary // = -4 + case xdbcBigint // = -5 + case xdbcTinyint // = -6 + case xdbcBit // = -7 + case xdbcWchar // = -8 + case xdbcWvarchar // = -9 + case UNRECOGNIZED(Int) + init() { + self = .xdbcUnknownType + } + + init?(rawValue: Int) { + switch rawValue { + case -9: self = .xdbcWvarchar + case -8: self = .xdbcWchar + case -7: self = .xdbcBit + case -6: self = .xdbcTinyint + case -5: self = .xdbcBigint + case -4: self = .xdbcLongvarbinary + case -3: self = .xdbcVarbinary + case -2: self = .xdbcBinary + case -1: self = .xdbcLongvarchar + case 0: self = .xdbcUnknownType + case 1: self = .xdbcChar + case 2: self = .xdbcNumeric + case 3: self = .xdbcDecimal + case 4: self = .xdbcInteger + case 5: self = .xdbcSmallint + case 6: self = .xdbcFloat + case 7: self = .xdbcReal + case 8: self = .xdbcDouble + case 9: self = .xdbcDatetime + case 10: self = .xdbcInterval + case 12: self = .xdbcVarchar + case 91: self = .xdbcDate + case 92: self = .xdbcTime + case 93: self = .xdbcTimestamp + default: self = 
.UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .xdbcWvarchar: return -9 + case .xdbcWchar: return -8 + case .xdbcBit: return -7 + case .xdbcTinyint: return -6 + case .xdbcBigint: return -5 + case .xdbcLongvarbinary: return -4 + case .xdbcVarbinary: return -3 + case .xdbcBinary: return -2 + case .xdbcLongvarchar: return -1 + case .xdbcUnknownType: return 0 + case .xdbcChar: return 1 + case .xdbcNumeric: return 2 + case .xdbcDecimal: return 3 + case .xdbcInteger: return 4 + case .xdbcSmallint: return 5 + case .xdbcFloat: return 6 + case .xdbcReal: return 7 + case .xdbcDouble: return 8 + case .xdbcDatetime: return 9 + case .xdbcInterval: return 10 + case .xdbcVarchar: return 12 + case .xdbcDate: return 91 + case .xdbcTime: return 92 + case .xdbcTimestamp: return 93 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_XdbcDataType: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_XdbcDataType] = [ - .xdbcUnknownType, - .xdbcChar, - .xdbcNumeric, - .xdbcDecimal, - .xdbcInteger, - .xdbcSmallint, - .xdbcFloat, - .xdbcReal, - .xdbcDouble, - .xdbcDatetime, - .xdbcInterval, - .xdbcVarchar, - .xdbcDate, - .xdbcTime, - .xdbcTimestamp, - .xdbcLongvarchar, - .xdbcBinary, - .xdbcVarbinary, - .xdbcLongvarbinary, - .xdbcBigint, - .xdbcTinyint, - .xdbcBit, - .xdbcWchar, - .xdbcWvarchar, - ] -} - -#endif // swift(>=4.2) - -///* + // The compiler won't synthesize support with the UNRECOGNIZED case. 
+ static var allCases: [Arrow_Flight_Protocol_Sql_XdbcDataType] = [ + .xdbcUnknownType, + .xdbcChar, + .xdbcNumeric, + .xdbcDecimal, + .xdbcInteger, + .xdbcSmallint, + .xdbcFloat, + .xdbcReal, + .xdbcDouble, + .xdbcDatetime, + .xdbcInterval, + .xdbcVarchar, + .xdbcDate, + .xdbcTime, + .xdbcTimestamp, + .xdbcLongvarchar, + .xdbcBinary, + .xdbcVarbinary, + .xdbcLongvarbinary, + .xdbcBigint, + .xdbcTinyint, + .xdbcBit, + .xdbcWchar, + .xdbcWvarchar, + ] +} + +#endif // swift(>=4.2) + +/// * /// Detailed subtype information for XDBC_TYPE_DATETIME and XDBC_TYPE_INTERVAL. enum Arrow_Flight_Protocol_Sql_XdbcDatetimeSubcode: SwiftProtobuf.Enum { - typealias RawValue = Int - case xdbcSubcodeUnknown // = 0 - case xdbcSubcodeYear // = 1 - static let xdbcSubcodeDate = xdbcSubcodeYear - case xdbcSubcodeTime // = 2 - static let xdbcSubcodeMonth = xdbcSubcodeTime - case xdbcSubcodeTimestamp // = 3 - static let xdbcSubcodeDay = xdbcSubcodeTimestamp - case xdbcSubcodeTimeWithTimezone // = 4 - static let xdbcSubcodeHour = xdbcSubcodeTimeWithTimezone - case xdbcSubcodeTimestampWithTimezone // = 5 - static let xdbcSubcodeMinute = xdbcSubcodeTimestampWithTimezone - case xdbcSubcodeSecond // = 6 - case xdbcSubcodeYearToMonth // = 7 - case xdbcSubcodeDayToHour // = 8 - case xdbcSubcodeDayToMinute // = 9 - case xdbcSubcodeDayToSecond // = 10 - case xdbcSubcodeHourToMinute // = 11 - case xdbcSubcodeHourToSecond // = 12 - case xdbcSubcodeMinuteToSecond // = 13 - case xdbcSubcodeIntervalYear // = 101 - case xdbcSubcodeIntervalMonth // = 102 - case xdbcSubcodeIntervalDay // = 103 - case xdbcSubcodeIntervalHour // = 104 - case xdbcSubcodeIntervalMinute // = 105 - case xdbcSubcodeIntervalSecond // = 106 - case xdbcSubcodeIntervalYearToMonth // = 107 - case xdbcSubcodeIntervalDayToHour // = 108 - case xdbcSubcodeIntervalDayToMinute // = 109 - case xdbcSubcodeIntervalDayToSecond // = 110 - case xdbcSubcodeIntervalHourToMinute // = 111 - case xdbcSubcodeIntervalHourToSecond // = 112 - case 
xdbcSubcodeIntervalMinuteToSecond // = 113 - case UNRECOGNIZED(Int) - - init() { - self = .xdbcSubcodeUnknown - } - - init?(rawValue: Int) { - switch rawValue { - case 0: self = .xdbcSubcodeUnknown - case 1: self = .xdbcSubcodeYear - case 2: self = .xdbcSubcodeTime - case 3: self = .xdbcSubcodeTimestamp - case 4: self = .xdbcSubcodeTimeWithTimezone - case 5: self = .xdbcSubcodeTimestampWithTimezone - case 6: self = .xdbcSubcodeSecond - case 7: self = .xdbcSubcodeYearToMonth - case 8: self = .xdbcSubcodeDayToHour - case 9: self = .xdbcSubcodeDayToMinute - case 10: self = .xdbcSubcodeDayToSecond - case 11: self = .xdbcSubcodeHourToMinute - case 12: self = .xdbcSubcodeHourToSecond - case 13: self = .xdbcSubcodeMinuteToSecond - case 101: self = .xdbcSubcodeIntervalYear - case 102: self = .xdbcSubcodeIntervalMonth - case 103: self = .xdbcSubcodeIntervalDay - case 104: self = .xdbcSubcodeIntervalHour - case 105: self = .xdbcSubcodeIntervalMinute - case 106: self = .xdbcSubcodeIntervalSecond - case 107: self = .xdbcSubcodeIntervalYearToMonth - case 108: self = .xdbcSubcodeIntervalDayToHour - case 109: self = .xdbcSubcodeIntervalDayToMinute - case 110: self = .xdbcSubcodeIntervalDayToSecond - case 111: self = .xdbcSubcodeIntervalHourToMinute - case 112: self = .xdbcSubcodeIntervalHourToSecond - case 113: self = .xdbcSubcodeIntervalMinuteToSecond - default: self = .UNRECOGNIZED(rawValue) - } - } - - var rawValue: Int { - switch self { - case .xdbcSubcodeUnknown: return 0 - case .xdbcSubcodeYear: return 1 - case .xdbcSubcodeTime: return 2 - case .xdbcSubcodeTimestamp: return 3 - case .xdbcSubcodeTimeWithTimezone: return 4 - case .xdbcSubcodeTimestampWithTimezone: return 5 - case .xdbcSubcodeSecond: return 6 - case .xdbcSubcodeYearToMonth: return 7 - case .xdbcSubcodeDayToHour: return 8 - case .xdbcSubcodeDayToMinute: return 9 - case .xdbcSubcodeDayToSecond: return 10 - case .xdbcSubcodeHourToMinute: return 11 - case .xdbcSubcodeHourToSecond: return 12 - case 
.xdbcSubcodeMinuteToSecond: return 13 - case .xdbcSubcodeIntervalYear: return 101 - case .xdbcSubcodeIntervalMonth: return 102 - case .xdbcSubcodeIntervalDay: return 103 - case .xdbcSubcodeIntervalHour: return 104 - case .xdbcSubcodeIntervalMinute: return 105 - case .xdbcSubcodeIntervalSecond: return 106 - case .xdbcSubcodeIntervalYearToMonth: return 107 - case .xdbcSubcodeIntervalDayToHour: return 108 - case .xdbcSubcodeIntervalDayToMinute: return 109 - case .xdbcSubcodeIntervalDayToSecond: return 110 - case .xdbcSubcodeIntervalHourToMinute: return 111 - case .xdbcSubcodeIntervalHourToSecond: return 112 - case .xdbcSubcodeIntervalMinuteToSecond: return 113 - case .UNRECOGNIZED(let i): return i - } - } + typealias RawValue = Int + case xdbcSubcodeUnknown // = 0 + case xdbcSubcodeYear // = 1 + static let xdbcSubcodeDate = xdbcSubcodeYear + case xdbcSubcodeTime // = 2 + static let xdbcSubcodeMonth = xdbcSubcodeTime + case xdbcSubcodeTimestamp // = 3 + static let xdbcSubcodeDay = xdbcSubcodeTimestamp + case xdbcSubcodeTimeWithTimezone // = 4 + static let xdbcSubcodeHour = xdbcSubcodeTimeWithTimezone + case xdbcSubcodeTimestampWithTimezone // = 5 + static let xdbcSubcodeMinute = xdbcSubcodeTimestampWithTimezone + case xdbcSubcodeSecond // = 6 + case xdbcSubcodeYearToMonth // = 7 + case xdbcSubcodeDayToHour // = 8 + case xdbcSubcodeDayToMinute // = 9 + case xdbcSubcodeDayToSecond // = 10 + case xdbcSubcodeHourToMinute // = 11 + case xdbcSubcodeHourToSecond // = 12 + case xdbcSubcodeMinuteToSecond // = 13 + case xdbcSubcodeIntervalYear // = 101 + case xdbcSubcodeIntervalMonth // = 102 + case xdbcSubcodeIntervalDay // = 103 + case xdbcSubcodeIntervalHour // = 104 + case xdbcSubcodeIntervalMinute // = 105 + case xdbcSubcodeIntervalSecond // = 106 + case xdbcSubcodeIntervalYearToMonth // = 107 + case xdbcSubcodeIntervalDayToHour // = 108 + case xdbcSubcodeIntervalDayToMinute // = 109 + case xdbcSubcodeIntervalDayToSecond // = 110 + case xdbcSubcodeIntervalHourToMinute // = 
111 + case xdbcSubcodeIntervalHourToSecond // = 112 + case xdbcSubcodeIntervalMinuteToSecond // = 113 + case UNRECOGNIZED(Int) + + init() { + self = .xdbcSubcodeUnknown + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .xdbcSubcodeUnknown + case 1: self = .xdbcSubcodeYear + case 2: self = .xdbcSubcodeTime + case 3: self = .xdbcSubcodeTimestamp + case 4: self = .xdbcSubcodeTimeWithTimezone + case 5: self = .xdbcSubcodeTimestampWithTimezone + case 6: self = .xdbcSubcodeSecond + case 7: self = .xdbcSubcodeYearToMonth + case 8: self = .xdbcSubcodeDayToHour + case 9: self = .xdbcSubcodeDayToMinute + case 10: self = .xdbcSubcodeDayToSecond + case 11: self = .xdbcSubcodeHourToMinute + case 12: self = .xdbcSubcodeHourToSecond + case 13: self = .xdbcSubcodeMinuteToSecond + case 101: self = .xdbcSubcodeIntervalYear + case 102: self = .xdbcSubcodeIntervalMonth + case 103: self = .xdbcSubcodeIntervalDay + case 104: self = .xdbcSubcodeIntervalHour + case 105: self = .xdbcSubcodeIntervalMinute + case 106: self = .xdbcSubcodeIntervalSecond + case 107: self = .xdbcSubcodeIntervalYearToMonth + case 108: self = .xdbcSubcodeIntervalDayToHour + case 109: self = .xdbcSubcodeIntervalDayToMinute + case 110: self = .xdbcSubcodeIntervalDayToSecond + case 111: self = .xdbcSubcodeIntervalHourToMinute + case 112: self = .xdbcSubcodeIntervalHourToSecond + case 113: self = .xdbcSubcodeIntervalMinuteToSecond + default: self = .UNRECOGNIZED(rawValue) + } + } + var rawValue: Int { + switch self { + case .xdbcSubcodeUnknown: return 0 + case .xdbcSubcodeYear: return 1 + case .xdbcSubcodeTime: return 2 + case .xdbcSubcodeTimestamp: return 3 + case .xdbcSubcodeTimeWithTimezone: return 4 + case .xdbcSubcodeTimestampWithTimezone: return 5 + case .xdbcSubcodeSecond: return 6 + case .xdbcSubcodeYearToMonth: return 7 + case .xdbcSubcodeDayToHour: return 8 + case .xdbcSubcodeDayToMinute: return 9 + case .xdbcSubcodeDayToSecond: return 10 + case .xdbcSubcodeHourToMinute: return 11 + case 
.xdbcSubcodeHourToSecond: return 12 + case .xdbcSubcodeMinuteToSecond: return 13 + case .xdbcSubcodeIntervalYear: return 101 + case .xdbcSubcodeIntervalMonth: return 102 + case .xdbcSubcodeIntervalDay: return 103 + case .xdbcSubcodeIntervalHour: return 104 + case .xdbcSubcodeIntervalMinute: return 105 + case .xdbcSubcodeIntervalSecond: return 106 + case .xdbcSubcodeIntervalYearToMonth: return 107 + case .xdbcSubcodeIntervalDayToHour: return 108 + case .xdbcSubcodeIntervalDayToMinute: return 109 + case .xdbcSubcodeIntervalDayToSecond: return 110 + case .xdbcSubcodeIntervalHourToMinute: return 111 + case .xdbcSubcodeIntervalHourToSecond: return 112 + case .xdbcSubcodeIntervalMinuteToSecond: return 113 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_XdbcDatetimeSubcode: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_XdbcDatetimeSubcode] = [ - .xdbcSubcodeUnknown, - .xdbcSubcodeYear, - .xdbcSubcodeTime, - .xdbcSubcodeTimestamp, - .xdbcSubcodeTimeWithTimezone, - .xdbcSubcodeTimestampWithTimezone, - .xdbcSubcodeSecond, - .xdbcSubcodeYearToMonth, - .xdbcSubcodeDayToHour, - .xdbcSubcodeDayToMinute, - .xdbcSubcodeDayToSecond, - .xdbcSubcodeHourToMinute, - .xdbcSubcodeHourToSecond, - .xdbcSubcodeMinuteToSecond, - .xdbcSubcodeIntervalYear, - .xdbcSubcodeIntervalMonth, - .xdbcSubcodeIntervalDay, - .xdbcSubcodeIntervalHour, - .xdbcSubcodeIntervalMinute, - .xdbcSubcodeIntervalSecond, - .xdbcSubcodeIntervalYearToMonth, - .xdbcSubcodeIntervalDayToHour, - .xdbcSubcodeIntervalDayToMinute, - .xdbcSubcodeIntervalDayToSecond, - .xdbcSubcodeIntervalHourToMinute, - .xdbcSubcodeIntervalHourToSecond, - .xdbcSubcodeIntervalMinuteToSecond, - ] -} - -#endif // swift(>=4.2) + // The compiler won't synthesize support with the UNRECOGNIZED case. 
+ static var allCases: [Arrow_Flight_Protocol_Sql_XdbcDatetimeSubcode] = [ + .xdbcSubcodeUnknown, + .xdbcSubcodeYear, + .xdbcSubcodeTime, + .xdbcSubcodeTimestamp, + .xdbcSubcodeTimeWithTimezone, + .xdbcSubcodeTimestampWithTimezone, + .xdbcSubcodeSecond, + .xdbcSubcodeYearToMonth, + .xdbcSubcodeDayToHour, + .xdbcSubcodeDayToMinute, + .xdbcSubcodeDayToSecond, + .xdbcSubcodeHourToMinute, + .xdbcSubcodeHourToSecond, + .xdbcSubcodeMinuteToSecond, + .xdbcSubcodeIntervalYear, + .xdbcSubcodeIntervalMonth, + .xdbcSubcodeIntervalDay, + .xdbcSubcodeIntervalHour, + .xdbcSubcodeIntervalMinute, + .xdbcSubcodeIntervalSecond, + .xdbcSubcodeIntervalYearToMonth, + .xdbcSubcodeIntervalDayToHour, + .xdbcSubcodeIntervalDayToMinute, + .xdbcSubcodeIntervalDayToSecond, + .xdbcSubcodeIntervalHourToMinute, + .xdbcSubcodeIntervalHourToSecond, + .xdbcSubcodeIntervalMinuteToSecond, + ] +} + +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_Nullable: SwiftProtobuf.Enum { - typealias RawValue = Int - - ///* - /// Indicates that the fields does not allow the use of null values. - case nullabilityNoNulls // = 0 + typealias RawValue = Int - ///* - /// Indicates that the fields allow the use of null values. - case nullabilityNullable // = 1 + /// * + /// Indicates that the fields does not allow the use of null values. + case nullabilityNoNulls // = 0 - ///* - /// Indicates that nullability of the fields cannot be determined. - case nullabilityUnknown // = 2 - case UNRECOGNIZED(Int) + /// * + /// Indicates that the fields allow the use of null values. + case nullabilityNullable // = 1 - init() { - self = .nullabilityNoNulls - } + /// * + /// Indicates that nullability of the fields cannot be determined. 
+ case nullabilityUnknown // = 2 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .nullabilityNoNulls - case 1: self = .nullabilityNullable - case 2: self = .nullabilityUnknown - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .nullabilityNoNulls } - } - var rawValue: Int { - switch self { - case .nullabilityNoNulls: return 0 - case .nullabilityNullable: return 1 - case .nullabilityUnknown: return 2 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .nullabilityNoNulls + case 1: self = .nullabilityNullable + case 2: self = .nullabilityUnknown + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .nullabilityNoNulls: return 0 + case .nullabilityNullable: return 1 + case .nullabilityUnknown: return 2 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_Nullable: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_Nullable] = [ - .nullabilityNoNulls, - .nullabilityNullable, - .nullabilityUnknown, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_Nullable] = [ + .nullabilityNoNulls, + .nullabilityNullable, + .nullabilityUnknown, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_Searchable: SwiftProtobuf.Enum { - typealias RawValue = Int - - ///* - /// Indicates that column cannot be used in a WHERE clause. - case none // = 0 - - ///* - /// Indicates that the column can be used in a WHERE clause if it is using a - /// LIKE operator. - case char // = 1 - - ///* - /// Indicates that the column can be used In a WHERE clause with any - /// operator other than LIKE. - /// - /// - Allowed operators: comparison, quantified comparison, BETWEEN, - /// DISTINCT, IN, MATCH, and UNIQUE. 
- case basic // = 2 - - ///* - /// Indicates that the column can be used in a WHERE clause using any operator. - case full // = 3 - case UNRECOGNIZED(Int) + typealias RawValue = Int - init() { - self = .none - } + /// * + /// Indicates that column cannot be used in a WHERE clause. + case none // = 0 + + /// * + /// Indicates that the column can be used in a WHERE clause if it is using a + /// LIKE operator. + case char // = 1 + + /// * + /// Indicates that the column can be used In a WHERE clause with any + /// operator other than LIKE. + /// + /// - Allowed operators: comparison, quantified comparison, BETWEEN, + /// DISTINCT, IN, MATCH, and UNIQUE. + case basic // = 2 + + /// * + /// Indicates that the column can be used in a WHERE clause using any operator. + case full // = 3 + case UNRECOGNIZED(Int) - init?(rawValue: Int) { - switch rawValue { - case 0: self = .none - case 1: self = .char - case 2: self = .basic - case 3: self = .full - default: self = .UNRECOGNIZED(rawValue) + init() { + self = .none } - } - var rawValue: Int { - switch self { - case .none: return 0 - case .char: return 1 - case .basic: return 2 - case .full: return 3 - case .UNRECOGNIZED(let i): return i + init?(rawValue: Int) { + switch rawValue { + case 0: self = .none + case 1: self = .char + case 2: self = .basic + case 3: self = .full + default: self = .UNRECOGNIZED(rawValue) + } } - } + var rawValue: Int { + switch self { + case .none: return 0 + case .char: return 1 + case .basic: return 2 + case .full: return 3 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_Searchable: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_Searchable] = [ - .none, - .char, - .basic, - .full, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. 
+ static var allCases: [Arrow_Flight_Protocol_Sql_Searchable] = [ + .none, + .char, + .basic, + .full, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) enum Arrow_Flight_Protocol_Sql_UpdateDeleteRules: SwiftProtobuf.Enum { - typealias RawValue = Int - case cascade // = 0 - case restrict // = 1 - case setNull // = 2 - case noAction // = 3 - case setDefault // = 4 - case UNRECOGNIZED(Int) - - init() { - self = .cascade - } - - init?(rawValue: Int) { - switch rawValue { - case 0: self = .cascade - case 1: self = .restrict - case 2: self = .setNull - case 3: self = .noAction - case 4: self = .setDefault - default: self = .UNRECOGNIZED(rawValue) - } - } - - var rawValue: Int { - switch self { - case .cascade: return 0 - case .restrict: return 1 - case .setNull: return 2 - case .noAction: return 3 - case .setDefault: return 4 - case .UNRECOGNIZED(let i): return i - } - } + typealias RawValue = Int + case cascade // = 0 + case restrict // = 1 + case setNull // = 2 + case noAction // = 3 + case setDefault // = 4 + case UNRECOGNIZED(Int) + + init() { + self = .cascade + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .cascade + case 1: self = .restrict + case 2: self = .setNull + case 3: self = .noAction + case 4: self = .setDefault + default: self = .UNRECOGNIZED(rawValue) + } + } + var rawValue: Int { + switch self { + case .cascade: return 0 + case .restrict: return 1 + case .setNull: return 2 + case .noAction: return 3 + case .setDefault: return 4 + case let .UNRECOGNIZED(i): return i + } + } } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_UpdateDeleteRules: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_UpdateDeleteRules] = [ - .cascade, - .restrict, - .setNull, - .noAction, - .setDefault, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. 
+ static var allCases: [Arrow_Flight_Protocol_Sql_UpdateDeleteRules] = [ + .cascade, + .restrict, + .setNull, + .noAction, + .setDefault, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) /// /// Represents a metadata request. Used in the command member of FlightDescriptor @@ -2281,33 +2259,33 @@ extension Arrow_Flight_Protocol_Sql_UpdateDeleteRules: CaseIterable { /// > /// where there is one row per requested piece of metadata information. struct Arrow_Flight_Protocol_Sql_CommandGetSqlInfo { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// - /// Values are modelled after ODBC's SQLGetInfo() function. This information is intended to provide - /// Flight SQL clients with basic, SQL syntax and SQL functions related information. - /// More information types can be added in future releases. - /// E.g. more SQL syntax support types, scalar functions support, type conversion support etc. - /// - /// Note that the set of metadata may expand. - /// - /// Initially, Flight SQL will support the following information types: - /// - Server Information - Range [0-500) - /// - Syntax Information - Range [500-1000) - /// Range [0-10,000) is reserved for defaults (see SqlInfo enum for default options). - /// Custom options should start at 10,000. - /// - /// If omitted, then all metadata will be retrieved. - /// Flight SQL Servers may choose to include additional metadata above and beyond the specified set, however they must - /// at least return the specified set. IDs ranging from 0 to 10,000 (exclusive) are reserved for future use. - /// If additional metadata is included, the metadata IDs should start from 10,000. - var info: [UInt32] = [] - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// + /// Values are modelled after ODBC's SQLGetInfo() function. This information is intended to provide + /// Flight SQL clients with basic, SQL syntax and SQL functions related information. + /// More information types can be added in future releases. + /// E.g. more SQL syntax support types, scalar functions support, type conversion support etc. + /// + /// Note that the set of metadata may expand. + /// + /// Initially, Flight SQL will support the following information types: + /// - Server Information - Range [0-500) + /// - Syntax Information - Range [500-1000) + /// Range [0-10,000) is reserved for defaults (see SqlInfo enum for default options). + /// Custom options should start at 10,000. + /// + /// If omitted, then all metadata will be retrieved. + /// Flight SQL Servers may choose to include additional metadata above and beyond the specified set, however they must + /// at least return the specified set. IDs ranging from 0 to 10,000 (exclusive) are reserved for future use. + /// If additional metadata is included, the metadata IDs should start from 10,000. + var info: [UInt32] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} } /// @@ -2368,26 +2346,27 @@ struct Arrow_Flight_Protocol_Sql_CommandGetSqlInfo { /// > /// The returned data should be ordered by data_type and then by type_name. struct Arrow_Flight_Protocol_Sql_CommandGetXdbcTypeInfo { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// + /// Specifies the data type to search for the info. 
+ var dataType: Int32 { + get { return _dataType ?? 0 } + set { _dataType = newValue } + } - /// - /// Specifies the data type to search for the info. - var dataType: Int32 { - get {return _dataType ?? 0} - set {_dataType = newValue} - } - /// Returns true if `dataType` has been explicitly set. - var hasDataType: Bool {return self._dataType != nil} - /// Clears the value of `dataType`. Subsequent reads from it will return its default value. - mutating func clearDataType() {self._dataType = nil} + /// Returns true if `dataType` has been explicitly set. + var hasDataType: Bool { return _dataType != nil } + /// Clears the value of `dataType`. Subsequent reads from it will return its default value. + mutating func clearDataType() { _dataType = nil } - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} - fileprivate var _dataType: Int32? = nil + fileprivate var _dataType: Int32? } /// @@ -2403,13 +2382,13 @@ struct Arrow_Flight_Protocol_Sql_CommandGetXdbcTypeInfo { /// > /// The returned data should be ordered by catalog_name. struct Arrow_Flight_Protocol_Sql_CommandGetCatalogs { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -2426,44 +2405,46 @@ struct Arrow_Flight_Protocol_Sql_CommandGetCatalogs { /// > /// The returned data should be ordered by catalog_name, then db_schema_name. struct Arrow_Flight_Protocol_Sql_CommandGetDbSchemas { - // SwiftProtobuf.Message conformance is added in an extension below. 
See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// - /// Specifies the Catalog to search for the tables. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. - var catalog: String { - get {return _catalog ?? String()} - set {_catalog = newValue} - } - /// Returns true if `catalog` has been explicitly set. - var hasCatalog: Bool {return self._catalog != nil} - /// Clears the value of `catalog`. Subsequent reads from it will return its default value. - mutating func clearCatalog() {self._catalog = nil} - - /// - /// Specifies a filter pattern for schemas to search for. - /// When no db_schema_filter_pattern is provided, the pattern will not be used to narrow the search. - /// In the pattern string, two special characters can be used to denote matching rules: - /// - "%" means to match any substring with 0 or more characters. - /// - "_" means to match any one character. - var dbSchemaFilterPattern: String { - get {return _dbSchemaFilterPattern ?? String()} - set {_dbSchemaFilterPattern = newValue} - } - /// Returns true if `dbSchemaFilterPattern` has been explicitly set. - var hasDbSchemaFilterPattern: Bool {return self._dbSchemaFilterPattern != nil} - /// Clears the value of `dbSchemaFilterPattern`. Subsequent reads from it will return its default value. - mutating func clearDbSchemaFilterPattern() {self._dbSchemaFilterPattern = nil} - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _catalog: String? = nil - fileprivate var _dbSchemaFilterPattern: String? = nil + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// + /// Specifies the Catalog to search for the tables. 
+ /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. + var catalog: String { + get { return _catalog ?? String() } + set { _catalog = newValue } + } + + /// Returns true if `catalog` has been explicitly set. + var hasCatalog: Bool { return _catalog != nil } + /// Clears the value of `catalog`. Subsequent reads from it will return its default value. + mutating func clearCatalog() { _catalog = nil } + + /// + /// Specifies a filter pattern for schemas to search for. + /// When no db_schema_filter_pattern is provided, the pattern will not be used to narrow the search. + /// In the pattern string, two special characters can be used to denote matching rules: + /// - "%" means to match any substring with 0 or more characters. + /// - "_" means to match any one character. + var dbSchemaFilterPattern: String { + get { return _dbSchemaFilterPattern ?? String() } + set { _dbSchemaFilterPattern = newValue } + } + + /// Returns true if `dbSchemaFilterPattern` has been explicitly set. + var hasDbSchemaFilterPattern: Bool { return _dbSchemaFilterPattern != nil } + /// Clears the value of `dbSchemaFilterPattern`. Subsequent reads from it will return its default value. + mutating func clearDbSchemaFilterPattern() { _dbSchemaFilterPattern = nil } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _catalog: String? + fileprivate var _dbSchemaFilterPattern: String? } /// @@ -2494,69 +2475,72 @@ struct Arrow_Flight_Protocol_Sql_CommandGetDbSchemas { /// - ARROW:FLIGHT:SQL:IS_SEARCHABLE - "1" indicates if the column is searchable via WHERE clause, "0" otherwise. /// The returned data should be ordered by catalog_name, db_schema_name, table_name, then table_type, followed by table_schema if requested. struct Arrow_Flight_Protocol_Sql_CommandGetTables { - // SwiftProtobuf.Message conformance is added in an extension below. 
See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// - /// Specifies the Catalog to search for the tables. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. - var catalog: String { - get {return _catalog ?? String()} - set {_catalog = newValue} - } - /// Returns true if `catalog` has been explicitly set. - var hasCatalog: Bool {return self._catalog != nil} - /// Clears the value of `catalog`. Subsequent reads from it will return its default value. - mutating func clearCatalog() {self._catalog = nil} - - /// - /// Specifies a filter pattern for schemas to search for. - /// When no db_schema_filter_pattern is provided, all schemas matching other filters are searched. - /// In the pattern string, two special characters can be used to denote matching rules: - /// - "%" means to match any substring with 0 or more characters. - /// - "_" means to match any one character. - var dbSchemaFilterPattern: String { - get {return _dbSchemaFilterPattern ?? String()} - set {_dbSchemaFilterPattern = newValue} - } - /// Returns true if `dbSchemaFilterPattern` has been explicitly set. - var hasDbSchemaFilterPattern: Bool {return self._dbSchemaFilterPattern != nil} - /// Clears the value of `dbSchemaFilterPattern`. Subsequent reads from it will return its default value. - mutating func clearDbSchemaFilterPattern() {self._dbSchemaFilterPattern = nil} - - /// - /// Specifies a filter pattern for tables to search for. - /// When no table_name_filter_pattern is provided, all tables matching other filters are searched. - /// In the pattern string, two special characters can be used to denote matching rules: - /// - "%" means to match any substring with 0 or more characters. - /// - "_" means to match any one character. - var tableNameFilterPattern: String { - get {return _tableNameFilterPattern ?? 
String()} - set {_tableNameFilterPattern = newValue} - } - /// Returns true if `tableNameFilterPattern` has been explicitly set. - var hasTableNameFilterPattern: Bool {return self._tableNameFilterPattern != nil} - /// Clears the value of `tableNameFilterPattern`. Subsequent reads from it will return its default value. - mutating func clearTableNameFilterPattern() {self._tableNameFilterPattern = nil} - - /// - /// Specifies a filter of table types which must match. - /// The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables. - /// TABLE, VIEW, and SYSTEM TABLE are commonly supported. - var tableTypes: [String] = [] - - /// Specifies if the Arrow schema should be returned for found tables. - var includeSchema: Bool = false - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _catalog: String? = nil - fileprivate var _dbSchemaFilterPattern: String? = nil - fileprivate var _tableNameFilterPattern: String? = nil + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// + /// Specifies the Catalog to search for the tables. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. + var catalog: String { + get { return _catalog ?? String() } + set { _catalog = newValue } + } + + /// Returns true if `catalog` has been explicitly set. + var hasCatalog: Bool { return _catalog != nil } + /// Clears the value of `catalog`. Subsequent reads from it will return its default value. + mutating func clearCatalog() { _catalog = nil } + + /// + /// Specifies a filter pattern for schemas to search for. + /// When no db_schema_filter_pattern is provided, all schemas matching other filters are searched. 
+ /// In the pattern string, two special characters can be used to denote matching rules: + /// - "%" means to match any substring with 0 or more characters. + /// - "_" means to match any one character. + var dbSchemaFilterPattern: String { + get { return _dbSchemaFilterPattern ?? String() } + set { _dbSchemaFilterPattern = newValue } + } + + /// Returns true if `dbSchemaFilterPattern` has been explicitly set. + var hasDbSchemaFilterPattern: Bool { return _dbSchemaFilterPattern != nil } + /// Clears the value of `dbSchemaFilterPattern`. Subsequent reads from it will return its default value. + mutating func clearDbSchemaFilterPattern() { _dbSchemaFilterPattern = nil } + + /// + /// Specifies a filter pattern for tables to search for. + /// When no table_name_filter_pattern is provided, all tables matching other filters are searched. + /// In the pattern string, two special characters can be used to denote matching rules: + /// - "%" means to match any substring with 0 or more characters. + /// - "_" means to match any one character. + var tableNameFilterPattern: String { + get { return _tableNameFilterPattern ?? String() } + set { _tableNameFilterPattern = newValue } + } + + /// Returns true if `tableNameFilterPattern` has been explicitly set. + var hasTableNameFilterPattern: Bool { return _tableNameFilterPattern != nil } + /// Clears the value of `tableNameFilterPattern`. Subsequent reads from it will return its default value. + mutating func clearTableNameFilterPattern() { _tableNameFilterPattern = nil } + + /// + /// Specifies a filter of table types which must match. + /// The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables. + /// TABLE, VIEW, and SYSTEM TABLE are commonly supported. + var tableTypes: [String] = [] + + /// Specifies if the Arrow schema should be returned for found tables. 
+ var includeSchema: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _catalog: String? + fileprivate var _dbSchemaFilterPattern: String? + fileprivate var _tableNameFilterPattern: String? } /// @@ -2573,13 +2557,13 @@ struct Arrow_Flight_Protocol_Sql_CommandGetTables { /// > /// The returned data should be ordered by table_type. struct Arrow_Flight_Protocol_Sql_CommandGetTableTypes { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -2599,45 +2583,47 @@ struct Arrow_Flight_Protocol_Sql_CommandGetTableTypes { /// > /// The returned data should be ordered by catalog_name, db_schema_name, table_name, key_name, then key_sequence. struct Arrow_Flight_Protocol_Sql_CommandGetPrimaryKeys { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// - /// Specifies the catalog to search for the table. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. - var catalog: String { - get {return _catalog ?? String()} - set {_catalog = newValue} - } - /// Returns true if `catalog` has been explicitly set. - var hasCatalog: Bool {return self._catalog != nil} - /// Clears the value of `catalog`. Subsequent reads from it will return its default value. 
- mutating func clearCatalog() {self._catalog = nil} - - /// - /// Specifies the schema to search for the table. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. - var dbSchema: String { - get {return _dbSchema ?? String()} - set {_dbSchema = newValue} - } - /// Returns true if `dbSchema` has been explicitly set. - var hasDbSchema: Bool {return self._dbSchema != nil} - /// Clears the value of `dbSchema`. Subsequent reads from it will return its default value. - mutating func clearDbSchema() {self._dbSchema = nil} - - /// Specifies the table to get the primary keys for. - var table: String = String() - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _catalog: String? = nil - fileprivate var _dbSchema: String? = nil + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// + /// Specifies the catalog to search for the table. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. + var catalog: String { + get { return _catalog ?? String() } + set { _catalog = newValue } + } + + /// Returns true if `catalog` has been explicitly set. + var hasCatalog: Bool { return _catalog != nil } + /// Clears the value of `catalog`. Subsequent reads from it will return its default value. + mutating func clearCatalog() { _catalog = nil } + + /// + /// Specifies the schema to search for the table. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. + var dbSchema: String { + get { return _dbSchema ?? String() } + set { _dbSchema = newValue } + } + + /// Returns true if `dbSchema` has been explicitly set. 
+ var hasDbSchema: Bool { return _dbSchema != nil } + /// Clears the value of `dbSchema`. Subsequent reads from it will return its default value. + mutating func clearDbSchema() { _dbSchema = nil } + + /// Specifies the table to get the primary keys for. + var table: String = .init() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _catalog: String? + fileprivate var _dbSchema: String? } /// @@ -2666,45 +2652,47 @@ struct Arrow_Flight_Protocol_Sql_CommandGetPrimaryKeys { /// The returned data should be ordered by fk_catalog_name, fk_db_schema_name, fk_table_name, fk_key_name, then key_sequence. /// update_rule and delete_rule returns a byte that is equivalent to actions declared on UpdateDeleteRules enum. struct Arrow_Flight_Protocol_Sql_CommandGetExportedKeys { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// - /// Specifies the catalog to search for the foreign key table. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. - var catalog: String { - get {return _catalog ?? String()} - set {_catalog = newValue} - } - /// Returns true if `catalog` has been explicitly set. - var hasCatalog: Bool {return self._catalog != nil} - /// Clears the value of `catalog`. Subsequent reads from it will return its default value. - mutating func clearCatalog() {self._catalog = nil} - - /// - /// Specifies the schema to search for the foreign key table. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. - var dbSchema: String { - get {return _dbSchema ?? String()} - set {_dbSchema = newValue} - } - /// Returns true if `dbSchema` has been explicitly set. 
- var hasDbSchema: Bool {return self._dbSchema != nil} - /// Clears the value of `dbSchema`. Subsequent reads from it will return its default value. - mutating func clearDbSchema() {self._dbSchema = nil} - - /// Specifies the foreign key table to get the foreign keys for. - var table: String = String() - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _catalog: String? = nil - fileprivate var _dbSchema: String? = nil + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// + /// Specifies the catalog to search for the foreign key table. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. + var catalog: String { + get { return _catalog ?? String() } + set { _catalog = newValue } + } + + /// Returns true if `catalog` has been explicitly set. + var hasCatalog: Bool { return _catalog != nil } + /// Clears the value of `catalog`. Subsequent reads from it will return its default value. + mutating func clearCatalog() { _catalog = nil } + + /// + /// Specifies the schema to search for the foreign key table. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. + var dbSchema: String { + get { return _dbSchema ?? String() } + set { _dbSchema = newValue } + } + + /// Returns true if `dbSchema` has been explicitly set. + var hasDbSchema: Bool { return _dbSchema != nil } + /// Clears the value of `dbSchema`. Subsequent reads from it will return its default value. + mutating func clearDbSchema() { _dbSchema = nil } + + /// Specifies the foreign key table to get the foreign keys for. + var table: String = .init() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _catalog: String? 
+ fileprivate var _dbSchema: String? } /// @@ -2737,45 +2725,47 @@ struct Arrow_Flight_Protocol_Sql_CommandGetExportedKeys { /// - 3 = NO ACTION /// - 4 = SET DEFAULT struct Arrow_Flight_Protocol_Sql_CommandGetImportedKeys { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// - /// Specifies the catalog to search for the primary key table. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. - var catalog: String { - get {return _catalog ?? String()} - set {_catalog = newValue} - } - /// Returns true if `catalog` has been explicitly set. - var hasCatalog: Bool {return self._catalog != nil} - /// Clears the value of `catalog`. Subsequent reads from it will return its default value. - mutating func clearCatalog() {self._catalog = nil} - - /// - /// Specifies the schema to search for the primary key table. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. - var dbSchema: String { - get {return _dbSchema ?? String()} - set {_dbSchema = newValue} - } - /// Returns true if `dbSchema` has been explicitly set. - var hasDbSchema: Bool {return self._dbSchema != nil} - /// Clears the value of `dbSchema`. Subsequent reads from it will return its default value. - mutating func clearDbSchema() {self._dbSchema = nil} - - /// Specifies the primary key table to get the foreign keys for. - var table: String = String() - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _catalog: String? = nil - fileprivate var _dbSchema: String? = nil + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
+ + /// + /// Specifies the catalog to search for the primary key table. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. + var catalog: String { + get { return _catalog ?? String() } + set { _catalog = newValue } + } + + /// Returns true if `catalog` has been explicitly set. + var hasCatalog: Bool { return _catalog != nil } + /// Clears the value of `catalog`. Subsequent reads from it will return its default value. + mutating func clearCatalog() { _catalog = nil } + + /// + /// Specifies the schema to search for the primary key table. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. + var dbSchema: String { + get { return _dbSchema ?? String() } + set { _dbSchema = newValue } + } + + /// Returns true if `dbSchema` has been explicitly set. + var hasDbSchema: Bool { return _dbSchema != nil } + /// Clears the value of `dbSchema`. Subsequent reads from it will return its default value. + mutating func clearDbSchema() { _dbSchema = nil } + + /// Specifies the primary key table to get the foreign keys for. + var table: String = .init() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _catalog: String? + fileprivate var _dbSchema: String? } /// @@ -2810,165 +2800,172 @@ struct Arrow_Flight_Protocol_Sql_CommandGetImportedKeys { /// - 3 = NO ACTION /// - 4 = SET DEFAULT struct Arrow_Flight_Protocol_Sql_CommandGetCrossReference { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - ///* - /// The catalog name where the parent table is. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. - var pkCatalog: String { - get {return _pkCatalog ?? 
String()} - set {_pkCatalog = newValue} - } - /// Returns true if `pkCatalog` has been explicitly set. - var hasPkCatalog: Bool {return self._pkCatalog != nil} - /// Clears the value of `pkCatalog`. Subsequent reads from it will return its default value. - mutating func clearPkCatalog() {self._pkCatalog = nil} - - ///* - /// The Schema name where the parent table is. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. - var pkDbSchema: String { - get {return _pkDbSchema ?? String()} - set {_pkDbSchema = newValue} - } - /// Returns true if `pkDbSchema` has been explicitly set. - var hasPkDbSchema: Bool {return self._pkDbSchema != nil} - /// Clears the value of `pkDbSchema`. Subsequent reads from it will return its default value. - mutating func clearPkDbSchema() {self._pkDbSchema = nil} - - ///* - /// The parent table name. It cannot be null. - var pkTable: String = String() - - ///* - /// The catalog name where the foreign table is. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. - var fkCatalog: String { - get {return _fkCatalog ?? String()} - set {_fkCatalog = newValue} - } - /// Returns true if `fkCatalog` has been explicitly set. - var hasFkCatalog: Bool {return self._fkCatalog != nil} - /// Clears the value of `fkCatalog`. Subsequent reads from it will return its default value. - mutating func clearFkCatalog() {self._fkCatalog = nil} - - ///* - /// The schema name where the foreign table is. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. - var fkDbSchema: String { - get {return _fkDbSchema ?? String()} - set {_fkDbSchema = newValue} - } - /// Returns true if `fkDbSchema` has been explicitly set. - var hasFkDbSchema: Bool {return self._fkDbSchema != nil} - /// Clears the value of `fkDbSchema`. 
Subsequent reads from it will return its default value. - mutating func clearFkDbSchema() {self._fkDbSchema = nil} - - ///* - /// The foreign table name. It cannot be null. - var fkTable: String = String() - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _pkCatalog: String? = nil - fileprivate var _pkDbSchema: String? = nil - fileprivate var _fkCatalog: String? = nil - fileprivate var _fkDbSchema: String? = nil + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// * + /// The catalog name where the parent table is. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. + var pkCatalog: String { + get { return _pkCatalog ?? String() } + set { _pkCatalog = newValue } + } + + /// Returns true if `pkCatalog` has been explicitly set. + var hasPkCatalog: Bool { return _pkCatalog != nil } + /// Clears the value of `pkCatalog`. Subsequent reads from it will return its default value. + mutating func clearPkCatalog() { _pkCatalog = nil } + + /// * + /// The Schema name where the parent table is. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. + var pkDbSchema: String { + get { return _pkDbSchema ?? String() } + set { _pkDbSchema = newValue } + } + + /// Returns true if `pkDbSchema` has been explicitly set. + var hasPkDbSchema: Bool { return _pkDbSchema != nil } + /// Clears the value of `pkDbSchema`. Subsequent reads from it will return its default value. + mutating func clearPkDbSchema() { _pkDbSchema = nil } + + /// * + /// The parent table name. It cannot be null. + var pkTable: String = .init() + + /// * + /// The catalog name where the foreign table is. + /// An empty string retrieves those without a catalog. 
+ /// If omitted the catalog name should not be used to narrow the search. + var fkCatalog: String { + get { return _fkCatalog ?? String() } + set { _fkCatalog = newValue } + } + + /// Returns true if `fkCatalog` has been explicitly set. + var hasFkCatalog: Bool { return _fkCatalog != nil } + /// Clears the value of `fkCatalog`. Subsequent reads from it will return its default value. + mutating func clearFkCatalog() { _fkCatalog = nil } + + /// * + /// The schema name where the foreign table is. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. + var fkDbSchema: String { + get { return _fkDbSchema ?? String() } + set { _fkDbSchema = newValue } + } + + /// Returns true if `fkDbSchema` has been explicitly set. + var hasFkDbSchema: Bool { return _fkDbSchema != nil } + /// Clears the value of `fkDbSchema`. Subsequent reads from it will return its default value. + mutating func clearFkDbSchema() { _fkDbSchema = nil } + + /// * + /// The foreign table name. It cannot be null. + var fkTable: String = .init() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _pkCatalog: String? + fileprivate var _pkDbSchema: String? + fileprivate var _fkCatalog: String? + fileprivate var _fkDbSchema: String? } /// /// Request message for the "CreatePreparedStatement" action on a Flight SQL enabled backend. struct Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// The valid SQL string to create a prepared statement for. - var query: String = String() + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
+ + /// The valid SQL string to create a prepared statement for. + var query: String = .init() + + /// Create/execute the prepared statement as part of this transaction (if + /// unset, executions of the prepared statement will be auto-committed). + var transactionID: Data { + get { return _transactionID ?? Data() } + set { _transactionID = newValue } + } - /// Create/execute the prepared statement as part of this transaction (if - /// unset, executions of the prepared statement will be auto-committed). - var transactionID: Data { - get {return _transactionID ?? Data()} - set {_transactionID = newValue} - } - /// Returns true if `transactionID` has been explicitly set. - var hasTransactionID: Bool {return self._transactionID != nil} - /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. - mutating func clearTransactionID() {self._transactionID = nil} + /// Returns true if `transactionID` has been explicitly set. + var hasTransactionID: Bool { return _transactionID != nil } + /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. + mutating func clearTransactionID() { _transactionID = nil } - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} - fileprivate var _transactionID: Data? = nil + fileprivate var _transactionID: Data? } /// /// An embedded message describing a Substrait plan to execute. struct Arrow_Flight_Protocol_Sql_SubstraitPlan { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// The serialized substrait.Plan to create a prepared statement for. 
- /// XXX(ARROW-16902): this is bytes instead of an embedded message - /// because Protobuf does not really support one DLL using Protobuf - /// definitions from another DLL. - var plan: Data = Data() + /// The serialized substrait.Plan to create a prepared statement for. + /// XXX(ARROW-16902): this is bytes instead of an embedded message + /// because Protobuf does not really support one DLL using Protobuf + /// definitions from another DLL. + var plan: Data = .init() - /// The Substrait release, e.g. "0.12.0". This information is not - /// tracked in the plan itself, so this is the only way for consumers - /// to potentially know if they can handle the plan. - var version: String = String() + /// The Substrait release, e.g. "0.12.0". This information is not + /// tracked in the plan itself, so this is the only way for consumers + /// to potentially know if they can handle the plan. + var version: String = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// Request message for the "CreatePreparedSubstraitPlan" action on a Flight SQL enabled backend. struct Arrow_Flight_Protocol_Sql_ActionCreatePreparedSubstraitPlanRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// The serialized substrait.Plan to create a prepared statement for. - var plan: Arrow_Flight_Protocol_Sql_SubstraitPlan { - get {return _plan ?? Arrow_Flight_Protocol_Sql_SubstraitPlan()} - set {_plan = newValue} - } - /// Returns true if `plan` has been explicitly set. - var hasPlan: Bool {return self._plan != nil} - /// Clears the value of `plan`. Subsequent reads from it will return its default value. 
- mutating func clearPlan() {self._plan = nil} - - /// Create/execute the prepared statement as part of this transaction (if - /// unset, executions of the prepared statement will be auto-committed). - var transactionID: Data { - get {return _transactionID ?? Data()} - set {_transactionID = newValue} - } - /// Returns true if `transactionID` has been explicitly set. - var hasTransactionID: Bool {return self._transactionID != nil} - /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. - mutating func clearTransactionID() {self._transactionID = nil} - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _plan: Arrow_Flight_Protocol_Sql_SubstraitPlan? = nil - fileprivate var _transactionID: Data? = nil + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// The serialized substrait.Plan to create a prepared statement for. + var plan: Arrow_Flight_Protocol_Sql_SubstraitPlan { + get { return _plan ?? Arrow_Flight_Protocol_Sql_SubstraitPlan() } + set { _plan = newValue } + } + + /// Returns true if `plan` has been explicitly set. + var hasPlan: Bool { return _plan != nil } + /// Clears the value of `plan`. Subsequent reads from it will return its default value. + mutating func clearPlan() { _plan = nil } + + /// Create/execute the prepared statement as part of this transaction (if + /// unset, executions of the prepared statement will be auto-committed). + var transactionID: Data { + get { return _transactionID ?? Data() } + set { _transactionID = newValue } + } + + /// Returns true if `transactionID` has been explicitly set. + var hasTransactionID: Bool { return _transactionID != nil } + /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. 
+ mutating func clearTransactionID() { _transactionID = nil } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _plan: Arrow_Flight_Protocol_Sql_SubstraitPlan? + fileprivate var _transactionID: Data? } /// @@ -2980,53 +2977,53 @@ struct Arrow_Flight_Protocol_Sql_ActionCreatePreparedSubstraitPlanRequest { /// /// The result should be wrapped in a google.protobuf.Any message. struct Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementResult { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// Opaque handle for the prepared statement on the server. - var preparedStatementHandle: Data = Data() + /// Opaque handle for the prepared statement on the server. + var preparedStatementHandle: Data = .init() - /// If a result set generating query was provided, dataset_schema contains the - /// schema of the dataset as described in Schema.fbs::Schema, it is serialized as an IPC message. - var datasetSchema: Data = Data() + /// If a result set generating query was provided, dataset_schema contains the + /// schema of the dataset as described in Schema.fbs::Schema, it is serialized as an IPC message. + var datasetSchema: Data = .init() - /// If the query provided contained parameters, parameter_schema contains the - /// schema of the expected parameters as described in Schema.fbs::Schema, it is serialized as an IPC message. - var parameterSchema: Data = Data() + /// If the query provided contained parameters, parameter_schema contains the + /// schema of the expected parameters as described in Schema.fbs::Schema, it is serialized as an IPC message. 
+ var parameterSchema: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// Request message for the "ClosePreparedStatement" action on a Flight SQL enabled backend. /// Closes server resources associated with the prepared statement handle. struct Arrow_Flight_Protocol_Sql_ActionClosePreparedStatementRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// Opaque handle for the prepared statement on the server. - var preparedStatementHandle: Data = Data() + /// Opaque handle for the prepared statement on the server. + var preparedStatementHandle: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// Request message for the "BeginTransaction" action. /// Begins a transaction. struct Arrow_Flight_Protocol_Sql_ActionBeginTransactionRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
- var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -3036,19 +3033,19 @@ struct Arrow_Flight_Protocol_Sql_ActionBeginTransactionRequest { /// Only supported if FLIGHT_SQL_TRANSACTION is /// FLIGHT_SQL_TRANSACTION_SUPPORT_SAVEPOINT. struct Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// The transaction to which a savepoint belongs. - var transactionID: Data = Data() + /// The transaction to which a savepoint belongs. + var transactionID: Data = .init() - /// Name for the savepoint. - var name: String = String() + /// Name for the savepoint. + var name: String = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -3060,16 +3057,16 @@ struct Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest { /// /// The result should be wrapped in a google.protobuf.Any message. struct Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// Opaque handle for the transaction on the server. - var transactionID: Data = Data() + /// Opaque handle for the transaction on the server. 
+ var transactionID: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -3081,16 +3078,16 @@ struct Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult { /// /// The result should be wrapped in a google.protobuf.Any message. struct Arrow_Flight_Protocol_Sql_ActionBeginSavepointResult { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// Opaque handle for the savepoint on the server. - var savepointID: Data = Data() + /// Opaque handle for the savepoint on the server. + var savepointID: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -3101,68 +3098,67 @@ struct Arrow_Flight_Protocol_Sql_ActionBeginSavepointResult { /// If the action completes successfully, the transaction handle is /// invalidated, as are all associated savepoints. struct Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// Opaque handle for the transaction on the server. - var transactionID: Data = Data() - - /// Whether to commit/rollback the given transaction. - var action: Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction = .unspecified - - var unknownFields = SwiftProtobuf.UnknownStorage() - - enum EndTransaction: SwiftProtobuf.Enum { - typealias RawValue = Int - case unspecified // = 0 - - /// Commit the transaction. 
- case commit // = 1 - - /// Roll back the transaction. - case rollback // = 2 - case UNRECOGNIZED(Int) - - init() { - self = .unspecified + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// Opaque handle for the transaction on the server. + var transactionID: Data = .init() + + /// Whether to commit/rollback the given transaction. + var action: Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction = .unspecified + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum EndTransaction: SwiftProtobuf.Enum { + typealias RawValue = Int + case unspecified // = 0 + + /// Commit the transaction. + case commit // = 1 + + /// Roll back the transaction. + case rollback // = 2 + case UNRECOGNIZED(Int) + + init() { + self = .unspecified + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unspecified + case 1: self = .commit + case 2: self = .rollback + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .unspecified: return 0 + case .commit: return 1 + case .rollback: return 2 + case let .UNRECOGNIZED(i): return i + } + } } - init?(rawValue: Int) { - switch rawValue { - case 0: self = .unspecified - case 1: self = .commit - case 2: self = .rollback - default: self = .UNRECOGNIZED(rawValue) - } - } - - var rawValue: Int { - switch self { - case .unspecified: return 0 - case .commit: return 1 - case .rollback: return 2 - case .UNRECOGNIZED(let i): return i - } - } - - } - - init() {} + init() {} } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. 
- static var allCases: [Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction] = [ - .unspecified, - .commit, - .rollback, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction] = [ + .unspecified, + .commit, + .rollback, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) /// /// Request message for the "EndSavepoint" action. @@ -3174,68 +3170,67 @@ extension Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction: /// a savepoint does not invalidate the savepoint, but invalidates all /// savepoints created after the current savepoint. struct Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// Opaque handle for the savepoint on the server. - var savepointID: Data = Data() - - /// Whether to rollback/release the given savepoint. - var action: Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint = .unspecified - - var unknownFields = SwiftProtobuf.UnknownStorage() - - enum EndSavepoint: SwiftProtobuf.Enum { - typealias RawValue = Int - case unspecified // = 0 - - /// Release the savepoint. - case release // = 1 - - /// Roll back to a savepoint. - case rollback // = 2 - case UNRECOGNIZED(Int) - - init() { - self = .unspecified - } - - init?(rawValue: Int) { - switch rawValue { - case 0: self = .unspecified - case 1: self = .release - case 2: self = .rollback - default: self = .UNRECOGNIZED(rawValue) - } + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// Opaque handle for the savepoint on the server. 
+ var savepointID: Data = .init() + + /// Whether to rollback/release the given savepoint. + var action: Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint = .unspecified + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum EndSavepoint: SwiftProtobuf.Enum { + typealias RawValue = Int + case unspecified // = 0 + + /// Release the savepoint. + case release // = 1 + + /// Roll back to a savepoint. + case rollback // = 2 + case UNRECOGNIZED(Int) + + init() { + self = .unspecified + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unspecified + case 1: self = .release + case 2: self = .rollback + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .unspecified: return 0 + case .release: return 1 + case .rollback: return 2 + case let .UNRECOGNIZED(i): return i + } + } } - var rawValue: Int { - switch self { - case .unspecified: return 0 - case .release: return 1 - case .rollback: return 2 - case .UNRECOGNIZED(let i): return i - } - } - - } - - init() {} + init() {} } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. - static var allCases: [Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint] = [ - .unspecified, - .release, - .rollback, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint] = [ + .unspecified, + .release, + .rollback, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) /// /// Represents a SQL query. Used in the command member of FlightDescriptor @@ -3254,28 +3249,29 @@ extension Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint: Case /// - ARROW:FLIGHT:SQL:IS_SEARCHABLE - "1" indicates if the column is searchable via WHERE clause, "0" otherwise. /// - GetFlightInfo: execute the query. 
struct Arrow_Flight_Protocol_Sql_CommandStatementQuery { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// The SQL syntax. + var query: String = .init() - /// The SQL syntax. - var query: String = String() + /// Include the query as part of this transaction (if unset, the query is auto-committed). + var transactionID: Data { + get { return _transactionID ?? Data() } + set { _transactionID = newValue } + } - /// Include the query as part of this transaction (if unset, the query is auto-committed). - var transactionID: Data { - get {return _transactionID ?? Data()} - set {_transactionID = newValue} - } - /// Returns true if `transactionID` has been explicitly set. - var hasTransactionID: Bool {return self._transactionID != nil} - /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. - mutating func clearTransactionID() {self._transactionID = nil} + /// Returns true if `transactionID` has been explicitly set. + var hasTransactionID: Bool { return _transactionID != nil } + /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. + mutating func clearTransactionID() { _transactionID = nil } - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} - fileprivate var _transactionID: Data? = nil + fileprivate var _transactionID: Data? } /// @@ -3296,52 +3292,54 @@ struct Arrow_Flight_Protocol_Sql_CommandStatementQuery { /// - GetFlightInfo: execute the query. /// - DoPut: execute the query. 
struct Arrow_Flight_Protocol_Sql_CommandStatementSubstraitPlan { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - /// A serialized substrait.Plan - var plan: Arrow_Flight_Protocol_Sql_SubstraitPlan { - get {return _plan ?? Arrow_Flight_Protocol_Sql_SubstraitPlan()} - set {_plan = newValue} - } - /// Returns true if `plan` has been explicitly set. - var hasPlan: Bool {return self._plan != nil} - /// Clears the value of `plan`. Subsequent reads from it will return its default value. - mutating func clearPlan() {self._plan = nil} - - /// Include the query as part of this transaction (if unset, the query is auto-committed). - var transactionID: Data { - get {return _transactionID ?? Data()} - set {_transactionID = newValue} - } - /// Returns true if `transactionID` has been explicitly set. - var hasTransactionID: Bool {return self._transactionID != nil} - /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. - mutating func clearTransactionID() {self._transactionID = nil} - - var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} - - fileprivate var _plan: Arrow_Flight_Protocol_Sql_SubstraitPlan? = nil - fileprivate var _transactionID: Data? = nil -} - -///* + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// A serialized substrait.Plan + var plan: Arrow_Flight_Protocol_Sql_SubstraitPlan { + get { return _plan ?? Arrow_Flight_Protocol_Sql_SubstraitPlan() } + set { _plan = newValue } + } + + /// Returns true if `plan` has been explicitly set. + var hasPlan: Bool { return _plan != nil } + /// Clears the value of `plan`. Subsequent reads from it will return its default value. 
+ mutating func clearPlan() { _plan = nil } + + /// Include the query as part of this transaction (if unset, the query is auto-committed). + var transactionID: Data { + get { return _transactionID ?? Data() } + set { _transactionID = newValue } + } + + /// Returns true if `transactionID` has been explicitly set. + var hasTransactionID: Bool { return _transactionID != nil } + /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. + mutating func clearTransactionID() { _transactionID = nil } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _plan: Arrow_Flight_Protocol_Sql_SubstraitPlan? + fileprivate var _transactionID: Data? +} + +/// * /// Represents a ticket resulting from GetFlightInfo with a CommandStatementQuery. /// This should be used only once and treated as an opaque value, that is, clients should not attempt to parse this. struct Arrow_Flight_Protocol_Sql_TicketStatementQuery { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// Unique identifier for the instance of the statement to execute. - var statementHandle: Data = Data() + /// Unique identifier for the instance of the statement to execute. + var statementHandle: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -3362,44 +3360,45 @@ struct Arrow_Flight_Protocol_Sql_TicketStatementQuery { /// - DoPut: bind parameter values. All of the bound parameter sets will be executed as a single atomic execution. /// - GetFlightInfo: execute the prepared statement instance. 
struct Arrow_Flight_Protocol_Sql_CommandPreparedStatementQuery { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// Opaque handle for the prepared statement on the server. - var preparedStatementHandle: Data = Data() + /// Opaque handle for the prepared statement on the server. + var preparedStatementHandle: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// /// Represents a SQL update query. Used in the command member of FlightDescriptor /// for the RPC call DoPut to cause the server to execute the included SQL update. struct Arrow_Flight_Protocol_Sql_CommandStatementUpdate { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// The SQL syntax. - var query: String = String() + /// The SQL syntax. + var query: String = .init() - /// Include the query as part of this transaction (if unset, the query is auto-committed). - var transactionID: Data { - get {return _transactionID ?? Data()} - set {_transactionID = newValue} - } - /// Returns true if `transactionID` has been explicitly set. - var hasTransactionID: Bool {return self._transactionID != nil} - /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. 
- mutating func clearTransactionID() {self._transactionID = nil} + /// Include the query as part of this transaction (if unset, the query is auto-committed). + var transactionID: Data { + get { return _transactionID ?? Data() } + set { _transactionID = newValue } + } + + /// Returns true if `transactionID` has been explicitly set. + var hasTransactionID: Bool { return _transactionID != nil } + /// Clears the value of `transactionID`. Subsequent reads from it will return its default value. + mutating func clearTransactionID() { _transactionID = nil } - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} - fileprivate var _transactionID: Data? = nil + fileprivate var _transactionID: Data? } /// @@ -3407,16 +3406,16 @@ struct Arrow_Flight_Protocol_Sql_CommandStatementUpdate { /// for the RPC call DoPut to cause the server to execute the included /// prepared statement handle as an update. struct Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// Opaque handle for the prepared statement on the server. - var preparedStatementHandle: Data = Data() + /// Opaque handle for the prepared statement on the server. + var preparedStatementHandle: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -3424,17 +3423,17 @@ struct Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate { /// CommandPreparedStatementUpdate was in the request, containing /// results from the update. 
struct Arrow_Flight_Protocol_Sql_DoPutUpdateResult { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// The number of records updated. A return value of -1 represents - /// an unknown updated record count. - var recordCount: Int64 = 0 + /// The number of records updated. A return value of -1 represents + /// an unknown updated record count. + var recordCount: Int64 = 0 - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -3454,19 +3453,19 @@ struct Arrow_Flight_Protocol_Sql_DoPutUpdateResult { /// This command is deprecated since 13.0.0. Use the "CancelFlightInfo" /// action with DoAction instead. struct Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. - /// The result of the GetFlightInfo RPC that initiated the query. - /// XXX(ARROW-16902): this must be a serialized FlightInfo, but is - /// rendered as bytes because Protobuf does not really support one - /// DLL using Protobuf definitions from another DLL. - var info: Data = Data() + /// The result of the GetFlightInfo RPC that initiated the query. 
+ /// XXX(ARROW-16902): this must be a serialized FlightInfo, but is + /// rendered as bytes because Protobuf does not really support one + /// DLL using Protobuf definitions from another DLL. + var info: Data = .init() - var unknownFields = SwiftProtobuf.UnknownStorage() + var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} + init() {} } /// @@ -3477,77 +3476,76 @@ struct Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest { /// This command is deprecated since 13.0.0. Use the "CancelFlightInfo" /// action with DoAction instead. struct Arrow_Flight_Protocol_Sql_ActionCancelQueryResult { - // SwiftProtobuf.Message conformance is added in an extension below. See the - // `Message` and `Message+*Additions` files in the SwiftProtobuf library for - // methods supported on all messages. - - var result: Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult = .unspecified - - var unknownFields = SwiftProtobuf.UnknownStorage() - - enum CancelResult: SwiftProtobuf.Enum { - typealias RawValue = Int - - /// The cancellation status is unknown. Servers should avoid using - /// this value (send a NOT_FOUND error if the requested query is - /// not known). Clients can retry the request. - case unspecified // = 0 - - /// The cancellation request is complete. Subsequent requests with - /// the same payload may return CANCELLED or a NOT_FOUND error. - case cancelled // = 1 - - /// The cancellation request is in progress. The client may retry - /// the cancellation request. - case cancelling // = 2 - - /// The query is not cancellable. The client should not retry the - /// cancellation request. - case notCancellable // = 3 - case UNRECOGNIZED(Int) - - init() { - self = .unspecified - } - - init?(rawValue: Int) { - switch rawValue { - case 0: self = .unspecified - case 1: self = .cancelled - case 2: self = .cancelling - case 3: self = .notCancellable - default: self = .UNRECOGNIZED(rawValue) - } + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var result: Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult = .unspecified + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum CancelResult: SwiftProtobuf.Enum { + typealias RawValue = Int + + /// The cancellation status is unknown. Servers should avoid using + /// this value (send a NOT_FOUND error if the requested query is + /// not known). Clients can retry the request. + case unspecified // = 0 + + /// The cancellation request is complete. Subsequent requests with + /// the same payload may return CANCELLED or a NOT_FOUND error. + case cancelled // = 1 + + /// The cancellation request is in progress. The client may retry + /// the cancellation request. + case cancelling // = 2 + + /// The query is not cancellable. The client should not retry the + /// cancellation request. + case notCancellable // = 3 + case UNRECOGNIZED(Int) + + init() { + self = .unspecified + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unspecified + case 1: self = .cancelled + case 2: self = .cancelling + case 3: self = .notCancellable + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .unspecified: return 0 + case .cancelled: return 1 + case .cancelling: return 2 + case .notCancellable: return 3 + case let .UNRECOGNIZED(i): return i + } + } } - var rawValue: Int { - switch self { - case .unspecified: return 0 - case .cancelled: return 1 - case .cancelling: return 2 - case .notCancellable: return 3 - case .UNRECOGNIZED(let i): return i - } - } - - } - - init() {} + init() {} } #if swift(>=4.2) extension Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult: CaseIterable { - // The compiler won't synthesize support with the UNRECOGNIZED case. 
- static var allCases: [Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult] = [ - .unspecified, - .cancelled, - .cancelling, - .notCancellable, - ] + // The compiler won't synthesize support with the UNRECOGNIZED case. + static var allCases: [Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult] = [ + .unspecified, + .cancelled, + .cancelling, + .notCancellable, + ] } -#endif // swift(>=4.2) +#endif // swift(>=4.2) #if swift(>=5.5) && canImport(_Concurrency) extension Arrow_Flight_Protocol_Sql_SqlInfo: @unchecked Sendable {} @@ -3605,7 +3603,7 @@ extension Arrow_Flight_Protocol_Sql_DoPutUpdateResult: @unchecked Sendable {} extension Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest: @unchecked Sendable {} extension Arrow_Flight_Protocol_Sql_ActionCancelQueryResult: @unchecked Sendable {} extension Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult: @unchecked Sendable {} -#endif // swift(>=5.5) && canImport(_Concurrency) +#endif // swift(>=5.5) && canImport(_Concurrency) // MARK: - Extension support defined in FlightSql.proto. @@ -3617,22 +3615,22 @@ extension Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult: @unche // the scope where the extend directive occurs. extension SwiftProtobuf.Google_Protobuf_MessageOptions { + var Arrow_Flight_Protocol_Sql_experimental: Bool { + get { return getExtensionValue(ext: Arrow_Flight_Protocol_Sql_Extensions_experimental) ?? false } + set { setExtensionValue(ext: Arrow_Flight_Protocol_Sql_Extensions_experimental, value: newValue) } + } - var Arrow_Flight_Protocol_Sql_experimental: Bool { - get {return getExtensionValue(ext: Arrow_Flight_Protocol_Sql_Extensions_experimental) ?? false} - set {setExtensionValue(ext: Arrow_Flight_Protocol_Sql_Extensions_experimental, value: newValue)} - } - /// Returns true if extension `Arrow_Flight_Protocol_Sql_Extensions_experimental` - /// has been explicitly set. 
- var hasArrow_Flight_Protocol_Sql_experimental: Bool { - return hasExtensionValue(ext: Arrow_Flight_Protocol_Sql_Extensions_experimental) - } - /// Clears the value of extension `Arrow_Flight_Protocol_Sql_Extensions_experimental`. - /// Subsequent reads from it will return its default value. - mutating func clearArrow_Flight_Protocol_Sql_experimental() { - clearExtensionValue(ext: Arrow_Flight_Protocol_Sql_Extensions_experimental) - } + /// Returns true if extension `Arrow_Flight_Protocol_Sql_Extensions_experimental` + /// has been explicitly set. + var hasArrow_Flight_Protocol_Sql_experimental: Bool { + return hasExtensionValue(ext: Arrow_Flight_Protocol_Sql_Extensions_experimental) + } + /// Clears the value of extension `Arrow_Flight_Protocol_Sql_Extensions_experimental`. + /// Subsequent reads from it will return its default value. + mutating func clearArrow_Flight_Protocol_Sql_experimental() { + clearExtensionValue(ext: Arrow_Flight_Protocol_Sql_Extensions_experimental) + } } // MARK: - File's ExtensionMap: Arrow_Flight_Protocol_Sql_FlightSql_Extensions @@ -3642,7 +3640,7 @@ extension SwiftProtobuf.Google_Protobuf_MessageOptions { /// in parsing, or it can be combined with other `SwiftProtobuf.SimpleExtensionMap`s to create /// a larger `SwiftProtobuf.SimpleExtensionMap`. let Arrow_Flight_Protocol_Sql_FlightSql_Extensions: SwiftProtobuf.SimpleExtensionMap = [ - Arrow_Flight_Protocol_Sql_Extensions_experimental + Arrow_Flight_Protocol_Sql_Extensions_experimental, ] // Extension Objects - The only reason these might be needed is when manually @@ -3650,1496 +3648,1493 @@ let Arrow_Flight_Protocol_Sql_FlightSql_Extensions: SwiftProtobuf.SimpleExtensio // accessors for the extension fields on the messages directly. 
let Arrow_Flight_Protocol_Sql_Extensions_experimental = SwiftProtobuf.MessageExtension, SwiftProtobuf.Google_Protobuf_MessageOptions>( - _protobuf_fieldNumber: 1000, - fieldName: "arrow.flight.protocol.sql.experimental" + _protobuf_fieldNumber: 1000, + fieldName: "arrow.flight.protocol.sql.experimental" ) // MARK: - Code below here is support for the SwiftProtobuf runtime. -fileprivate let _protobuf_package = "arrow.flight.protocol.sql" +private let _protobuf_package = "arrow.flight.protocol.sql" extension Arrow_Flight_Protocol_Sql_SqlInfo: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "FLIGHT_SQL_SERVER_NAME"), - 1: .same(proto: "FLIGHT_SQL_SERVER_VERSION"), - 2: .same(proto: "FLIGHT_SQL_SERVER_ARROW_VERSION"), - 3: .same(proto: "FLIGHT_SQL_SERVER_READ_ONLY"), - 4: .same(proto: "FLIGHT_SQL_SERVER_SQL"), - 5: .same(proto: "FLIGHT_SQL_SERVER_SUBSTRAIT"), - 6: .same(proto: "FLIGHT_SQL_SERVER_SUBSTRAIT_MIN_VERSION"), - 7: .same(proto: "FLIGHT_SQL_SERVER_SUBSTRAIT_MAX_VERSION"), - 8: .same(proto: "FLIGHT_SQL_SERVER_TRANSACTION"), - 9: .same(proto: "FLIGHT_SQL_SERVER_CANCEL"), - 100: .same(proto: "FLIGHT_SQL_SERVER_STATEMENT_TIMEOUT"), - 101: .same(proto: "FLIGHT_SQL_SERVER_TRANSACTION_TIMEOUT"), - 500: .same(proto: "SQL_DDL_CATALOG"), - 501: .same(proto: "SQL_DDL_SCHEMA"), - 502: .same(proto: "SQL_DDL_TABLE"), - 503: .same(proto: "SQL_IDENTIFIER_CASE"), - 504: .same(proto: "SQL_IDENTIFIER_QUOTE_CHAR"), - 505: .same(proto: "SQL_QUOTED_IDENTIFIER_CASE"), - 506: .same(proto: "SQL_ALL_TABLES_ARE_SELECTABLE"), - 507: .same(proto: "SQL_NULL_ORDERING"), - 508: .same(proto: "SQL_KEYWORDS"), - 509: .same(proto: "SQL_NUMERIC_FUNCTIONS"), - 510: .same(proto: "SQL_STRING_FUNCTIONS"), - 511: .same(proto: "SQL_SYSTEM_FUNCTIONS"), - 512: .same(proto: "SQL_DATETIME_FUNCTIONS"), - 513: .same(proto: "SQL_SEARCH_STRING_ESCAPE"), - 514: .same(proto: "SQL_EXTRA_NAME_CHARACTERS"), - 515: .same(proto: 
"SQL_SUPPORTS_COLUMN_ALIASING"), - 516: .same(proto: "SQL_NULL_PLUS_NULL_IS_NULL"), - 517: .same(proto: "SQL_SUPPORTS_CONVERT"), - 518: .same(proto: "SQL_SUPPORTS_TABLE_CORRELATION_NAMES"), - 519: .same(proto: "SQL_SUPPORTS_DIFFERENT_TABLE_CORRELATION_NAMES"), - 520: .same(proto: "SQL_SUPPORTS_EXPRESSIONS_IN_ORDER_BY"), - 521: .same(proto: "SQL_SUPPORTS_ORDER_BY_UNRELATED"), - 522: .same(proto: "SQL_SUPPORTED_GROUP_BY"), - 523: .same(proto: "SQL_SUPPORTS_LIKE_ESCAPE_CLAUSE"), - 524: .same(proto: "SQL_SUPPORTS_NON_NULLABLE_COLUMNS"), - 525: .same(proto: "SQL_SUPPORTED_GRAMMAR"), - 526: .same(proto: "SQL_ANSI92_SUPPORTED_LEVEL"), - 527: .same(proto: "SQL_SUPPORTS_INTEGRITY_ENHANCEMENT_FACILITY"), - 528: .same(proto: "SQL_OUTER_JOINS_SUPPORT_LEVEL"), - 529: .same(proto: "SQL_SCHEMA_TERM"), - 530: .same(proto: "SQL_PROCEDURE_TERM"), - 531: .same(proto: "SQL_CATALOG_TERM"), - 532: .same(proto: "SQL_CATALOG_AT_START"), - 533: .same(proto: "SQL_SCHEMAS_SUPPORTED_ACTIONS"), - 534: .same(proto: "SQL_CATALOGS_SUPPORTED_ACTIONS"), - 535: .same(proto: "SQL_SUPPORTED_POSITIONED_COMMANDS"), - 536: .same(proto: "SQL_SELECT_FOR_UPDATE_SUPPORTED"), - 537: .same(proto: "SQL_STORED_PROCEDURES_SUPPORTED"), - 538: .same(proto: "SQL_SUPPORTED_SUBQUERIES"), - 539: .same(proto: "SQL_CORRELATED_SUBQUERIES_SUPPORTED"), - 540: .same(proto: "SQL_SUPPORTED_UNIONS"), - 541: .same(proto: "SQL_MAX_BINARY_LITERAL_LENGTH"), - 542: .same(proto: "SQL_MAX_CHAR_LITERAL_LENGTH"), - 543: .same(proto: "SQL_MAX_COLUMN_NAME_LENGTH"), - 544: .same(proto: "SQL_MAX_COLUMNS_IN_GROUP_BY"), - 545: .same(proto: "SQL_MAX_COLUMNS_IN_INDEX"), - 546: .same(proto: "SQL_MAX_COLUMNS_IN_ORDER_BY"), - 547: .same(proto: "SQL_MAX_COLUMNS_IN_SELECT"), - 548: .same(proto: "SQL_MAX_COLUMNS_IN_TABLE"), - 549: .same(proto: "SQL_MAX_CONNECTIONS"), - 550: .same(proto: "SQL_MAX_CURSOR_NAME_LENGTH"), - 551: .same(proto: "SQL_MAX_INDEX_LENGTH"), - 552: .same(proto: "SQL_DB_SCHEMA_NAME_LENGTH"), - 553: .same(proto: 
"SQL_MAX_PROCEDURE_NAME_LENGTH"), - 554: .same(proto: "SQL_MAX_CATALOG_NAME_LENGTH"), - 555: .same(proto: "SQL_MAX_ROW_SIZE"), - 556: .same(proto: "SQL_MAX_ROW_SIZE_INCLUDES_BLOBS"), - 557: .same(proto: "SQL_MAX_STATEMENT_LENGTH"), - 558: .same(proto: "SQL_MAX_STATEMENTS"), - 559: .same(proto: "SQL_MAX_TABLE_NAME_LENGTH"), - 560: .same(proto: "SQL_MAX_TABLES_IN_SELECT"), - 561: .same(proto: "SQL_MAX_USERNAME_LENGTH"), - 562: .same(proto: "SQL_DEFAULT_TRANSACTION_ISOLATION"), - 563: .same(proto: "SQL_TRANSACTIONS_SUPPORTED"), - 564: .same(proto: "SQL_SUPPORTED_TRANSACTIONS_ISOLATION_LEVELS"), - 565: .same(proto: "SQL_DATA_DEFINITION_CAUSES_TRANSACTION_COMMIT"), - 566: .same(proto: "SQL_DATA_DEFINITIONS_IN_TRANSACTIONS_IGNORED"), - 567: .same(proto: "SQL_SUPPORTED_RESULT_SET_TYPES"), - 568: .same(proto: "SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_UNSPECIFIED"), - 569: .same(proto: "SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_FORWARD_ONLY"), - 570: .same(proto: "SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_SCROLL_SENSITIVE"), - 571: .same(proto: "SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_SCROLL_INSENSITIVE"), - 572: .same(proto: "SQL_BATCH_UPDATES_SUPPORTED"), - 573: .same(proto: "SQL_SAVEPOINTS_SUPPORTED"), - 574: .same(proto: "SQL_NAMED_PARAMETERS_SUPPORTED"), - 575: .same(proto: "SQL_LOCATORS_UPDATE_COPY"), - 576: .same(proto: "SQL_STORED_FUNCTIONS_USING_CALL_SYNTAX_SUPPORTED"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "FLIGHT_SQL_SERVER_NAME"), + 1: .same(proto: "FLIGHT_SQL_SERVER_VERSION"), + 2: .same(proto: "FLIGHT_SQL_SERVER_ARROW_VERSION"), + 3: .same(proto: "FLIGHT_SQL_SERVER_READ_ONLY"), + 4: .same(proto: "FLIGHT_SQL_SERVER_SQL"), + 5: .same(proto: "FLIGHT_SQL_SERVER_SUBSTRAIT"), + 6: .same(proto: "FLIGHT_SQL_SERVER_SUBSTRAIT_MIN_VERSION"), + 7: .same(proto: "FLIGHT_SQL_SERVER_SUBSTRAIT_MAX_VERSION"), + 8: .same(proto: "FLIGHT_SQL_SERVER_TRANSACTION"), + 9: .same(proto: "FLIGHT_SQL_SERVER_CANCEL"), + 100: .same(proto: 
"FLIGHT_SQL_SERVER_STATEMENT_TIMEOUT"), + 101: .same(proto: "FLIGHT_SQL_SERVER_TRANSACTION_TIMEOUT"), + 500: .same(proto: "SQL_DDL_CATALOG"), + 501: .same(proto: "SQL_DDL_SCHEMA"), + 502: .same(proto: "SQL_DDL_TABLE"), + 503: .same(proto: "SQL_IDENTIFIER_CASE"), + 504: .same(proto: "SQL_IDENTIFIER_QUOTE_CHAR"), + 505: .same(proto: "SQL_QUOTED_IDENTIFIER_CASE"), + 506: .same(proto: "SQL_ALL_TABLES_ARE_SELECTABLE"), + 507: .same(proto: "SQL_NULL_ORDERING"), + 508: .same(proto: "SQL_KEYWORDS"), + 509: .same(proto: "SQL_NUMERIC_FUNCTIONS"), + 510: .same(proto: "SQL_STRING_FUNCTIONS"), + 511: .same(proto: "SQL_SYSTEM_FUNCTIONS"), + 512: .same(proto: "SQL_DATETIME_FUNCTIONS"), + 513: .same(proto: "SQL_SEARCH_STRING_ESCAPE"), + 514: .same(proto: "SQL_EXTRA_NAME_CHARACTERS"), + 515: .same(proto: "SQL_SUPPORTS_COLUMN_ALIASING"), + 516: .same(proto: "SQL_NULL_PLUS_NULL_IS_NULL"), + 517: .same(proto: "SQL_SUPPORTS_CONVERT"), + 518: .same(proto: "SQL_SUPPORTS_TABLE_CORRELATION_NAMES"), + 519: .same(proto: "SQL_SUPPORTS_DIFFERENT_TABLE_CORRELATION_NAMES"), + 520: .same(proto: "SQL_SUPPORTS_EXPRESSIONS_IN_ORDER_BY"), + 521: .same(proto: "SQL_SUPPORTS_ORDER_BY_UNRELATED"), + 522: .same(proto: "SQL_SUPPORTED_GROUP_BY"), + 523: .same(proto: "SQL_SUPPORTS_LIKE_ESCAPE_CLAUSE"), + 524: .same(proto: "SQL_SUPPORTS_NON_NULLABLE_COLUMNS"), + 525: .same(proto: "SQL_SUPPORTED_GRAMMAR"), + 526: .same(proto: "SQL_ANSI92_SUPPORTED_LEVEL"), + 527: .same(proto: "SQL_SUPPORTS_INTEGRITY_ENHANCEMENT_FACILITY"), + 528: .same(proto: "SQL_OUTER_JOINS_SUPPORT_LEVEL"), + 529: .same(proto: "SQL_SCHEMA_TERM"), + 530: .same(proto: "SQL_PROCEDURE_TERM"), + 531: .same(proto: "SQL_CATALOG_TERM"), + 532: .same(proto: "SQL_CATALOG_AT_START"), + 533: .same(proto: "SQL_SCHEMAS_SUPPORTED_ACTIONS"), + 534: .same(proto: "SQL_CATALOGS_SUPPORTED_ACTIONS"), + 535: .same(proto: "SQL_SUPPORTED_POSITIONED_COMMANDS"), + 536: .same(proto: "SQL_SELECT_FOR_UPDATE_SUPPORTED"), + 537: .same(proto: 
"SQL_STORED_PROCEDURES_SUPPORTED"), + 538: .same(proto: "SQL_SUPPORTED_SUBQUERIES"), + 539: .same(proto: "SQL_CORRELATED_SUBQUERIES_SUPPORTED"), + 540: .same(proto: "SQL_SUPPORTED_UNIONS"), + 541: .same(proto: "SQL_MAX_BINARY_LITERAL_LENGTH"), + 542: .same(proto: "SQL_MAX_CHAR_LITERAL_LENGTH"), + 543: .same(proto: "SQL_MAX_COLUMN_NAME_LENGTH"), + 544: .same(proto: "SQL_MAX_COLUMNS_IN_GROUP_BY"), + 545: .same(proto: "SQL_MAX_COLUMNS_IN_INDEX"), + 546: .same(proto: "SQL_MAX_COLUMNS_IN_ORDER_BY"), + 547: .same(proto: "SQL_MAX_COLUMNS_IN_SELECT"), + 548: .same(proto: "SQL_MAX_COLUMNS_IN_TABLE"), + 549: .same(proto: "SQL_MAX_CONNECTIONS"), + 550: .same(proto: "SQL_MAX_CURSOR_NAME_LENGTH"), + 551: .same(proto: "SQL_MAX_INDEX_LENGTH"), + 552: .same(proto: "SQL_DB_SCHEMA_NAME_LENGTH"), + 553: .same(proto: "SQL_MAX_PROCEDURE_NAME_LENGTH"), + 554: .same(proto: "SQL_MAX_CATALOG_NAME_LENGTH"), + 555: .same(proto: "SQL_MAX_ROW_SIZE"), + 556: .same(proto: "SQL_MAX_ROW_SIZE_INCLUDES_BLOBS"), + 557: .same(proto: "SQL_MAX_STATEMENT_LENGTH"), + 558: .same(proto: "SQL_MAX_STATEMENTS"), + 559: .same(proto: "SQL_MAX_TABLE_NAME_LENGTH"), + 560: .same(proto: "SQL_MAX_TABLES_IN_SELECT"), + 561: .same(proto: "SQL_MAX_USERNAME_LENGTH"), + 562: .same(proto: "SQL_DEFAULT_TRANSACTION_ISOLATION"), + 563: .same(proto: "SQL_TRANSACTIONS_SUPPORTED"), + 564: .same(proto: "SQL_SUPPORTED_TRANSACTIONS_ISOLATION_LEVELS"), + 565: .same(proto: "SQL_DATA_DEFINITION_CAUSES_TRANSACTION_COMMIT"), + 566: .same(proto: "SQL_DATA_DEFINITIONS_IN_TRANSACTIONS_IGNORED"), + 567: .same(proto: "SQL_SUPPORTED_RESULT_SET_TYPES"), + 568: .same(proto: "SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_UNSPECIFIED"), + 569: .same(proto: "SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_FORWARD_ONLY"), + 570: .same(proto: "SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_SCROLL_SENSITIVE"), + 571: .same(proto: "SQL_SUPPORTED_CONCURRENCIES_FOR_RESULT_SET_SCROLL_INSENSITIVE"), + 572: .same(proto: "SQL_BATCH_UPDATES_SUPPORTED"), + 573: 
.same(proto: "SQL_SAVEPOINTS_SUPPORTED"), + 574: .same(proto: "SQL_NAMED_PARAMETERS_SUPPORTED"), + 575: .same(proto: "SQL_LOCATORS_UPDATE_COPY"), + 576: .same(proto: "SQL_STORED_FUNCTIONS_USING_CALL_SYNTAX_SUPPORTED"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedTransaction: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_SUPPORTED_TRANSACTION_NONE"), - 1: .same(proto: "SQL_SUPPORTED_TRANSACTION_TRANSACTION"), - 2: .same(proto: "SQL_SUPPORTED_TRANSACTION_SAVEPOINT"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_SUPPORTED_TRANSACTION_NONE"), + 1: .same(proto: "SQL_SUPPORTED_TRANSACTION_TRANSACTION"), + 2: .same(proto: "SQL_SUPPORTED_TRANSACTION_SAVEPOINT"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedCaseSensitivity: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_CASE_SENSITIVITY_UNKNOWN"), - 1: .same(proto: "SQL_CASE_SENSITIVITY_CASE_INSENSITIVE"), - 2: .same(proto: "SQL_CASE_SENSITIVITY_UPPERCASE"), - 3: .same(proto: "SQL_CASE_SENSITIVITY_LOWERCASE"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_CASE_SENSITIVITY_UNKNOWN"), + 1: .same(proto: "SQL_CASE_SENSITIVITY_CASE_INSENSITIVE"), + 2: .same(proto: "SQL_CASE_SENSITIVITY_UPPERCASE"), + 3: .same(proto: "SQL_CASE_SENSITIVITY_LOWERCASE"), + ] } extension Arrow_Flight_Protocol_Sql_SqlNullOrdering: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_NULLS_SORTED_HIGH"), - 1: .same(proto: "SQL_NULLS_SORTED_LOW"), - 2: .same(proto: "SQL_NULLS_SORTED_AT_START"), - 3: .same(proto: "SQL_NULLS_SORTED_AT_END"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_NULLS_SORTED_HIGH"), + 1: .same(proto: "SQL_NULLS_SORTED_LOW"), + 2: .same(proto: "SQL_NULLS_SORTED_AT_START"), + 3: .same(proto: 
"SQL_NULLS_SORTED_AT_END"), + ] } extension Arrow_Flight_Protocol_Sql_SupportedSqlGrammar: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_MINIMUM_GRAMMAR"), - 1: .same(proto: "SQL_CORE_GRAMMAR"), - 2: .same(proto: "SQL_EXTENDED_GRAMMAR"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_MINIMUM_GRAMMAR"), + 1: .same(proto: "SQL_CORE_GRAMMAR"), + 2: .same(proto: "SQL_EXTENDED_GRAMMAR"), + ] } extension Arrow_Flight_Protocol_Sql_SupportedAnsi92SqlGrammarLevel: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "ANSI92_ENTRY_SQL"), - 1: .same(proto: "ANSI92_INTERMEDIATE_SQL"), - 2: .same(proto: "ANSI92_FULL_SQL"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "ANSI92_ENTRY_SQL"), + 1: .same(proto: "ANSI92_INTERMEDIATE_SQL"), + 2: .same(proto: "ANSI92_FULL_SQL"), + ] } extension Arrow_Flight_Protocol_Sql_SqlOuterJoinsSupportLevel: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_JOINS_UNSUPPORTED"), - 1: .same(proto: "SQL_LIMITED_OUTER_JOINS"), - 2: .same(proto: "SQL_FULL_OUTER_JOINS"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_JOINS_UNSUPPORTED"), + 1: .same(proto: "SQL_LIMITED_OUTER_JOINS"), + 2: .same(proto: "SQL_FULL_OUTER_JOINS"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedGroupBy: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_GROUP_BY_UNRELATED"), - 1: .same(proto: "SQL_GROUP_BY_BEYOND_SELECT"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_GROUP_BY_UNRELATED"), + 1: .same(proto: "SQL_GROUP_BY_BEYOND_SELECT"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedElementActions: SwiftProtobuf._ProtoNameProviding { - static let 
_protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_ELEMENT_IN_PROCEDURE_CALLS"), - 1: .same(proto: "SQL_ELEMENT_IN_INDEX_DEFINITIONS"), - 2: .same(proto: "SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_ELEMENT_IN_PROCEDURE_CALLS"), + 1: .same(proto: "SQL_ELEMENT_IN_INDEX_DEFINITIONS"), + 2: .same(proto: "SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedPositionedCommands: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_POSITIONED_DELETE"), - 1: .same(proto: "SQL_POSITIONED_UPDATE"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_POSITIONED_DELETE"), + 1: .same(proto: "SQL_POSITIONED_UPDATE"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedSubqueries: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_SUBQUERIES_IN_COMPARISONS"), - 1: .same(proto: "SQL_SUBQUERIES_IN_EXISTS"), - 2: .same(proto: "SQL_SUBQUERIES_IN_INS"), - 3: .same(proto: "SQL_SUBQUERIES_IN_QUANTIFIEDS"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_SUBQUERIES_IN_COMPARISONS"), + 1: .same(proto: "SQL_SUBQUERIES_IN_EXISTS"), + 2: .same(proto: "SQL_SUBQUERIES_IN_INS"), + 3: .same(proto: "SQL_SUBQUERIES_IN_QUANTIFIEDS"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedUnions: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_UNION"), - 1: .same(proto: "SQL_UNION_ALL"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_UNION"), + 1: .same(proto: "SQL_UNION_ALL"), + ] } extension Arrow_Flight_Protocol_Sql_SqlTransactionIsolationLevel: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: 
.same(proto: "SQL_TRANSACTION_NONE"), - 1: .same(proto: "SQL_TRANSACTION_READ_UNCOMMITTED"), - 2: .same(proto: "SQL_TRANSACTION_READ_COMMITTED"), - 3: .same(proto: "SQL_TRANSACTION_REPEATABLE_READ"), - 4: .same(proto: "SQL_TRANSACTION_SERIALIZABLE"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_TRANSACTION_NONE"), + 1: .same(proto: "SQL_TRANSACTION_READ_UNCOMMITTED"), + 2: .same(proto: "SQL_TRANSACTION_READ_COMMITTED"), + 3: .same(proto: "SQL_TRANSACTION_REPEATABLE_READ"), + 4: .same(proto: "SQL_TRANSACTION_SERIALIZABLE"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedTransactions: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_TRANSACTION_UNSPECIFIED"), - 1: .same(proto: "SQL_DATA_DEFINITION_TRANSACTIONS"), - 2: .same(proto: "SQL_DATA_MANIPULATION_TRANSACTIONS"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_TRANSACTION_UNSPECIFIED"), + 1: .same(proto: "SQL_DATA_DEFINITION_TRANSACTIONS"), + 2: .same(proto: "SQL_DATA_MANIPULATION_TRANSACTIONS"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedResultSetType: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_RESULT_SET_TYPE_UNSPECIFIED"), - 1: .same(proto: "SQL_RESULT_SET_TYPE_FORWARD_ONLY"), - 2: .same(proto: "SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE"), - 3: .same(proto: "SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_RESULT_SET_TYPE_UNSPECIFIED"), + 1: .same(proto: "SQL_RESULT_SET_TYPE_FORWARD_ONLY"), + 2: .same(proto: "SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE"), + 3: .same(proto: "SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedResultSetConcurrency: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: 
"SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED"), - 1: .same(proto: "SQL_RESULT_SET_CONCURRENCY_READ_ONLY"), - 2: .same(proto: "SQL_RESULT_SET_CONCURRENCY_UPDATABLE"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED"), + 1: .same(proto: "SQL_RESULT_SET_CONCURRENCY_READ_ONLY"), + 2: .same(proto: "SQL_RESULT_SET_CONCURRENCY_UPDATABLE"), + ] } extension Arrow_Flight_Protocol_Sql_SqlSupportsConvert: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SQL_CONVERT_BIGINT"), - 1: .same(proto: "SQL_CONVERT_BINARY"), - 2: .same(proto: "SQL_CONVERT_BIT"), - 3: .same(proto: "SQL_CONVERT_CHAR"), - 4: .same(proto: "SQL_CONVERT_DATE"), - 5: .same(proto: "SQL_CONVERT_DECIMAL"), - 6: .same(proto: "SQL_CONVERT_FLOAT"), - 7: .same(proto: "SQL_CONVERT_INTEGER"), - 8: .same(proto: "SQL_CONVERT_INTERVAL_DAY_TIME"), - 9: .same(proto: "SQL_CONVERT_INTERVAL_YEAR_MONTH"), - 10: .same(proto: "SQL_CONVERT_LONGVARBINARY"), - 11: .same(proto: "SQL_CONVERT_LONGVARCHAR"), - 12: .same(proto: "SQL_CONVERT_NUMERIC"), - 13: .same(proto: "SQL_CONVERT_REAL"), - 14: .same(proto: "SQL_CONVERT_SMALLINT"), - 15: .same(proto: "SQL_CONVERT_TIME"), - 16: .same(proto: "SQL_CONVERT_TIMESTAMP"), - 17: .same(proto: "SQL_CONVERT_TINYINT"), - 18: .same(proto: "SQL_CONVERT_VARBINARY"), - 19: .same(proto: "SQL_CONVERT_VARCHAR"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL_CONVERT_BIGINT"), + 1: .same(proto: "SQL_CONVERT_BINARY"), + 2: .same(proto: "SQL_CONVERT_BIT"), + 3: .same(proto: "SQL_CONVERT_CHAR"), + 4: .same(proto: "SQL_CONVERT_DATE"), + 5: .same(proto: "SQL_CONVERT_DECIMAL"), + 6: .same(proto: "SQL_CONVERT_FLOAT"), + 7: .same(proto: "SQL_CONVERT_INTEGER"), + 8: .same(proto: "SQL_CONVERT_INTERVAL_DAY_TIME"), + 9: .same(proto: "SQL_CONVERT_INTERVAL_YEAR_MONTH"), + 10: .same(proto: "SQL_CONVERT_LONGVARBINARY"), + 11: .same(proto: 
"SQL_CONVERT_LONGVARCHAR"), + 12: .same(proto: "SQL_CONVERT_NUMERIC"), + 13: .same(proto: "SQL_CONVERT_REAL"), + 14: .same(proto: "SQL_CONVERT_SMALLINT"), + 15: .same(proto: "SQL_CONVERT_TIME"), + 16: .same(proto: "SQL_CONVERT_TIMESTAMP"), + 17: .same(proto: "SQL_CONVERT_TINYINT"), + 18: .same(proto: "SQL_CONVERT_VARBINARY"), + 19: .same(proto: "SQL_CONVERT_VARCHAR"), + ] } extension Arrow_Flight_Protocol_Sql_XdbcDataType: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - -9: .same(proto: "XDBC_WVARCHAR"), - -8: .same(proto: "XDBC_WCHAR"), - -7: .same(proto: "XDBC_BIT"), - -6: .same(proto: "XDBC_TINYINT"), - -5: .same(proto: "XDBC_BIGINT"), - -4: .same(proto: "XDBC_LONGVARBINARY"), - -3: .same(proto: "XDBC_VARBINARY"), - -2: .same(proto: "XDBC_BINARY"), - -1: .same(proto: "XDBC_LONGVARCHAR"), - 0: .same(proto: "XDBC_UNKNOWN_TYPE"), - 1: .same(proto: "XDBC_CHAR"), - 2: .same(proto: "XDBC_NUMERIC"), - 3: .same(proto: "XDBC_DECIMAL"), - 4: .same(proto: "XDBC_INTEGER"), - 5: .same(proto: "XDBC_SMALLINT"), - 6: .same(proto: "XDBC_FLOAT"), - 7: .same(proto: "XDBC_REAL"), - 8: .same(proto: "XDBC_DOUBLE"), - 9: .same(proto: "XDBC_DATETIME"), - 10: .same(proto: "XDBC_INTERVAL"), - 12: .same(proto: "XDBC_VARCHAR"), - 91: .same(proto: "XDBC_DATE"), - 92: .same(proto: "XDBC_TIME"), - 93: .same(proto: "XDBC_TIMESTAMP"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + -9: .same(proto: "XDBC_WVARCHAR"), + -8: .same(proto: "XDBC_WCHAR"), + -7: .same(proto: "XDBC_BIT"), + -6: .same(proto: "XDBC_TINYINT"), + -5: .same(proto: "XDBC_BIGINT"), + -4: .same(proto: "XDBC_LONGVARBINARY"), + -3: .same(proto: "XDBC_VARBINARY"), + -2: .same(proto: "XDBC_BINARY"), + -1: .same(proto: "XDBC_LONGVARCHAR"), + 0: .same(proto: "XDBC_UNKNOWN_TYPE"), + 1: .same(proto: "XDBC_CHAR"), + 2: .same(proto: "XDBC_NUMERIC"), + 3: .same(proto: "XDBC_DECIMAL"), + 4: .same(proto: "XDBC_INTEGER"), + 5: .same(proto: "XDBC_SMALLINT"), + 6: .same(proto: 
"XDBC_FLOAT"), + 7: .same(proto: "XDBC_REAL"), + 8: .same(proto: "XDBC_DOUBLE"), + 9: .same(proto: "XDBC_DATETIME"), + 10: .same(proto: "XDBC_INTERVAL"), + 12: .same(proto: "XDBC_VARCHAR"), + 91: .same(proto: "XDBC_DATE"), + 92: .same(proto: "XDBC_TIME"), + 93: .same(proto: "XDBC_TIMESTAMP"), + ] } extension Arrow_Flight_Protocol_Sql_XdbcDatetimeSubcode: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "XDBC_SUBCODE_UNKNOWN"), - 1: .aliased(proto: "XDBC_SUBCODE_YEAR", aliases: ["XDBC_SUBCODE_DATE"]), - 2: .aliased(proto: "XDBC_SUBCODE_TIME", aliases: ["XDBC_SUBCODE_MONTH"]), - 3: .aliased(proto: "XDBC_SUBCODE_TIMESTAMP", aliases: ["XDBC_SUBCODE_DAY"]), - 4: .aliased(proto: "XDBC_SUBCODE_TIME_WITH_TIMEZONE", aliases: ["XDBC_SUBCODE_HOUR"]), - 5: .aliased(proto: "XDBC_SUBCODE_TIMESTAMP_WITH_TIMEZONE", aliases: ["XDBC_SUBCODE_MINUTE"]), - 6: .same(proto: "XDBC_SUBCODE_SECOND"), - 7: .same(proto: "XDBC_SUBCODE_YEAR_TO_MONTH"), - 8: .same(proto: "XDBC_SUBCODE_DAY_TO_HOUR"), - 9: .same(proto: "XDBC_SUBCODE_DAY_TO_MINUTE"), - 10: .same(proto: "XDBC_SUBCODE_DAY_TO_SECOND"), - 11: .same(proto: "XDBC_SUBCODE_HOUR_TO_MINUTE"), - 12: .same(proto: "XDBC_SUBCODE_HOUR_TO_SECOND"), - 13: .same(proto: "XDBC_SUBCODE_MINUTE_TO_SECOND"), - 101: .same(proto: "XDBC_SUBCODE_INTERVAL_YEAR"), - 102: .same(proto: "XDBC_SUBCODE_INTERVAL_MONTH"), - 103: .same(proto: "XDBC_SUBCODE_INTERVAL_DAY"), - 104: .same(proto: "XDBC_SUBCODE_INTERVAL_HOUR"), - 105: .same(proto: "XDBC_SUBCODE_INTERVAL_MINUTE"), - 106: .same(proto: "XDBC_SUBCODE_INTERVAL_SECOND"), - 107: .same(proto: "XDBC_SUBCODE_INTERVAL_YEAR_TO_MONTH"), - 108: .same(proto: "XDBC_SUBCODE_INTERVAL_DAY_TO_HOUR"), - 109: .same(proto: "XDBC_SUBCODE_INTERVAL_DAY_TO_MINUTE"), - 110: .same(proto: "XDBC_SUBCODE_INTERVAL_DAY_TO_SECOND"), - 111: .same(proto: "XDBC_SUBCODE_INTERVAL_HOUR_TO_MINUTE"), - 112: .same(proto: "XDBC_SUBCODE_INTERVAL_HOUR_TO_SECOND"), - 113: .same(proto: 
"XDBC_SUBCODE_INTERVAL_MINUTE_TO_SECOND"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "XDBC_SUBCODE_UNKNOWN"), + 1: .aliased(proto: "XDBC_SUBCODE_YEAR", aliases: ["XDBC_SUBCODE_DATE"]), + 2: .aliased(proto: "XDBC_SUBCODE_TIME", aliases: ["XDBC_SUBCODE_MONTH"]), + 3: .aliased(proto: "XDBC_SUBCODE_TIMESTAMP", aliases: ["XDBC_SUBCODE_DAY"]), + 4: .aliased(proto: "XDBC_SUBCODE_TIME_WITH_TIMEZONE", aliases: ["XDBC_SUBCODE_HOUR"]), + 5: .aliased(proto: "XDBC_SUBCODE_TIMESTAMP_WITH_TIMEZONE", aliases: ["XDBC_SUBCODE_MINUTE"]), + 6: .same(proto: "XDBC_SUBCODE_SECOND"), + 7: .same(proto: "XDBC_SUBCODE_YEAR_TO_MONTH"), + 8: .same(proto: "XDBC_SUBCODE_DAY_TO_HOUR"), + 9: .same(proto: "XDBC_SUBCODE_DAY_TO_MINUTE"), + 10: .same(proto: "XDBC_SUBCODE_DAY_TO_SECOND"), + 11: .same(proto: "XDBC_SUBCODE_HOUR_TO_MINUTE"), + 12: .same(proto: "XDBC_SUBCODE_HOUR_TO_SECOND"), + 13: .same(proto: "XDBC_SUBCODE_MINUTE_TO_SECOND"), + 101: .same(proto: "XDBC_SUBCODE_INTERVAL_YEAR"), + 102: .same(proto: "XDBC_SUBCODE_INTERVAL_MONTH"), + 103: .same(proto: "XDBC_SUBCODE_INTERVAL_DAY"), + 104: .same(proto: "XDBC_SUBCODE_INTERVAL_HOUR"), + 105: .same(proto: "XDBC_SUBCODE_INTERVAL_MINUTE"), + 106: .same(proto: "XDBC_SUBCODE_INTERVAL_SECOND"), + 107: .same(proto: "XDBC_SUBCODE_INTERVAL_YEAR_TO_MONTH"), + 108: .same(proto: "XDBC_SUBCODE_INTERVAL_DAY_TO_HOUR"), + 109: .same(proto: "XDBC_SUBCODE_INTERVAL_DAY_TO_MINUTE"), + 110: .same(proto: "XDBC_SUBCODE_INTERVAL_DAY_TO_SECOND"), + 111: .same(proto: "XDBC_SUBCODE_INTERVAL_HOUR_TO_MINUTE"), + 112: .same(proto: "XDBC_SUBCODE_INTERVAL_HOUR_TO_SECOND"), + 113: .same(proto: "XDBC_SUBCODE_INTERVAL_MINUTE_TO_SECOND"), + ] } extension Arrow_Flight_Protocol_Sql_Nullable: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "NULLABILITY_NO_NULLS"), - 1: .same(proto: "NULLABILITY_NULLABLE"), - 2: .same(proto: "NULLABILITY_UNKNOWN"), - ] + static let _protobuf_nameMap: 
SwiftProtobuf._NameMap = [ + 0: .same(proto: "NULLABILITY_NO_NULLS"), + 1: .same(proto: "NULLABILITY_NULLABLE"), + 2: .same(proto: "NULLABILITY_UNKNOWN"), + ] } extension Arrow_Flight_Protocol_Sql_Searchable: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "SEARCHABLE_NONE"), - 1: .same(proto: "SEARCHABLE_CHAR"), - 2: .same(proto: "SEARCHABLE_BASIC"), - 3: .same(proto: "SEARCHABLE_FULL"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SEARCHABLE_NONE"), + 1: .same(proto: "SEARCHABLE_CHAR"), + 2: .same(proto: "SEARCHABLE_BASIC"), + 3: .same(proto: "SEARCHABLE_FULL"), + ] } extension Arrow_Flight_Protocol_Sql_UpdateDeleteRules: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "CASCADE"), - 1: .same(proto: "RESTRICT"), - 2: .same(proto: "SET_NULL"), - 3: .same(proto: "NO_ACTION"), - 4: .same(proto: "SET_DEFAULT"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "CASCADE"), + 1: .same(proto: "RESTRICT"), + 2: .same(proto: "SET_NULL"), + 3: .same(proto: "NO_ACTION"), + 4: .same(proto: "SET_DEFAULT"), + ] } extension Arrow_Flight_Protocol_Sql_CommandGetSqlInfo: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetSqlInfo" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "info"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeRepeatedUInt32Field(value: &self.info) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".CommandGetSqlInfo" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "info"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeRepeatedUInt32Field(value: &info) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if !self.info.isEmpty { - try visitor.visitPackedUInt32Field(value: self.info, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if !info.isEmpty { + try visitor.visitPackedUInt32Field(value: info, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetSqlInfo, rhs: Arrow_Flight_Protocol_Sql_CommandGetSqlInfo) -> Bool { - if lhs.info != rhs.info {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetSqlInfo, rhs: Arrow_Flight_Protocol_Sql_CommandGetSqlInfo) -> Bool { + if lhs.info != rhs.info { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandGetXdbcTypeInfo: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetXdbcTypeInfo" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: 
"data_type"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularInt32Field(value: &self._dataType) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._dataType { - try visitor.visitSingularInt32Field(value: v, fieldNumber: 1) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetXdbcTypeInfo, rhs: Arrow_Flight_Protocol_Sql_CommandGetXdbcTypeInfo) -> Bool { - if lhs._dataType != rhs._dataType {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandGetXdbcTypeInfo" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "data_type"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularInt32Field(value: &_dataType) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._dataType { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetXdbcTypeInfo, rhs: Arrow_Flight_Protocol_Sql_CommandGetXdbcTypeInfo) -> Bool { + if lhs._dataType != rhs._dataType { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandGetCatalogs: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetCatalogs" - static let _protobuf_nameMap = SwiftProtobuf._NameMap() + static let protoMessageName: String = _protobuf_package + ".CommandGetCatalogs" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() - mutating func decodeMessage(decoder: inout D) throws { - while let _ = try decoder.nextFieldNumber() { + mutating func decodeMessage(decoder: inout D) throws { + while let _ = try decoder.nextFieldNumber() {} } - } - func traverse(visitor: inout V) throws { - try unknownFields.traverse(visitor: &visitor) - } + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetCatalogs, rhs: Arrow_Flight_Protocol_Sql_CommandGetCatalogs) -> Bool { - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static 
func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetCatalogs, rhs: Arrow_Flight_Protocol_Sql_CommandGetCatalogs) -> Bool { + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandGetDbSchemas: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetDbSchemas" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "catalog"), - 2: .standard(proto: "db_schema_filter_pattern"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self._catalog) }() - case 2: try { try decoder.decodeSingularStringField(value: &self._dbSchemaFilterPattern) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._catalog { - try visitor.visitSingularStringField(value: v, fieldNumber: 1) - } }() - try { if let v = self._dbSchemaFilterPattern { - try visitor.visitSingularStringField(value: v, fieldNumber: 2) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetDbSchemas, rhs: Arrow_Flight_Protocol_Sql_CommandGetDbSchemas) -> Bool { - if lhs._catalog != rhs._catalog {return false} - if lhs._dbSchemaFilterPattern != rhs._dbSchemaFilterPattern {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandGetDbSchemas" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "catalog"), + 2: .standard(proto: "db_schema_filter_pattern"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &_catalog) + case 2: try decoder.decodeSingularStringField(value: &_dbSchemaFilterPattern) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._catalog { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._dbSchemaFilterPattern { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetDbSchemas, rhs: Arrow_Flight_Protocol_Sql_CommandGetDbSchemas) -> Bool { + if lhs._catalog != rhs._catalog { return false } + if lhs._dbSchemaFilterPattern != rhs._dbSchemaFilterPattern { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandGetTables: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetTables" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "catalog"), - 2: .standard(proto: "db_schema_filter_pattern"), - 3: .standard(proto: "table_name_filter_pattern"), - 4: .standard(proto: "table_types"), - 5: .standard(proto: "include_schema"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self._catalog) }() - case 2: try { try decoder.decodeSingularStringField(value: &self._dbSchemaFilterPattern) }() - case 3: try { try decoder.decodeSingularStringField(value: &self._tableNameFilterPattern) }() - case 4: try { try decoder.decodeRepeatedStringField(value: &self.tableTypes) }() - case 5: try { try decoder.decodeSingularBoolField(value: &self.includeSchema) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._catalog { - try visitor.visitSingularStringField(value: v, fieldNumber: 1) - } }() - try { if let v = self._dbSchemaFilterPattern { - try visitor.visitSingularStringField(value: v, fieldNumber: 2) - } }() - try { if let v = self._tableNameFilterPattern { - try visitor.visitSingularStringField(value: v, fieldNumber: 3) - } }() - if !self.tableTypes.isEmpty { - try visitor.visitRepeatedStringField(value: self.tableTypes, fieldNumber: 4) - } - if self.includeSchema != false { - try visitor.visitSingularBoolField(value: self.includeSchema, fieldNumber: 5) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetTables, rhs: Arrow_Flight_Protocol_Sql_CommandGetTables) -> Bool { - if lhs._catalog != rhs._catalog {return false} - if lhs._dbSchemaFilterPattern != rhs._dbSchemaFilterPattern {return false} - if lhs._tableNameFilterPattern != rhs._tableNameFilterPattern {return false} - if lhs.tableTypes != rhs.tableTypes {return false} - if lhs.includeSchema != rhs.includeSchema {return false} - if lhs.unknownFields != 
rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandGetTables" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "catalog"), + 2: .standard(proto: "db_schema_filter_pattern"), + 3: .standard(proto: "table_name_filter_pattern"), + 4: .standard(proto: "table_types"), + 5: .standard(proto: "include_schema"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &_catalog) + case 2: try decoder.decodeSingularStringField(value: &_dbSchemaFilterPattern) + case 3: try decoder.decodeSingularStringField(value: &_tableNameFilterPattern) + case 4: try decoder.decodeRepeatedStringField(value: &tableTypes) + case 5: try decoder.decodeSingularBoolField(value: &includeSchema) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._catalog { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._dbSchemaFilterPattern { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try { if let v = self._tableNameFilterPattern { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + if !tableTypes.isEmpty { + try visitor.visitRepeatedStringField(value: tableTypes, fieldNumber: 4) + } + if includeSchema != false { + try visitor.visitSingularBoolField(value: includeSchema, fieldNumber: 5) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetTables, rhs: Arrow_Flight_Protocol_Sql_CommandGetTables) -> Bool { + if lhs._catalog != rhs._catalog { return false } + if lhs._dbSchemaFilterPattern != rhs._dbSchemaFilterPattern { return false } + if lhs._tableNameFilterPattern != rhs._tableNameFilterPattern { return false } + if lhs.tableTypes != rhs.tableTypes { return false } + if lhs.includeSchema != rhs.includeSchema { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandGetTableTypes: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetTableTypes" - static let _protobuf_nameMap = SwiftProtobuf._NameMap() + static let protoMessageName: String = _protobuf_package + ".CommandGetTableTypes" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() - mutating func decodeMessage(decoder: inout D) throws { - while let _ = try decoder.nextFieldNumber() { + mutating func decodeMessage(decoder: inout D) throws { + while let _ = try decoder.nextFieldNumber() {} } - } - func traverse(visitor: inout V) throws { - try 
unknownFields.traverse(visitor: &visitor) - } + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetTableTypes, rhs: Arrow_Flight_Protocol_Sql_CommandGetTableTypes) -> Bool { - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetTableTypes, rhs: Arrow_Flight_Protocol_Sql_CommandGetTableTypes) -> Bool { + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandGetPrimaryKeys: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetPrimaryKeys" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "catalog"), - 2: .standard(proto: "db_schema"), - 3: .same(proto: "table"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self._catalog) }() - case 2: try { try decoder.decodeSingularStringField(value: &self._dbSchema) }() - case 3: try { try decoder.decodeSingularStringField(value: &self.table) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._catalog { - try visitor.visitSingularStringField(value: v, fieldNumber: 1) - } }() - try { if let v = self._dbSchema { - try visitor.visitSingularStringField(value: v, fieldNumber: 2) - } }() - if !self.table.isEmpty { - try visitor.visitSingularStringField(value: self.table, fieldNumber: 3) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetPrimaryKeys, rhs: Arrow_Flight_Protocol_Sql_CommandGetPrimaryKeys) -> Bool { - if lhs._catalog != rhs._catalog {return false} - if lhs._dbSchema != rhs._dbSchema {return false} - if lhs.table != rhs.table {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandGetPrimaryKeys" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "catalog"), + 2: .standard(proto: "db_schema"), + 3: .same(proto: "table"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &_catalog) + case 2: try decoder.decodeSingularStringField(value: &_dbSchema) + case 3: try decoder.decodeSingularStringField(value: &table) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._catalog { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._dbSchema { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + if !table.isEmpty { + try visitor.visitSingularStringField(value: table, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetPrimaryKeys, rhs: Arrow_Flight_Protocol_Sql_CommandGetPrimaryKeys) -> Bool { + if lhs._catalog != rhs._catalog { return false } + if lhs._dbSchema != rhs._dbSchema { return false } + if lhs.table != rhs.table { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandGetExportedKeys: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetExportedKeys" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "catalog"), - 2: .standard(proto: "db_schema"), - 3: .same(proto: "table"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self._catalog) }() - case 2: try { try decoder.decodeSingularStringField(value: &self._dbSchema) }() - case 3: try { try decoder.decodeSingularStringField(value: &self.table) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._catalog { - try visitor.visitSingularStringField(value: v, fieldNumber: 1) - } }() - try { if let v = self._dbSchema { - try visitor.visitSingularStringField(value: v, fieldNumber: 2) - } }() - if !self.table.isEmpty { - try visitor.visitSingularStringField(value: self.table, fieldNumber: 3) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetExportedKeys, rhs: Arrow_Flight_Protocol_Sql_CommandGetExportedKeys) -> Bool { - if lhs._catalog != rhs._catalog {return false} - if lhs._dbSchema != rhs._dbSchema {return false} - if lhs.table != rhs.table {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandGetExportedKeys" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "catalog"), + 2: .standard(proto: "db_schema"), + 3: .same(proto: "table"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &_catalog) + case 2: try decoder.decodeSingularStringField(value: &_dbSchema) + case 3: try decoder.decodeSingularStringField(value: &table) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._catalog { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._dbSchema { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + if !table.isEmpty { + try visitor.visitSingularStringField(value: table, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetExportedKeys, rhs: Arrow_Flight_Protocol_Sql_CommandGetExportedKeys) -> Bool { + if lhs._catalog != rhs._catalog { return false } + if lhs._dbSchema != rhs._dbSchema { return false } + if lhs.table != rhs.table { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandGetImportedKeys: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetImportedKeys" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "catalog"), - 2: .standard(proto: "db_schema"), - 3: .same(proto: "table"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for 
every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self._catalog) }() - case 2: try { try decoder.decodeSingularStringField(value: &self._dbSchema) }() - case 3: try { try decoder.decodeSingularStringField(value: &self.table) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._catalog { - try visitor.visitSingularStringField(value: v, fieldNumber: 1) - } }() - try { if let v = self._dbSchema { - try visitor.visitSingularStringField(value: v, fieldNumber: 2) - } }() - if !self.table.isEmpty { - try visitor.visitSingularStringField(value: self.table, fieldNumber: 3) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetImportedKeys, rhs: Arrow_Flight_Protocol_Sql_CommandGetImportedKeys) -> Bool { - if lhs._catalog != rhs._catalog {return false} - if lhs._dbSchema != rhs._dbSchema {return false} - if lhs.table != rhs.table {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandGetImportedKeys" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "catalog"), + 2: .standard(proto: "db_schema"), + 3: .same(proto: "table"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &_catalog) + case 2: try decoder.decodeSingularStringField(value: &_dbSchema) + case 3: try decoder.decodeSingularStringField(value: &table) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._catalog { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._dbSchema { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + if !table.isEmpty { + try visitor.visitSingularStringField(value: table, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetImportedKeys, rhs: Arrow_Flight_Protocol_Sql_CommandGetImportedKeys) -> Bool { + if lhs._catalog != rhs._catalog { return false } + if lhs._dbSchema != rhs._dbSchema { return false } + if lhs.table != rhs.table { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandGetCrossReference: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandGetCrossReference" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "pk_catalog"), - 2: .standard(proto: "pk_db_schema"), - 3: .standard(proto: "pk_table"), - 4: .standard(proto: "fk_catalog"), - 5: .standard(proto: "fk_db_schema"), - 6: .standard(proto: "fk_table"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try 
decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self._pkCatalog) }() - case 2: try { try decoder.decodeSingularStringField(value: &self._pkDbSchema) }() - case 3: try { try decoder.decodeSingularStringField(value: &self.pkTable) }() - case 4: try { try decoder.decodeSingularStringField(value: &self._fkCatalog) }() - case 5: try { try decoder.decodeSingularStringField(value: &self._fkDbSchema) }() - case 6: try { try decoder.decodeSingularStringField(value: &self.fkTable) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._pkCatalog { - try visitor.visitSingularStringField(value: v, fieldNumber: 1) - } }() - try { if let v = self._pkDbSchema { - try visitor.visitSingularStringField(value: v, fieldNumber: 2) - } }() - if !self.pkTable.isEmpty { - try visitor.visitSingularStringField(value: self.pkTable, fieldNumber: 3) - } - try { if let v = self._fkCatalog { - try visitor.visitSingularStringField(value: v, fieldNumber: 4) - } }() - try { if let v = self._fkDbSchema { - try visitor.visitSingularStringField(value: v, fieldNumber: 5) - } }() - if !self.fkTable.isEmpty { - try visitor.visitSingularStringField(value: self.fkTable, fieldNumber: 6) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandGetCrossReference, rhs: Arrow_Flight_Protocol_Sql_CommandGetCrossReference) -> Bool { - if lhs._pkCatalog != rhs._pkCatalog {return false} - if lhs._pkDbSchema != rhs._pkDbSchema {return false} - if lhs.pkTable != rhs.pkTable {return false} - if lhs._fkCatalog != rhs._fkCatalog {return false} - if lhs._fkDbSchema != rhs._fkDbSchema {return false} - if lhs.fkTable != rhs.fkTable {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandGetCrossReference" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "pk_catalog"), + 2: .standard(proto: "pk_db_schema"), + 3: .standard(proto: "pk_table"), + 4: .standard(proto: "fk_catalog"), + 5: .standard(proto: "fk_db_schema"), + 6: .standard(proto: "fk_table"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + 
// enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &_pkCatalog) + case 2: try decoder.decodeSingularStringField(value: &_pkDbSchema) + case 3: try decoder.decodeSingularStringField(value: &pkTable) + case 4: try decoder.decodeSingularStringField(value: &_fkCatalog) + case 5: try decoder.decodeSingularStringField(value: &_fkDbSchema) + case 6: try decoder.decodeSingularStringField(value: &fkTable) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._pkCatalog { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._pkDbSchema { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + if !pkTable.isEmpty { + try visitor.visitSingularStringField(value: pkTable, fieldNumber: 3) + } + try { if let v = self._fkCatalog { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + try { if let v = self._fkDbSchema { + try visitor.visitSingularStringField(value: v, fieldNumber: 5) + } }() + if !fkTable.isEmpty { + try visitor.visitSingularStringField(value: fkTable, fieldNumber: 6) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandGetCrossReference, rhs: Arrow_Flight_Protocol_Sql_CommandGetCrossReference) -> Bool { + if lhs._pkCatalog != rhs._pkCatalog { return false } + if lhs._pkDbSchema != rhs._pkDbSchema { return false } + if lhs.pkTable != rhs.pkTable { return false } + if lhs._fkCatalog != rhs._fkCatalog { return false } + if lhs._fkDbSchema != rhs._fkDbSchema { return false } + if lhs.fkTable != rhs.fkTable { 
return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionCreatePreparedStatementRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "query"), - 2: .standard(proto: "transaction_id"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self.query) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self._transactionID) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - if !self.query.isEmpty { - try visitor.visitSingularStringField(value: self.query, fieldNumber: 1) - } - try { if let v = self._transactionID { - try visitor.visitSingularBytesField(value: v, fieldNumber: 2) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementRequest, rhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementRequest) -> Bool { - if lhs.query != rhs.query {return false} - if lhs._transactionID != rhs._transactionID {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".ActionCreatePreparedStatementRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "query"), + 2: .standard(proto: "transaction_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &query) + case 2: try decoder.decodeSingularBytesField(value: &_transactionID) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !query.isEmpty { + try visitor.visitSingularStringField(value: query, fieldNumber: 1) + } + try { if let v = self._transactionID { + try visitor.visitSingularBytesField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementRequest, rhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementRequest) -> Bool { + if lhs.query != rhs.query { return false } + if lhs._transactionID != rhs._transactionID { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_SubstraitPlan: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".SubstraitPlan" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "plan"), - 2: .same(proto: "version"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.plan) }() - case 2: try { try decoder.decodeSingularStringField(value: &self.version) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.plan.isEmpty { - try visitor.visitSingularBytesField(value: self.plan, fieldNumber: 1) - } - if !self.version.isEmpty { - try visitor.visitSingularStringField(value: self.version, fieldNumber: 2) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_SubstraitPlan, rhs: Arrow_Flight_Protocol_Sql_SubstraitPlan) -> Bool { - if lhs.plan != rhs.plan {return false} - if lhs.version != rhs.version {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".SubstraitPlan" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "plan"), + 2: .same(proto: "version"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &plan) + case 2: try decoder.decodeSingularStringField(value: &version) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !plan.isEmpty { + try visitor.visitSingularBytesField(value: plan, fieldNumber: 1) + } + if !version.isEmpty { + try visitor.visitSingularStringField(value: version, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_SubstraitPlan, rhs: Arrow_Flight_Protocol_Sql_SubstraitPlan) -> Bool { + if lhs.plan != rhs.plan { return false } + if lhs.version != rhs.version { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionCreatePreparedSubstraitPlanRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionCreatePreparedSubstraitPlanRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "plan"), - 2: .standard(proto: "transaction_id"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._plan) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self._transactionID) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._plan { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } }() - try { if let v = self._transactionID { - try visitor.visitSingularBytesField(value: v, fieldNumber: 2) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedSubstraitPlanRequest, rhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedSubstraitPlanRequest) -> Bool { - if lhs._plan != rhs._plan {return false} - if lhs._transactionID != rhs._transactionID {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".ActionCreatePreparedSubstraitPlanRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "plan"), + 2: .standard(proto: "transaction_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_plan) + case 2: try decoder.decodeSingularBytesField(value: &_transactionID) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._plan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = self._transactionID { + try visitor.visitSingularBytesField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedSubstraitPlanRequest, rhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedSubstraitPlanRequest) -> Bool { + if lhs._plan != rhs._plan { return false } + if lhs._transactionID != rhs._transactionID { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionCreatePreparedStatementResult" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "prepared_statement_handle"), - 2: .standard(proto: "dataset_schema"), - 3: .standard(proto: "parameter_schema"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.preparedStatementHandle) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self.datasetSchema) }() - case 3: try { try decoder.decodeSingularBytesField(value: &self.parameterSchema) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.preparedStatementHandle.isEmpty { - try visitor.visitSingularBytesField(value: self.preparedStatementHandle, fieldNumber: 1) - } - if !self.datasetSchema.isEmpty { - try visitor.visitSingularBytesField(value: self.datasetSchema, fieldNumber: 2) - } - if !self.parameterSchema.isEmpty { - try visitor.visitSingularBytesField(value: self.parameterSchema, fieldNumber: 3) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementResult, rhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementResult) -> Bool { - if lhs.preparedStatementHandle != rhs.preparedStatementHandle {return false} - if lhs.datasetSchema != rhs.datasetSchema {return false} - if lhs.parameterSchema != rhs.parameterSchema {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".ActionCreatePreparedStatementResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "prepared_statement_handle"), + 2: .standard(proto: "dataset_schema"), + 3: .standard(proto: "parameter_schema"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &preparedStatementHandle) + case 2: try decoder.decodeSingularBytesField(value: &datasetSchema) + case 3: try decoder.decodeSingularBytesField(value: &parameterSchema) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !preparedStatementHandle.isEmpty { + try visitor.visitSingularBytesField(value: preparedStatementHandle, fieldNumber: 1) + } + if !datasetSchema.isEmpty { + try visitor.visitSingularBytesField(value: datasetSchema, fieldNumber: 2) + } + if !parameterSchema.isEmpty { + try visitor.visitSingularBytesField(value: parameterSchema, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementResult, rhs: Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementResult) -> Bool { + if lhs.preparedStatementHandle != rhs.preparedStatementHandle { return false } + if lhs.datasetSchema != rhs.datasetSchema { return false } + if lhs.parameterSchema != rhs.parameterSchema { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionClosePreparedStatementRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionClosePreparedStatementRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "prepared_statement_handle"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.preparedStatementHandle) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".ActionClosePreparedStatementRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "prepared_statement_handle"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &preparedStatementHandle) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if !self.preparedStatementHandle.isEmpty { - try visitor.visitSingularBytesField(value: self.preparedStatementHandle, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if !preparedStatementHandle.isEmpty { + try visitor.visitSingularBytesField(value: preparedStatementHandle, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionClosePreparedStatementRequest, rhs: Arrow_Flight_Protocol_Sql_ActionClosePreparedStatementRequest) -> Bool { - if lhs.preparedStatementHandle != rhs.preparedStatementHandle {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionClosePreparedStatementRequest, rhs: Arrow_Flight_Protocol_Sql_ActionClosePreparedStatementRequest) -> Bool { + if lhs.preparedStatementHandle != rhs.preparedStatementHandle { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension 
Arrow_Flight_Protocol_Sql_ActionBeginTransactionRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionBeginTransactionRequest" - static let _protobuf_nameMap = SwiftProtobuf._NameMap() + static let protoMessageName: String = _protobuf_package + ".ActionBeginTransactionRequest" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() - mutating func decodeMessage(decoder: inout D) throws { - while let _ = try decoder.nextFieldNumber() { + mutating func decodeMessage(decoder: inout D) throws { + while let _ = try decoder.nextFieldNumber() {} } - } - func traverse(visitor: inout V) throws { - try unknownFields.traverse(visitor: &visitor) - } + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionBeginTransactionRequest, rhs: Arrow_Flight_Protocol_Sql_ActionBeginTransactionRequest) -> Bool { - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionBeginTransactionRequest, rhs: Arrow_Flight_Protocol_Sql_ActionBeginTransactionRequest) -> Bool { + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionBeginSavepointRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "transaction_id"), - 2: .same(proto: "name"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.transactionID) }() - case 2: try { try decoder.decodeSingularStringField(value: &self.name) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.transactionID.isEmpty { - try visitor.visitSingularBytesField(value: self.transactionID, fieldNumber: 1) - } - if !self.name.isEmpty { - try visitor.visitSingularStringField(value: self.name, fieldNumber: 2) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest, rhs: Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest) -> Bool { - if lhs.transactionID != rhs.transactionID {return false} - if lhs.name != rhs.name {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".ActionBeginSavepointRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "transaction_id"), + 2: .same(proto: "name"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &transactionID) + case 2: try decoder.decodeSingularStringField(value: &name) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !transactionID.isEmpty { + try visitor.visitSingularBytesField(value: transactionID, fieldNumber: 1) + } + if !name.isEmpty { + try visitor.visitSingularStringField(value: name, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest, rhs: Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest) -> Bool { + if lhs.transactionID != rhs.transactionID { return false } + if lhs.name != rhs.name { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionBeginTransactionResult" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "transaction_id"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.transactionID) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".ActionBeginTransactionResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "transaction_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &transactionID) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if !self.transactionID.isEmpty { - try visitor.visitSingularBytesField(value: self.transactionID, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if !transactionID.isEmpty { + try visitor.visitSingularBytesField(value: transactionID, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult, rhs: Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult) -> Bool { - if lhs.transactionID != rhs.transactionID {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult, rhs: Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult) -> Bool { + if lhs.transactionID != rhs.transactionID { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionBeginSavepointResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - 
static let protoMessageName: String = _protobuf_package + ".ActionBeginSavepointResult" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "savepoint_id"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.savepointID) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".ActionBeginSavepointResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "savepoint_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &savepointID) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if !self.savepointID.isEmpty { - try visitor.visitSingularBytesField(value: self.savepointID, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if !savepointID.isEmpty { + try visitor.visitSingularBytesField(value: savepointID, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionBeginSavepointResult, rhs: Arrow_Flight_Protocol_Sql_ActionBeginSavepointResult) -> Bool { - if lhs.savepointID != rhs.savepointID {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionBeginSavepointResult, rhs: Arrow_Flight_Protocol_Sql_ActionBeginSavepointResult) -> Bool { + if lhs.savepointID != rhs.savepointID { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionEndTransactionRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "transaction_id"), - 2: .same(proto: "action"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.transactionID) }() - case 2: try { try decoder.decodeSingularEnumField(value: &self.action) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.transactionID.isEmpty { - try visitor.visitSingularBytesField(value: self.transactionID, fieldNumber: 1) - } - if self.action != .unspecified { - try visitor.visitSingularEnumField(value: self.action, fieldNumber: 2) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest, rhs: Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest) -> Bool { - if lhs.transactionID != rhs.transactionID {return false} - if lhs.action != rhs.action {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".ActionEndTransactionRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "transaction_id"), + 2: .same(proto: "action"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &transactionID) + case 2: try decoder.decodeSingularEnumField(value: &action) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !transactionID.isEmpty { + try visitor.visitSingularBytesField(value: transactionID, fieldNumber: 1) + } + if action != .unspecified { + try visitor.visitSingularEnumField(value: action, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest, rhs: Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest) -> Bool { + if lhs.transactionID != rhs.transactionID { return false } + if lhs.action != rhs.action { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "END_TRANSACTION_UNSPECIFIED"), - 1: .same(proto: "END_TRANSACTION_COMMIT"), - 2: .same(proto: "END_TRANSACTION_ROLLBACK"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "END_TRANSACTION_UNSPECIFIED"), + 1: .same(proto: "END_TRANSACTION_COMMIT"), + 2: .same(proto: "END_TRANSACTION_ROLLBACK"), + ] } extension Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionEndSavepointRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "savepoint_id"), - 2: .same(proto: "action"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // 
allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.savepointID) }() - case 2: try { try decoder.decodeSingularEnumField(value: &self.action) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - if !self.savepointID.isEmpty { - try visitor.visitSingularBytesField(value: self.savepointID, fieldNumber: 1) - } - if self.action != .unspecified { - try visitor.visitSingularEnumField(value: self.action, fieldNumber: 2) - } - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest, rhs: Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest) -> Bool { - if lhs.savepointID != rhs.savepointID {return false} - if lhs.action != rhs.action {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".ActionEndSavepointRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "savepoint_id"), + 2: .same(proto: "action"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &savepointID) + case 2: try decoder.decodeSingularEnumField(value: &action) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !savepointID.isEmpty { + try visitor.visitSingularBytesField(value: savepointID, fieldNumber: 1) + } + if action != .unspecified { + try visitor.visitSingularEnumField(value: action, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest, rhs: Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest) -> Bool { + if lhs.savepointID != rhs.savepointID { return false } + if lhs.action != rhs.action { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "END_SAVEPOINT_UNSPECIFIED"), - 1: .same(proto: "END_SAVEPOINT_RELEASE"), - 2: .same(proto: "END_SAVEPOINT_ROLLBACK"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "END_SAVEPOINT_UNSPECIFIED"), + 1: .same(proto: "END_SAVEPOINT_RELEASE"), + 2: .same(proto: "END_SAVEPOINT_ROLLBACK"), + ] } extension Arrow_Flight_Protocol_Sql_CommandStatementQuery: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandStatementQuery" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "query"), - 2: .standard(proto: "transaction_id"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case 
branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self.query) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self._transactionID) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - if !self.query.isEmpty { - try visitor.visitSingularStringField(value: self.query, fieldNumber: 1) - } - try { if let v = self._transactionID { - try visitor.visitSingularBytesField(value: v, fieldNumber: 2) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandStatementQuery, rhs: Arrow_Flight_Protocol_Sql_CommandStatementQuery) -> Bool { - if lhs.query != rhs.query {return false} - if lhs._transactionID != rhs._transactionID {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandStatementQuery" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "query"), + 2: .standard(proto: "transaction_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &query) + case 2: try decoder.decodeSingularBytesField(value: &_transactionID) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !query.isEmpty { + try visitor.visitSingularStringField(value: query, fieldNumber: 1) + } + try { if let v = self._transactionID { + try visitor.visitSingularBytesField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandStatementQuery, rhs: Arrow_Flight_Protocol_Sql_CommandStatementQuery) -> Bool { + if lhs.query != rhs.query { return false } + if lhs._transactionID != rhs._transactionID { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandStatementSubstraitPlan: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandStatementSubstraitPlan" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "plan"), - 2: .standard(proto: "transaction_id"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._plan) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self._transactionID) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - try { if let v = self._plan { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } }() - try { if let v = self._transactionID { - try visitor.visitSingularBytesField(value: v, fieldNumber: 2) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandStatementSubstraitPlan, rhs: Arrow_Flight_Protocol_Sql_CommandStatementSubstraitPlan) -> Bool { - if lhs._plan != rhs._plan {return false} - if lhs._transactionID != rhs._transactionID {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandStatementSubstraitPlan" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "plan"), + 2: .standard(proto: "transaction_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_plan) + case 2: try decoder.decodeSingularBytesField(value: &_transactionID) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._plan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = self._transactionID { + try visitor.visitSingularBytesField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandStatementSubstraitPlan, rhs: Arrow_Flight_Protocol_Sql_CommandStatementSubstraitPlan) -> Bool { + if lhs._plan != rhs._plan { return false } + if lhs._transactionID != rhs._transactionID { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_TicketStatementQuery: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".TicketStatementQuery" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "statement_handle"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.statementHandle) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".TicketStatementQuery" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "statement_handle"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &statementHandle) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if !self.statementHandle.isEmpty { - try visitor.visitSingularBytesField(value: self.statementHandle, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if !statementHandle.isEmpty { + try visitor.visitSingularBytesField(value: statementHandle, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_TicketStatementQuery, rhs: Arrow_Flight_Protocol_Sql_TicketStatementQuery) -> Bool { - if lhs.statementHandle != rhs.statementHandle {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_TicketStatementQuery, rhs: Arrow_Flight_Protocol_Sql_TicketStatementQuery) -> Bool { + if lhs.statementHandle != rhs.statementHandle { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandPreparedStatementQuery: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let 
protoMessageName: String = _protobuf_package + ".CommandPreparedStatementQuery" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "prepared_statement_handle"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.preparedStatementHandle) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".CommandPreparedStatementQuery" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "prepared_statement_handle"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &preparedStatementHandle) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if !self.preparedStatementHandle.isEmpty { - try visitor.visitSingularBytesField(value: self.preparedStatementHandle, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if !preparedStatementHandle.isEmpty { + try visitor.visitSingularBytesField(value: preparedStatementHandle, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandPreparedStatementQuery, rhs: Arrow_Flight_Protocol_Sql_CommandPreparedStatementQuery) -> Bool { - if lhs.preparedStatementHandle != rhs.preparedStatementHandle {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandPreparedStatementQuery, rhs: Arrow_Flight_Protocol_Sql_CommandPreparedStatementQuery) -> Bool { + if lhs.preparedStatementHandle != rhs.preparedStatementHandle { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandStatementUpdate: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandStatementUpdate" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "query"), - 2: .standard(proto: "transaction_id"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self.query) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self._transactionID) }() - default: break - } - } - } - - func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every if/case branch local when no optimizations - // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and - // https://github.com/apple/swift-protobuf/issues/1182 - if !self.query.isEmpty { - try visitor.visitSingularStringField(value: self.query, fieldNumber: 1) - } - try { if let v = self._transactionID { - try visitor.visitSingularBytesField(value: v, fieldNumber: 2) - } }() - try unknownFields.traverse(visitor: &visitor) - } - - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandStatementUpdate, rhs: Arrow_Flight_Protocol_Sql_CommandStatementUpdate) -> Bool { - if lhs.query != rhs.query {return false} - if lhs._transactionID != rhs._transactionID {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static let protoMessageName: String = _protobuf_package + ".CommandStatementUpdate" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "query"), + 2: .standard(proto: "transaction_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &query) + case 2: try decoder.decodeSingularBytesField(value: &_transactionID) + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !query.isEmpty { + try visitor.visitSingularStringField(value: query, fieldNumber: 1) + } + try { if let v = self._transactionID { + try visitor.visitSingularBytesField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandStatementUpdate, rhs: Arrow_Flight_Protocol_Sql_CommandStatementUpdate) -> Bool { + if lhs.query != rhs.query { return false } + if lhs._transactionID != rhs._transactionID { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".CommandPreparedStatementUpdate" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "prepared_statement_handle"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.preparedStatementHandle) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".CommandPreparedStatementUpdate" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "prepared_statement_handle"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &preparedStatementHandle) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if !self.preparedStatementHandle.isEmpty { - try visitor.visitSingularBytesField(value: self.preparedStatementHandle, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if !preparedStatementHandle.isEmpty { + try visitor.visitSingularBytesField(value: preparedStatementHandle, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate, rhs: Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate) -> Bool { - if lhs.preparedStatementHandle != rhs.preparedStatementHandle {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate, rhs: Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate) -> Bool { + if lhs.preparedStatementHandle != rhs.preparedStatementHandle { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension 
Arrow_Flight_Protocol_Sql_DoPutUpdateResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".DoPutUpdateResult" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "record_count"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularInt64Field(value: &self.recordCount) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".DoPutUpdateResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "record_count"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularInt64Field(value: &recordCount) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if self.recordCount != 0 { - try visitor.visitSingularInt64Field(value: self.recordCount, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if recordCount != 0 { + try visitor.visitSingularInt64Field(value: recordCount, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_DoPutUpdateResult, rhs: Arrow_Flight_Protocol_Sql_DoPutUpdateResult) -> Bool { - if lhs.recordCount != rhs.recordCount {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_DoPutUpdateResult, rhs: Arrow_Flight_Protocol_Sql_DoPutUpdateResult) -> Bool { + if lhs.recordCount != rhs.recordCount { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionCancelQueryRequest" - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "info"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularBytesField(value: &self.info) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".ActionCancelQueryRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "info"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &info) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if !self.info.isEmpty { - try visitor.visitSingularBytesField(value: self.info, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if !info.isEmpty { + try visitor.visitSingularBytesField(value: info, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest, rhs: Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest) -> Bool { - if lhs.info != rhs.info {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest, rhs: Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest) -> Bool { + if lhs.info != rhs.info { return false } + if lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionCancelQueryResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - static let protoMessageName: String = _protobuf_package + ".ActionCancelQueryResult" - static let _protobuf_nameMap: 
SwiftProtobuf._NameMap = [ - 1: .same(proto: "result"), - ] - - mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularEnumField(value: &self.result) }() - default: break - } + static let protoMessageName: String = _protobuf_package + ".ActionCancelQueryResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "result"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try decoder.decodeSingularEnumField(value: &result) + default: break + } + } } - } - func traverse(visitor: inout V) throws { - if self.result != .unspecified { - try visitor.visitSingularEnumField(value: self.result, fieldNumber: 1) + func traverse(visitor: inout V) throws { + if result != .unspecified { + try visitor.visitSingularEnumField(value: result, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) } - try unknownFields.traverse(visitor: &visitor) - } - static func ==(lhs: Arrow_Flight_Protocol_Sql_ActionCancelQueryResult, rhs: Arrow_Flight_Protocol_Sql_ActionCancelQueryResult) -> Bool { - if lhs.result != rhs.result {return false} - if lhs.unknownFields != rhs.unknownFields {return false} - return true - } + static func == (lhs: Arrow_Flight_Protocol_Sql_ActionCancelQueryResult, rhs: Arrow_Flight_Protocol_Sql_ActionCancelQueryResult) -> Bool { + if lhs.result != rhs.result { return false } + if 
lhs.unknownFields != rhs.unknownFields { return false } + return true + } } extension Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult: SwiftProtobuf._ProtoNameProviding { - static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 0: .same(proto: "CANCEL_RESULT_UNSPECIFIED"), - 1: .same(proto: "CANCEL_RESULT_CANCELLED"), - 2: .same(proto: "CANCEL_RESULT_CANCELLING"), - 3: .same(proto: "CANCEL_RESULT_NOT_CANCELLABLE"), - ] + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "CANCEL_RESULT_UNSPECIFIED"), + 1: .same(proto: "CANCEL_RESULT_CANCELLED"), + 2: .same(proto: "CANCEL_RESULT_CANCELLING"), + 3: .same(proto: "CANCEL_RESULT_NOT_CANCELLABLE"), + ] } diff --git a/Sources/ArrowFlight/FlightTicket.swift b/Sources/ArrowFlight/FlightTicket.swift index ab3cb32..4b213f6 100644 --- a/Sources/ArrowFlight/FlightTicket.swift +++ b/Sources/ArrowFlight/FlightTicket.swift @@ -20,7 +20,7 @@ import Foundation public class FlightTicket { public let data: Data init(_ ticket: Arrow_Flight_Protocol_Ticket) { - self.data = ticket.ticket + data = ticket.ticket } public init(_ data: Data) { @@ -29,7 +29,7 @@ public class FlightTicket { func toProtocol() -> Arrow_Flight_Protocol_Ticket { var ticket = Arrow_Flight_Protocol_Ticket() - ticket.ticket = self.data + ticket.ticket = data return ticket } } diff --git a/Sources/ArrowFlight/RecordBatchStreamReader.swift b/Sources/ArrowFlight/RecordBatchStreamReader.swift index 464752d..954b9cb 100644 --- a/Sources/ArrowFlight/RecordBatchStreamReader.swift +++ b/Sources/ArrowFlight/RecordBatchStreamReader.swift @@ -15,8 +15,8 @@ // specific language governing permissions and limitations // under the License. 
-import Foundation import Arrow +import Foundation import GRPC public class RecordBatchStreamReader: AsyncSequence, AsyncIteratorProtocol { @@ -32,7 +32,7 @@ public class RecordBatchStreamReader: AsyncSequence, AsyncIteratorProtocol { init(_ stream: GRPC.GRPCAsyncRequestStream, useUnalignedBuffers: Bool = false) { self.stream = stream - self.streamIterator = self.stream.makeAsyncIterator() + streamIterator = self.stream.makeAsyncIterator() self.useUnalignedBuffers = useUnalignedBuffers } @@ -49,7 +49,7 @@ public class RecordBatchStreamReader: AsyncSequence, AsyncIteratorProtocol { let result = ArrowReader.makeArrowReaderResult() while true { - let streamData = try await self.streamIterator.next() + let streamData = try await streamIterator.next() if streamData == nil { return nil } @@ -62,14 +62,15 @@ public class RecordBatchStreamReader: AsyncSequence, AsyncIteratorProtocol { dataHeader, dataBody: dataBody, result: result, - useUnalignedBuffers: useUnalignedBuffers) { + useUnalignedBuffers: useUnalignedBuffers + ) { case .success(()): if result.batches.count > 0 { batches = result.batches batchIndex = 1 return (batches[0], descriptor) } - case .failure(let error): + case let .failure(error): throw error } } diff --git a/Sources/ArrowFlight/RecordBatchStreamWriter.swift b/Sources/ArrowFlight/RecordBatchStreamWriter.swift index d3e03fe..7035473 100644 --- a/Sources/ArrowFlight/RecordBatchStreamWriter.swift +++ b/Sources/ArrowFlight/RecordBatchStreamWriter.swift @@ -15,8 +15,8 @@ // specific language governing permissions and limitations // under the License. 
-import Foundation import Arrow +import Foundation import GRPC public class ActionTypeStreamWriter { @@ -26,7 +26,7 @@ public class ActionTypeStreamWriter { } public func write(_ actionType: FlightActionType) async throws { - try await self.stream.send(actionType.toProtocol()) + try await stream.send(actionType.toProtocol()) } } @@ -37,7 +37,7 @@ public class ResultStreamWriter { } public func write(_ result: FlightResult) async throws { - try await self.stream.send(result.toProtocol()) + try await stream.send(result.toProtocol()) } } @@ -48,7 +48,7 @@ public class FlightInfoStreamWriter { } public func write(_ result: FlightInfo) async throws { - try await self.stream.send(result.toProtocol()) + try await stream.send(result.toProtocol()) } } @@ -59,7 +59,7 @@ public class PutResultDataStreamWriter { } public func write(_ result: FlightPutResult) async throws { - try await self.stream.send(result.toProtocol()) + try await stream.send(result.toProtocol()) } } @@ -72,24 +72,24 @@ public class RecordBatchStreamWriter { public func write(_ rb: RecordBatch) async throws { switch writer.toMessage(rb.schema) { - case .success(let schemaData): + case let .success(schemaData): let schemaFlightData = Arrow_Flight_Protocol_FlightData.with { $0.dataHeader = schemaData } - try await self.stream.send(schemaFlightData) + try await stream.send(schemaFlightData) switch writer.toMessage(rb) { - case .success(let recordMessages): + case let .success(recordMessages): let rbMessage = Arrow_Flight_Protocol_FlightData.with { $0.dataHeader = recordMessages[0] $0.dataBody = recordMessages[1] } - try await self.stream.send(rbMessage) - case .failure(let error): + try await stream.send(rbMessage) + case let .failure(error): throw error } - case .failure(let error): + case let .failure(error): throw error } } From ac06c678bc8714c2e5b68d1376a30426da5dabff Mon Sep 17 00:00:00 2001 From: Will Temperley Date: Sat, 30 Aug 2025 18:28:46 +0800 Subject: [PATCH 6/7] Generated and added test data. 
All tests now green when running locally. Removed data generation part of build script. --- Package.swift | 5 ++++ Tests/ArrowTests/IPCTests.swift | 25 ++++++++++++------ .../ArrowTests/Resources/testdata_bool.arrow | Bin 0 -> 650 bytes .../Resources/testdata_double.arrow | Bin 0 -> 698 bytes .../Resources/testdata_struct.arrow | Bin 0 -> 810 bytes ci/scripts/build.sh | 14 ---------- 6 files changed, 22 insertions(+), 22 deletions(-) create mode 100644 Tests/ArrowTests/Resources/testdata_bool.arrow create mode 100644 Tests/ArrowTests/Resources/testdata_double.arrow create mode 100644 Tests/ArrowTests/Resources/testdata_struct.arrow diff --git a/Package.swift b/Package.swift index af2fdb0..d47ca40 100644 --- a/Package.swift +++ b/Package.swift @@ -76,6 +76,11 @@ let package = Package( .testTarget( name: "ArrowTests", dependencies: ["Arrow", "ArrowC"], + resources: [ + .process("Resources/testdata_double.arrow"), + .process("Resources/testdata_bool.arrow"), + .process("Resources/testdata_struct.arrow"), + ], swiftSettings: [ // build: .unsafeFlags(["-warnings-as-errors"]) ] diff --git a/Tests/ArrowTests/IPCTests.swift b/Tests/ArrowTests/IPCTests.swift index c7aab04..d851a26 100644 --- a/Tests/ArrowTests/IPCTests.swift +++ b/Tests/ArrowTests/IPCTests.swift @@ -264,9 +264,9 @@ final class IPCStreamReaderTests: XCTestCase { final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body_length func testFileReader_double() throws { - let fileURL = currentDirectory().appendingPathComponent("../testdata_double.arrow") + let fileURL = Bundle.module.url(forResource: "testdata_double", withExtension: "arrow", subdirectory: nil) let arrowReader = ArrowReader() - let result = arrowReader.fromFile(fileURL) + let result = arrowReader.fromFile(fileURL!) 
let recordBatches: [RecordBatch] switch result { case .success(let result): @@ -298,14 +298,14 @@ final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body } func testFileReader_bool() throws { - let fileURL = currentDirectory().appendingPathComponent("../testdata_bool.arrow") + let fileURL = Bundle.module.url(forResource: "testdata_bool", withExtension: "arrow", subdirectory: nil)! let arrowReader = ArrowReader() try checkBoolRecordBatch(arrowReader.fromFile(fileURL)) } func testFileWriter_bool() throws { // read existing file - let fileURL = currentDirectory().appendingPathComponent("../testdata_bool.arrow") + let fileURL = Bundle.module.url(forResource: "testdata_bool", withExtension: "arrow", subdirectory: nil)! let arrowReader = ArrowReader() let fileRBs = try checkBoolRecordBatch(arrowReader.fromFile(fileURL)) let arrowWriter = ArrowWriter() @@ -319,7 +319,11 @@ final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body throw error } // write file record batches to another file - let outputUrl = currentDirectory().appendingPathComponent("../testfilewriter_bool.arrow") + // write file record batches to another file + let tempDir = FileManager.default.temporaryDirectory + let outputUrl = tempDir.appendingPathComponent(UUID().uuidString) + .appendingPathExtension("arrow") + defer { try? FileManager.default.removeItem(at: outputUrl) } switch arrowWriter.toFile(outputUrl, info: writerInfo) { case .success: try checkBoolRecordBatch(arrowReader.fromFile(outputUrl)) @@ -329,14 +333,14 @@ final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body } func testFileReader_struct() throws { - let fileURL = currentDirectory().appendingPathComponent("../testdata_struct.arrow") + let fileURL = Bundle.module.url(forResource: "testdata_struct", withExtension: "arrow", subdirectory: nil)! 
let arrowReader = ArrowReader() try checkStructRecordBatch(arrowReader.fromFile(fileURL)) } func testFileWriter_struct() throws { // read existing file - let fileURL = currentDirectory().appendingPathComponent("../testdata_struct.arrow") + let fileURL = Bundle.module.url(forResource: "testdata_struct", withExtension: "arrow", subdirectory: nil)! let arrowReader = ArrowReader() let fileRBs = try checkStructRecordBatch(arrowReader.fromFile(fileURL)) let arrowWriter = ArrowWriter() @@ -350,13 +354,18 @@ final class IPCFileReaderTests: XCTestCase { // swiftlint:disable:this type_body throw error } // write file record batches to another file - let outputUrl = currentDirectory().appendingPathComponent("../testfilewriter_struct.arrow") + let tempDir = FileManager.default.temporaryDirectory + let outputUrl = tempDir.appendingPathComponent(UUID().uuidString) + .appendingPathExtension("arrow") + defer { try? FileManager.default.removeItem(at: outputUrl) } switch arrowWriter.toFile(outputUrl, info: writerInfo) { case .success: + defer { try? 
FileManager.default.removeItem(at: outputUrl) } // cleanup try checkStructRecordBatch(arrowReader.fromFile(outputUrl)) case .failure(let error): throw error } + } func testRBInMemoryToFromStream() throws { diff --git a/Tests/ArrowTests/Resources/testdata_bool.arrow b/Tests/ArrowTests/Resources/testdata_bool.arrow new file mode 100644 index 0000000000000000000000000000000000000000..8dd4a94868da76305004f4513748cda452bfb462 GIT binary patch literal 650 zcmc&y%L&3j5PfRmkAPo9umKNV1dkqK1%ekVhQ1az57fGBqBQ5q>YBcBm81A#7qwFBr(`p+TtM4xyTttjM_o yuJ5=nvTT>13R?YriM6wY)Aemu?LxL0u^?1cJ=~lQyo^)hK>xbiI~!QJfA9Z41Js3gb+pI7e#64DJYVTj!8#{v`x6cMyXh76@QA=h4zIa||Cj_aV z^C(&iD2eTB%ls3&8>{m`i{HaKrKfJ%{TiT!f0k*(B?EnA( literal 0 HcmV?d00001 diff --git a/Tests/ArrowTests/Resources/testdata_struct.arrow b/Tests/ArrowTests/Resources/testdata_struct.arrow new file mode 100644 index 0000000000000000000000000000000000000000..b35387dc4f2cb68004e32083b46aff3a5b41e580 GIT binary patch literal 810 zcmdUuy-EW?6opUB8dq6SG)T&n78bFyG`%3CvDZQbHJ}l+_faf;5Fa6rVCfsg@66mQ z*x7oPGk5Npx$|>&cRHJ$UmQy-u#~h5r7d-EQw?osy+$pM&}u97N+!Ki>$(vH+pjch z@C;giWBioZz-7#>`)JUi86W5y#JiXA<$Qii-LTZ|#BQE%4k1{Nde-zK&RS|87Y{f0 z*T3?os|7jBuRe)U&;1VQ?-Ls+oj@bT`@Z>zk(P0LVEq~PVPcRSqR(L18^hihdCrXE zqE>(0J&Zi-Cy{5pQ}3DkrvBGF=Xo91`n{MF-no(E)(rdP=<{~mdM`Y%N7jyD^{OfB X*;C`Ih94gzE#W_ZH!Y@3%RllBNe@lz literal 0 HcmV?d00001 diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index 1459fa9..3343f9a 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -43,20 +43,6 @@ if [ -d /cache ]; then fi github_actions_group_end -github_actions_group_begin "Generate data" -data_gen_dir="${build_dir}/source/data-generator/swift-datagen" -if [ -d /cache ]; then - export GOCACHE="/cache/go-build" - export GOMODCACHE="/cache/go-mod" -fi -export GOPATH="${build_dir}" -pushd "${data_gen_dir}" -go get -d ./... -go run . -cp *.arrow ../Arrow -popd -github_actions_group_end - github_actions_group_begin "Use -warnings-as-errors" for package in . 
Arrow ArrowFlight; do pushd "${build_dir}/source/${package}" From 5ab2eb725fa13423ed9ae716c0d45b3c8c775728 Mon Sep 17 00:00:00 2001 From: Moreno Date: Sun, 31 Aug 2025 01:43:05 +0200 Subject: [PATCH 7/7] linted --- Package.swift | 6 +- Sources/Arrow/ArrowArrayBuilder.swift | 9 +- Sources/Arrow/ArrowCExporter.swift | 4 +- Sources/Arrow/ArrowCImporter.swift | 4 +- Sources/Arrow/ArrowReader.swift | 8 +- Sources/Arrow/ArrowWriter.swift | 4 +- Sources/Arrow/File_generated.swift | 242 +-- Sources/Arrow/Message_generated.swift | 644 +++--- Sources/Arrow/Schema_generated.swift | 2156 ++++++++++---------- Sources/Arrow/SparseTensor_generated.swift | 908 ++++----- Sources/Arrow/Tensor_generated.swift | 318 +-- Sources/ArrowFlight/Flight.grpc.swift | 4 +- Sources/ArrowFlight/Flight.pb.swift | 68 +- Sources/ArrowFlight/FlightSql.pb.swift | 216 +- 14 files changed, 2255 insertions(+), 2336 deletions(-) diff --git a/Package.swift b/Package.swift index d47ca40..d1b3e02 100644 --- a/Package.swift +++ b/Package.swift @@ -23,13 +23,13 @@ import PackageDescription let package = Package( name: "Arrow", platforms: [ - .macOS(.v10_15), + .macOS(.v10_15) ], products: [ .library( name: "Arrow", targets: ["Arrow"] - ), + ) ], dependencies: [ .package(url: "https://github.com/google/flatbuffers.git", from: "25.2.10"), @@ -70,7 +70,7 @@ let package = Package( .target( name: "go-swift", dependencies: [ - .target(name: "Arrow"), + .target(name: "Arrow") ] ), .testTarget( diff --git a/Sources/Arrow/ArrowArrayBuilder.swift b/Sources/Arrow/ArrowArrayBuilder.swift index e284cc1..39fee56 100644 --- a/Sources/Arrow/ArrowArrayBuilder.swift +++ b/Sources/Arrow/ArrowArrayBuilder.swift @@ -166,9 +166,12 @@ public class StructArrayBuilder: ArrowArrayBuilder - Result { + Result { do { let exportSchema = try ExportSchema(arrowType, name: name) cSchema.format = exportSchema.arrowTypeNameCstr @@ -106,7 +106,7 @@ public class ArrowCExporter { } public func exportField(_ schema: inout ArrowC.ArrowSchema, 
field: ArrowField) -> - Result { + Result { return exportType(&schema, arrowType: field.type, name: field.name) } diff --git a/Sources/Arrow/ArrowCImporter.swift b/Sources/Arrow/ArrowCImporter.swift index 4814902..fae79dd 100644 --- a/Sources/Arrow/ArrowCImporter.swift +++ b/Sources/Arrow/ArrowCImporter.swift @@ -60,7 +60,7 @@ public class ArrowCImporter { public init() {} public func importType(_ cArrow: String, name: String = "") -> - Result { + Result { do { let type = try ArrowType.fromCDataFormatId(cArrow) return .success(ArrowField(name, type: ArrowType(type.info), isNullable: true)) @@ -70,7 +70,7 @@ public class ArrowCImporter { } public func importField(_ cSchema: ArrowC.ArrowSchema) -> - Result { + Result { if cSchema.n_children > 0 { ArrowCImporter.release(cSchema) return .failure(.invalid("Children currently not supported")) diff --git a/Sources/Arrow/ArrowReader.swift b/Sources/Arrow/ArrowReader.swift index 725fed8..4ca239d 100644 --- a/Sources/Arrow/ArrowReader.swift +++ b/Sources/Arrow/ArrowReader.swift @@ -88,7 +88,7 @@ public class ArrowReader { // swiftlint:disable:this type_body_length private func loadStructData(_ loadInfo: DataLoadInfo, field: org_apache_arrow_flatbuf_Field) - -> Result { + -> Result { guard let node = loadInfo.batchData.nextNode() else { return .failure(.invalid("Node not found")) } @@ -120,7 +120,7 @@ public class ArrowReader { // swiftlint:disable:this type_body_length _ loadInfo: DataLoadInfo, field: org_apache_arrow_flatbuf_Field ) - -> Result { + -> Result { guard let node = loadInfo.batchData.nextNode() else { return .failure(.invalid("Node not found")) } @@ -147,7 +147,7 @@ public class ArrowReader { // swiftlint:disable:this type_body_length _ loadInfo: DataLoadInfo, field: org_apache_arrow_flatbuf_Field ) - -> Result { + -> Result { guard let node = loadInfo.batchData.nextNode() else { return .failure(.invalid("Node not found")) } @@ -180,7 +180,7 @@ public class ArrowReader { // swiftlint:disable:this 
type_body_length _ loadInfo: DataLoadInfo, field: org_apache_arrow_flatbuf_Field ) - -> Result { + -> Result { if isNestedType(field.typeType) { return loadStructData(loadInfo, field: field) } else if isFixedPrimitive(field.typeType) { diff --git a/Sources/Arrow/ArrowWriter.swift b/Sources/Arrow/ArrowWriter.swift index 67c14fa..71b3175 100644 --- a/Sources/Arrow/ArrowWriter.swift +++ b/Sources/Arrow/ArrowWriter.swift @@ -240,7 +240,7 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length _ writer: inout DataWriter, fields: [ArrowField], columns: [ArrowArrayHolder] ) - -> Result { + -> Result { for index in 0 ..< fields.count { let column = columns[index] let colBufferData = column.getBufferData() @@ -401,7 +401,7 @@ public class ArrowWriter { // swiftlint:disable:this type_body_length case .success: return .success([ (writer as! InMemDataWriter).data, // swiftlint:disable:this force_cast - (dataWriter as! InMemDataWriter).data, // swiftlint:disable:this force_cast + (dataWriter as! InMemDataWriter).data // swiftlint:disable:this force_cast ]) case let .failure(error): return .failure(error) diff --git a/Sources/Arrow/File_generated.swift b/Sources/Arrow/File_generated.swift index 53888e4..f150193 100644 --- a/Sources/Arrow/File_generated.swift +++ b/Sources/Arrow/File_generated.swift @@ -23,138 +23,138 @@ import FlatBuffers public struct org_apache_arrow_flatbuf_Block: NativeStruct, Verifiable, FlatbuffersInitializable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - - /// Index to the start of the RecordBlock (note this is past the Message header) - private var _offset: Int64 - /// Length of the metadata - private var _metaDataLength: Int32 - private let padding0__: UInt32 = 0 - /// Length of the data (this is aligned so there can be a gap between this and - /// the metadata). 
- private var _bodyLength: Int64 - - public init(_ bb: ByteBuffer, o: Int32) { - let _accessor = Struct(bb: bb, position: o) - _offset = _accessor.readBuffer(of: Int64.self, at: 0) - _metaDataLength = _accessor.readBuffer(of: Int32.self, at: 8) - _bodyLength = _accessor.readBuffer(of: Int64.self, at: 16) - } - - public init(offset: Int64, metaDataLength: Int32, bodyLength: Int64) { - _offset = offset - _metaDataLength = metaDataLength - _bodyLength = bodyLength - } - - public init() { - _offset = 0 - _metaDataLength = 0 - _bodyLength = 0 - } - - /// Index to the start of the RecordBlock (note this is past the Message header) - public var offset: Int64 { _offset } - /// Length of the metadata - public var metaDataLength: Int32 { _metaDataLength } - /// Length of the data (this is aligned so there can be a gap between this and - /// the metadata). - public var bodyLength: Int64 { _bodyLength } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - try verifier.inBuffer(position: position, of: org_apache_arrow_flatbuf_Block.self) - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + + /// Index to the start of the RecordBlock (note this is past the Message header) + private var _offset: Int64 + /// Length of the metadata + private var _metaDataLength: Int32 + private let padding0__: UInt32 = 0 + /// Length of the data (this is aligned so there can be a gap between this and + /// the metadata). 
+ private var _bodyLength: Int64 + + public init(_ bb: ByteBuffer, o: Int32) { + let _accessor = Struct(bb: bb, position: o) + _offset = _accessor.readBuffer(of: Int64.self, at: 0) + _metaDataLength = _accessor.readBuffer(of: Int32.self, at: 8) + _bodyLength = _accessor.readBuffer(of: Int64.self, at: 16) + } + + public init(offset: Int64, metaDataLength: Int32, bodyLength: Int64) { + _offset = offset + _metaDataLength = metaDataLength + _bodyLength = bodyLength + } + + public init() { + _offset = 0 + _metaDataLength = 0 + _bodyLength = 0 + } + + /// Index to the start of the RecordBlock (note this is past the Message header) + public var offset: Int64 { _offset } + /// Length of the metadata + public var metaDataLength: Int32 { _metaDataLength } + /// Length of the data (this is aligned so there can be a gap between this and + /// the metadata). + public var bodyLength: Int64 { _bodyLength } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + try verifier.inBuffer(position: position, of: org_apache_arrow_flatbuf_Block.self) + } } public struct org_apache_arrow_flatbuf_Block_Mutable: FlatBufferObject { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Struct + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Struct - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Struct(bb: bb, position: o) } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Struct(bb: bb, position: o) } - public var offset: Int64 { return _accessor.readBuffer(of: Int64.self, at: 0) } - public var metaDataLength: Int32 { return _accessor.readBuffer(of: Int32.self, at: 8) } - public var bodyLength: Int64 { return _accessor.readBuffer(of: Int64.self, at: 16) } + public var offset: Int64 { return _accessor.readBuffer(of: Int64.self, at: 0) } + public var metaDataLength: Int32 { return _accessor.readBuffer(of: Int32.self, at: 8) } + public var bodyLength: Int64 { return _accessor.readBuffer(of: Int64.self, at: 16) } } /// ---------------------------------------------------------------------- /// Arrow File metadata -/// +/// public struct org_apache_arrow_flatbuf_Footer: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsFooter(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Footer { return org_apache_arrow_flatbuf_Footer(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case version = 4 - case schema = 6 - case dictionaries = 8 - case recordBatches = 10 - case customMetadata = 12 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var version: org_apache_arrow_flatbuf_MetadataVersion { let o = _accessor.offset(VTOFFSET.version.v); return o == 0 ? .v1 : org_apache_arrow_flatbuf_MetadataVersion(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .v1 } - public var schema: org_apache_arrow_flatbuf_Schema? 
{ let o = _accessor.offset(VTOFFSET.schema.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Schema(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - public var hasDictionaries: Bool { let o = _accessor.offset(VTOFFSET.dictionaries.v); return o == 0 ? false : true } - public var dictionariesCount: Int32 { let o = _accessor.offset(VTOFFSET.dictionaries.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func dictionaries(at index: Int32) -> org_apache_arrow_flatbuf_Block? { let o = _accessor.offset(VTOFFSET.dictionaries.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Block.self, offset: _accessor.vector(at: o) + index * 24) } - public func mutableDictionaries(at index: Int32) -> org_apache_arrow_flatbuf_Block_Mutable? { let o = _accessor.offset(VTOFFSET.dictionaries.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Block_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 24) } - public var hasRecordBatches: Bool { let o = _accessor.offset(VTOFFSET.recordBatches.v); return o == 0 ? false : true } - public var recordBatchesCount: Int32 { let o = _accessor.offset(VTOFFSET.recordBatches.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func recordBatches(at index: Int32) -> org_apache_arrow_flatbuf_Block? { let o = _accessor.offset(VTOFFSET.recordBatches.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Block.self, offset: _accessor.vector(at: o) + index * 24) } - public func mutableRecordBatches(at index: Int32) -> org_apache_arrow_flatbuf_Block_Mutable? { let o = _accessor.offset(VTOFFSET.recordBatches.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Block_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 24) } - /// User-defined metadata - public var hasCustomMetadata: Bool { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 
false : true } - public var customMetadataCount: Int32 { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func customMetadata(at index: Int32) -> org_apache_arrow_flatbuf_KeyValue? { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? nil : org_apache_arrow_flatbuf_KeyValue(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } - public static func startFooter(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } - public static func add(version: org_apache_arrow_flatbuf_MetadataVersion, _ fbb: inout FlatBufferBuilder) { fbb.add(element: version.rawValue, def: 0, at: VTOFFSET.version.p) } - public static func add(schema: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: schema, at: VTOFFSET.schema.p) } - public static func addVectorOf(dictionaries: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: dictionaries, at: VTOFFSET.dictionaries.p) } - public static func startVectorOfDictionaries(_ size: Int, in builder: inout FlatBufferBuilder) { - builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) - } - public static func addVectorOf(recordBatches: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: recordBatches, at: VTOFFSET.recordBatches.p) } - public static func startVectorOfRecordBatches(_ size: Int, in builder: inout FlatBufferBuilder) { - builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) - } - public static func addVectorOf(customMetadata: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: customMetadata, at: VTOFFSET.customMetadata.p) } - public static func endFooter(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createFooter( - _ fbb: inout FlatBufferBuilder, - version: org_apache_arrow_flatbuf_MetadataVersion = .v1, - schemaOffset schema: Offset = Offset(), - 
dictionariesVectorOffset dictionaries: Offset = Offset(), - recordBatchesVectorOffset recordBatches: Offset = Offset(), - customMetadataVectorOffset customMetadata: Offset = Offset() - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Footer.startFooter(&fbb) - org_apache_arrow_flatbuf_Footer.add(version: version, &fbb) - org_apache_arrow_flatbuf_Footer.add(schema: schema, &fbb) - org_apache_arrow_flatbuf_Footer.addVectorOf(dictionaries: dictionaries, &fbb) - org_apache_arrow_flatbuf_Footer.addVectorOf(recordBatches: recordBatches, &fbb) - org_apache_arrow_flatbuf_Footer.addVectorOf(customMetadata: customMetadata, &fbb) - return org_apache_arrow_flatbuf_Footer.endFooter(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.version.p, fieldName: "version", required: false, type: org_apache_arrow_flatbuf_MetadataVersion.self) - try _v.visit(field: VTOFFSET.schema.p, fieldName: "schema", required: false, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.dictionaries.p, fieldName: "dictionaries", required: false, type: ForwardOffset>.self) - try _v.visit(field: VTOFFSET.recordBatches.p, fieldName: "recordBatches", required: false, type: ForwardOffset>.self) - try _v.visit(field: VTOFFSET.customMetadata.p, fieldName: "customMetadata", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_KeyValue>>.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsFooter(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Footer { return org_apache_arrow_flatbuf_Footer(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case version = 4 + case schema = 6 + case dictionaries = 8 + case recordBatches = 10 + case customMetadata = 12 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var version: org_apache_arrow_flatbuf_MetadataVersion { let o = _accessor.offset(VTOFFSET.version.v); return o == 0 ? .v1 : org_apache_arrow_flatbuf_MetadataVersion(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .v1 } + public var schema: org_apache_arrow_flatbuf_Schema? { let o = _accessor.offset(VTOFFSET.schema.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Schema(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + public var hasDictionaries: Bool { let o = _accessor.offset(VTOFFSET.dictionaries.v); return o == 0 ? false : true } + public var dictionariesCount: Int32 { let o = _accessor.offset(VTOFFSET.dictionaries.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func dictionaries(at index: Int32) -> org_apache_arrow_flatbuf_Block? { let o = _accessor.offset(VTOFFSET.dictionaries.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Block.self, offset: _accessor.vector(at: o) + index * 24) } + public func mutableDictionaries(at index: Int32) -> org_apache_arrow_flatbuf_Block_Mutable? { let o = _accessor.offset(VTOFFSET.dictionaries.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_Block_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 24) } + public var hasRecordBatches: Bool { let o = _accessor.offset(VTOFFSET.recordBatches.v); return o == 0 ? false : true } + public var recordBatchesCount: Int32 { let o = _accessor.offset(VTOFFSET.recordBatches.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func recordBatches(at index: Int32) -> org_apache_arrow_flatbuf_Block? { let o = _accessor.offset(VTOFFSET.recordBatches.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Block.self, offset: _accessor.vector(at: o) + index * 24) } + public func mutableRecordBatches(at index: Int32) -> org_apache_arrow_flatbuf_Block_Mutable? { let o = _accessor.offset(VTOFFSET.recordBatches.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Block_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 24) } + /// User-defined metadata + public var hasCustomMetadata: Bool { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? false : true } + public var customMetadataCount: Int32 { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func customMetadata(at index: Int32) -> org_apache_arrow_flatbuf_KeyValue? { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_KeyValue(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } + public static func startFooter(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } + public static func add(version: org_apache_arrow_flatbuf_MetadataVersion, _ fbb: inout FlatBufferBuilder) { fbb.add(element: version.rawValue, def: 0, at: VTOFFSET.version.p) } + public static func add(schema: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: schema, at: VTOFFSET.schema.p) } + public static func addVectorOf(dictionaries: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: dictionaries, at: VTOFFSET.dictionaries.p) } + public static func startVectorOfDictionaries(_ size: Int, in builder: inout FlatBufferBuilder) { + builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) + } + public static func addVectorOf(recordBatches: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: recordBatches, at: VTOFFSET.recordBatches.p) } + public static func startVectorOfRecordBatches(_ size: Int, in builder: inout FlatBufferBuilder) { + builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) + } + public static func addVectorOf(customMetadata: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: customMetadata, at: VTOFFSET.customMetadata.p) } + public static func endFooter(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createFooter( + _ fbb: inout FlatBufferBuilder, + version: org_apache_arrow_flatbuf_MetadataVersion = .v1, + schemaOffset schema: Offset = Offset(), + dictionariesVectorOffset dictionaries: Offset = Offset(), + recordBatchesVectorOffset recordBatches: Offset = Offset(), + customMetadataVectorOffset customMetadata: Offset = Offset() + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Footer.startFooter(&fbb) + org_apache_arrow_flatbuf_Footer.add(version: 
version, &fbb) + org_apache_arrow_flatbuf_Footer.add(schema: schema, &fbb) + org_apache_arrow_flatbuf_Footer.addVectorOf(dictionaries: dictionaries, &fbb) + org_apache_arrow_flatbuf_Footer.addVectorOf(recordBatches: recordBatches, &fbb) + org_apache_arrow_flatbuf_Footer.addVectorOf(customMetadata: customMetadata, &fbb) + return org_apache_arrow_flatbuf_Footer.endFooter(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.version.p, fieldName: "version", required: false, type: org_apache_arrow_flatbuf_MetadataVersion.self) + try _v.visit(field: VTOFFSET.schema.p, fieldName: "schema", required: false, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.dictionaries.p, fieldName: "dictionaries", required: false, type: ForwardOffset>.self) + try _v.visit(field: VTOFFSET.recordBatches.p, fieldName: "recordBatches", required: false, type: ForwardOffset>.self) + try _v.visit(field: VTOFFSET.customMetadata.p, fieldName: "customMetadata", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_KeyValue>>.self) + _v.finish() + } } diff --git a/Sources/Arrow/Message_generated.swift b/Sources/Arrow/Message_generated.swift index 6820aa1..38533e5 100644 --- a/Sources/Arrow/Message_generated.swift +++ b/Sources/Arrow/Message_generated.swift @@ -22,14 +22,14 @@ import FlatBuffers public enum org_apache_arrow_flatbuf_CompressionType: Int8, Enum, Verifiable { - public typealias T = Int8 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int8 { return self.rawValue } - case lz4Frame = 0 - case zstd = 1 - - public static var max: org_apache_arrow_flatbuf_CompressionType { return .zstd } - public static var min: org_apache_arrow_flatbuf_CompressionType { return .lz4Frame } + public typealias T = Int8 + public static var byteSize: Int { return MemoryLayout.size } + public 
var value: Int8 { return self.rawValue } + case lz4Frame = 0 + case zstd = 1 + + public static var max: org_apache_arrow_flatbuf_CompressionType { return .zstd } + public static var min: org_apache_arrow_flatbuf_CompressionType { return .lz4Frame } } @@ -37,20 +37,20 @@ public enum org_apache_arrow_flatbuf_CompressionType: Int8, Enum, Verifiable { /// strategies for compressing the IPC message body (like whole-body /// compression rather than buffer-level) in the future public enum org_apache_arrow_flatbuf_BodyCompressionMethod: Int8, Enum, Verifiable { - public typealias T = Int8 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int8 { return self.rawValue } - /// Each constituent buffer is first compressed with the indicated - /// compressor, and then written with the uncompressed length in the first 8 - /// bytes as a 64-bit little-endian signed integer followed by the compressed - /// buffer bytes (and then padding as required by the protocol). The - /// uncompressed length may be set to -1 to indicate that the data that - /// follows is not compressed, which can be useful for cases where - /// compression does not yield appreciable savings. - case buffer = 0 - - public static var max: org_apache_arrow_flatbuf_BodyCompressionMethod { return .buffer } - public static var min: org_apache_arrow_flatbuf_BodyCompressionMethod { return .buffer } + public typealias T = Int8 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int8 { return self.rawValue } + /// Each constituent buffer is first compressed with the indicated + /// compressor, and then written with the uncompressed length in the first 8 + /// bytes as a 64-bit little-endian signed integer followed by the compressed + /// buffer bytes (and then padding as required by the protocol). 
The + /// uncompressed length may be set to -1 to indicate that the data that + /// follows is not compressed, which can be useful for cases where + /// compression does not yield appreciable savings. + case buffer = 0 + + public static var max: org_apache_arrow_flatbuf_BodyCompressionMethod { return .buffer } + public static var min: org_apache_arrow_flatbuf_BodyCompressionMethod { return .buffer } } @@ -58,28 +58,28 @@ public enum org_apache_arrow_flatbuf_BodyCompressionMethod: Int8, Enum, Verifiab /// The root Message type /// This union enables us to easily send different message types without /// redundant storage, and in the future we can easily add new message types. -/// +/// /// Arrow implementations do not need to implement all of the message types, /// which may include experimental metadata types. For maximum compatibility, /// it is best to send data using RecordBatch public enum org_apache_arrow_flatbuf_MessageHeader: UInt8, UnionEnum { - public typealias T = UInt8 - - public init?(value: T) { - self.init(rawValue: value) - } - - public static var byteSize: Int { return MemoryLayout.size } - public var value: UInt8 { return self.rawValue } - case none_ = 0 - case schema = 1 - case dictionarybatch = 2 - case recordbatch = 3 - case tensor = 4 - case sparsetensor = 5 - - public static var max: org_apache_arrow_flatbuf_MessageHeader { return .sparsetensor } - public static var min: org_apache_arrow_flatbuf_MessageHeader { return .none_ } + public typealias T = UInt8 + + public init?(value: T) { + self.init(rawValue: value) + } + + public static var byteSize: Int { return MemoryLayout.size } + public var value: UInt8 { return self.rawValue } + case none_ = 0 + case schema = 1 + case dictionarybatch = 2 + case recordbatch = 3 + case tensor = 4 + case sparsetensor = 5 + + public static var max: org_apache_arrow_flatbuf_MessageHeader { return .sparsetensor } + public static var min: org_apache_arrow_flatbuf_MessageHeader { return .none_ } } @@ -88,49 +88,49 
@@ public enum org_apache_arrow_flatbuf_MessageHeader: UInt8, UnionEnum { /// equal-length Arrow arrays) /// Metadata about a field at some level of a nested type tree (but not /// its children). -/// +/// /// For example, a List with values `[[1, 2, 3], null, [4], [5, 6], null]` /// would have {length: 5, null_count: 2} for its List node, and {length: 6, /// null_count: 0} for its Int16 node, as separate FieldNode structs public struct org_apache_arrow_flatbuf_FieldNode: NativeStruct, Verifiable, FlatbuffersInitializable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - - /// The number of value slots in the Arrow array at this level of a nested - /// tree - private var _length: Int64 - /// The number of observed nulls. Fields with null_count == 0 may choose not - /// to write their physical validity bitmap out as a materialized buffer, - /// instead setting the length of the bitmap buffer to 0. - private var _nullCount: Int64 - - public init(_ bb: ByteBuffer, o: Int32) { - let _accessor = Struct(bb: bb, position: o) - _length = _accessor.readBuffer(of: Int64.self, at: 0) - _nullCount = _accessor.readBuffer(of: Int64.self, at: 8) - } - - public init(length: Int64, nullCount: Int64) { - _length = length - _nullCount = nullCount - } - - public init() { - _length = 0 - _nullCount = 0 - } - - /// The number of value slots in the Arrow array at this level of a nested - /// tree - public var length: Int64 { _length } - /// The number of observed nulls. Fields with null_count == 0 may choose not - /// to write their physical validity bitmap out as a materialized buffer, - /// instead setting the length of the bitmap buffer to 0. 
- public var nullCount: Int64 { _nullCount } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - try verifier.inBuffer(position: position, of: org_apache_arrow_flatbuf_FieldNode.self) - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + + /// The number of value slots in the Arrow array at this level of a nested + /// tree + private var _length: Int64 + /// The number of observed nulls. Fields with null_count == 0 may choose not + /// to write their physical validity bitmap out as a materialized buffer, + /// instead setting the length of the bitmap buffer to 0. + private var _nullCount: Int64 + + public init(_ bb: ByteBuffer, o: Int32) { + let _accessor = Struct(bb: bb, position: o) + _length = _accessor.readBuffer(of: Int64.self, at: 0) + _nullCount = _accessor.readBuffer(of: Int64.self, at: 8) + } + + public init(length: Int64, nullCount: Int64) { + _length = length + _nullCount = nullCount + } + + public init() { + _length = 0 + _nullCount = 0 + } + + /// The number of value slots in the Arrow array at this level of a nested + /// tree + public var length: Int64 { _length } + /// The number of observed nulls. Fields with null_count == 0 may choose not + /// to write their physical validity bitmap out as a materialized buffer, + /// instead setting the length of the bitmap buffer to 0. + public var nullCount: Int64 { _nullCount } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + try verifier.inBuffer(position: position, of: org_apache_arrow_flatbuf_FieldNode.self) + } } /// ---------------------------------------------------------------------- @@ -138,20 +138,20 @@ public struct org_apache_arrow_flatbuf_FieldNode: NativeStruct, Verifiable, Flat /// equal-length Arrow arrays) /// Metadata about a field at some level of a nested type tree (but not /// its children). 
-/// +/// /// For example, a List with values `[[1, 2, 3], null, [4], [5, 6], null]` /// would have {length: 5, null_count: 2} for its List node, and {length: 6, /// null_count: 0} for its Int16 node, as separate FieldNode structs public struct org_apache_arrow_flatbuf_FieldNode_Mutable: FlatBufferObject { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Struct + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Struct - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Struct(bb: bb, position: o) } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Struct(bb: bb, position: o) } - public var length: Int64 { return _accessor.readBuffer(of: Int64.self, at: 0) } - public var nullCount: Int64 { return _accessor.readBuffer(of: Int64.self, at: 8) } + public var length: Int64 { return _accessor.readBuffer(of: Int64.self, at: 0) } + public var nullCount: Int64 { return _accessor.readBuffer(of: Int64.self, at: 8) } } /// Optional compression for the memory buffers constituting IPC message @@ -159,48 +159,48 @@ public struct org_apache_arrow_flatbuf_FieldNode_Mutable: FlatBufferObject { /// message types public struct org_apache_arrow_flatbuf_BodyCompression: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsBodyCompression(bb: ByteBuffer) -> org_apache_arrow_flatbuf_BodyCompression { return org_apache_arrow_flatbuf_BodyCompression(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case codec = 4 - case method = 6 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// Compressor library. - /// For LZ4_FRAME, each compressed buffer must consist of a single frame. - public var codec: org_apache_arrow_flatbuf_CompressionType { let o = _accessor.offset(VTOFFSET.codec.v); return o == 0 ? .lz4Frame : org_apache_arrow_flatbuf_CompressionType(rawValue: _accessor.readBuffer(of: Int8.self, at: o)) ?? .lz4Frame } - /// Indicates the way the record batch body was compressed - public var method: org_apache_arrow_flatbuf_BodyCompressionMethod { let o = _accessor.offset(VTOFFSET.method.v); return o == 0 ? .buffer : org_apache_arrow_flatbuf_BodyCompressionMethod(rawValue: _accessor.readBuffer(of: Int8.self, at: o)) ?? 
.buffer } - public static func startBodyCompression(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } - public static func add(codec: org_apache_arrow_flatbuf_CompressionType, _ fbb: inout FlatBufferBuilder) { fbb.add(element: codec.rawValue, def: 0, at: VTOFFSET.codec.p) } - public static func add(method: org_apache_arrow_flatbuf_BodyCompressionMethod, _ fbb: inout FlatBufferBuilder) { fbb.add(element: method.rawValue, def: 0, at: VTOFFSET.method.p) } - public static func endBodyCompression(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createBodyCompression( - _ fbb: inout FlatBufferBuilder, - codec: org_apache_arrow_flatbuf_CompressionType = .lz4Frame, - method: org_apache_arrow_flatbuf_BodyCompressionMethod = .buffer - ) -> Offset { - let __start = org_apache_arrow_flatbuf_BodyCompression.startBodyCompression(&fbb) - org_apache_arrow_flatbuf_BodyCompression.add(codec: codec, &fbb) - org_apache_arrow_flatbuf_BodyCompression.add(method: method, &fbb) - return org_apache_arrow_flatbuf_BodyCompression.endBodyCompression(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.codec.p, fieldName: "codec", required: false, type: org_apache_arrow_flatbuf_CompressionType.self) - try _v.visit(field: VTOFFSET.method.p, fieldName: "method", required: false, type: org_apache_arrow_flatbuf_BodyCompressionMethod.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsBodyCompression(bb: ByteBuffer) -> org_apache_arrow_flatbuf_BodyCompression { return org_apache_arrow_flatbuf_BodyCompression(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case codec = 4 + case method = 6 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// Compressor library. + /// For LZ4_FRAME, each compressed buffer must consist of a single frame. + public var codec: org_apache_arrow_flatbuf_CompressionType { let o = _accessor.offset(VTOFFSET.codec.v); return o == 0 ? .lz4Frame : org_apache_arrow_flatbuf_CompressionType(rawValue: _accessor.readBuffer(of: Int8.self, at: o)) ?? .lz4Frame } + /// Indicates the way the record batch body was compressed + public var method: org_apache_arrow_flatbuf_BodyCompressionMethod { let o = _accessor.offset(VTOFFSET.method.v); return o == 0 ? .buffer : org_apache_arrow_flatbuf_BodyCompressionMethod(rawValue: _accessor.readBuffer(of: Int8.self, at: o)) ?? 
.buffer } + public static func startBodyCompression(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } + public static func add(codec: org_apache_arrow_flatbuf_CompressionType, _ fbb: inout FlatBufferBuilder) { fbb.add(element: codec.rawValue, def: 0, at: VTOFFSET.codec.p) } + public static func add(method: org_apache_arrow_flatbuf_BodyCompressionMethod, _ fbb: inout FlatBufferBuilder) { fbb.add(element: method.rawValue, def: 0, at: VTOFFSET.method.p) } + public static func endBodyCompression(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createBodyCompression( + _ fbb: inout FlatBufferBuilder, + codec: org_apache_arrow_flatbuf_CompressionType = .lz4Frame, + method: org_apache_arrow_flatbuf_BodyCompressionMethod = .buffer + ) -> Offset { + let __start = org_apache_arrow_flatbuf_BodyCompression.startBodyCompression(&fbb) + org_apache_arrow_flatbuf_BodyCompression.add(codec: codec, &fbb) + org_apache_arrow_flatbuf_BodyCompression.add(method: method, &fbb) + return org_apache_arrow_flatbuf_BodyCompression.endBodyCompression(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.codec.p, fieldName: "codec", required: false, type: org_apache_arrow_flatbuf_CompressionType.self) + try _v.visit(field: VTOFFSET.method.p, fieldName: "method", required: false, type: org_apache_arrow_flatbuf_BodyCompressionMethod.self) + _v.finish() + } } /// A data header describing the shared memory layout of a "record" or "row" @@ -208,79 +208,79 @@ public struct org_apache_arrow_flatbuf_BodyCompression: FlatBufferObject, Verifi /// batch". 
public struct org_apache_arrow_flatbuf_RecordBatch: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsRecordBatch(bb: ByteBuffer) -> org_apache_arrow_flatbuf_RecordBatch { return org_apache_arrow_flatbuf_RecordBatch(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case length = 4 - case nodes = 6 - case buffers = 8 - case compression = 10 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// number of records / rows. The arrays in the batch should all have this - /// length - public var length: Int64 { let o = _accessor.offset(VTOFFSET.length.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } - /// Nodes correspond to the pre-ordered flattened logical schema - public var hasNodes: Bool { let o = _accessor.offset(VTOFFSET.nodes.v); return o == 0 ? false : true } - public var nodesCount: Int32 { let o = _accessor.offset(VTOFFSET.nodes.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func nodes(at index: Int32) -> org_apache_arrow_flatbuf_FieldNode? { let o = _accessor.offset(VTOFFSET.nodes.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_FieldNode.self, offset: _accessor.vector(at: o) + index * 16) } - public func mutableNodes(at index: Int32) -> org_apache_arrow_flatbuf_FieldNode_Mutable? { let o = _accessor.offset(VTOFFSET.nodes.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_FieldNode_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 16) } - /// Buffers correspond to the pre-ordered flattened buffer tree - /// - /// The number of buffers appended to this list depends on the schema. For - /// example, most primitive arrays will have 2 buffers, 1 for the validity - /// bitmap and 1 for the values. For struct arrays, there will only be a - /// single buffer for the validity (nulls) bitmap - public var hasBuffers: Bool { let o = _accessor.offset(VTOFFSET.buffers.v); return o == 0 ? false : true } - public var buffersCount: Int32 { let o = _accessor.offset(VTOFFSET.buffers.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func buffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer? { let o = _accessor.offset(VTOFFSET.buffers.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Buffer.self, offset: _accessor.vector(at: o) + index * 16) } - public func mutableBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer_Mutable? { let o = _accessor.offset(VTOFFSET.buffers.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 16) } - /// Optional compression of the message body - public var compression: org_apache_arrow_flatbuf_BodyCompression? { let o = _accessor.offset(VTOFFSET.compression.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_BodyCompression(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - public static func startRecordBatch(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 4) } - public static func add(length: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: length, def: 0, at: VTOFFSET.length.p) } - public static func addVectorOf(nodes: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: nodes, at: VTOFFSET.nodes.p) } - public static func startVectorOfNodes(_ size: Int, in builder: inout FlatBufferBuilder) { - builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) - } - public static func addVectorOf(buffers: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: buffers, at: VTOFFSET.buffers.p) } - public static func startVectorOfBuffers(_ size: Int, in builder: inout FlatBufferBuilder) { - builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) - } - public static func add(compression: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: compression, at: VTOFFSET.compression.p) } - public static func endRecordBatch(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createRecordBatch( - _ fbb: inout FlatBufferBuilder, - length: Int64 = 0, - nodesVectorOffset nodes: Offset = Offset(), - buffersVectorOffset buffers: Offset = Offset(), - compressionOffset compression: Offset = Offset() - ) -> Offset { - let __start = org_apache_arrow_flatbuf_RecordBatch.startRecordBatch(&fbb) - org_apache_arrow_flatbuf_RecordBatch.add(length: length, &fbb) - org_apache_arrow_flatbuf_RecordBatch.addVectorOf(nodes: nodes, &fbb) - org_apache_arrow_flatbuf_RecordBatch.addVectorOf(buffers: buffers, &fbb) - org_apache_arrow_flatbuf_RecordBatch.add(compression: compression, &fbb) - return org_apache_arrow_flatbuf_RecordBatch.endRecordBatch(&fbb, start: __start) - } - - public 
static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.length.p, fieldName: "length", required: false, type: Int64.self) - try _v.visit(field: VTOFFSET.nodes.p, fieldName: "nodes", required: false, type: ForwardOffset>.self) - try _v.visit(field: VTOFFSET.buffers.p, fieldName: "buffers", required: false, type: ForwardOffset>.self) - try _v.visit(field: VTOFFSET.compression.p, fieldName: "compression", required: false, type: ForwardOffset.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsRecordBatch(bb: ByteBuffer) -> org_apache_arrow_flatbuf_RecordBatch { return org_apache_arrow_flatbuf_RecordBatch(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case length = 4 + case nodes = 6 + case buffers = 8 + case compression = 10 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// number of records / rows. The arrays in the batch should all have this + /// length + public var length: Int64 { let o = _accessor.offset(VTOFFSET.length.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } + /// Nodes correspond to the pre-ordered flattened logical schema + public var hasNodes: Bool { let o = _accessor.offset(VTOFFSET.nodes.v); return o == 0 ? false : true } + public var nodesCount: Int32 { let o = _accessor.offset(VTOFFSET.nodes.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func nodes(at index: Int32) -> org_apache_arrow_flatbuf_FieldNode? { let o = _accessor.offset(VTOFFSET.nodes.v); return o == 0 ? 
nil : _accessor.directRead(of: org_apache_arrow_flatbuf_FieldNode.self, offset: _accessor.vector(at: o) + index * 16) } + public func mutableNodes(at index: Int32) -> org_apache_arrow_flatbuf_FieldNode_Mutable? { let o = _accessor.offset(VTOFFSET.nodes.v); return o == 0 ? nil : org_apache_arrow_flatbuf_FieldNode_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 16) } + /// Buffers correspond to the pre-ordered flattened buffer tree + /// + /// The number of buffers appended to this list depends on the schema. For + /// example, most primitive arrays will have 2 buffers, 1 for the validity + /// bitmap and 1 for the values. For struct arrays, there will only be a + /// single buffer for the validity (nulls) bitmap + public var hasBuffers: Bool { let o = _accessor.offset(VTOFFSET.buffers.v); return o == 0 ? false : true } + public var buffersCount: Int32 { let o = _accessor.offset(VTOFFSET.buffers.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func buffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer? { let o = _accessor.offset(VTOFFSET.buffers.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Buffer.self, offset: _accessor.vector(at: o) + index * 16) } + public func mutableBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer_Mutable? { let o = _accessor.offset(VTOFFSET.buffers.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 16) } + /// Optional compression of the message body + public var compression: org_apache_arrow_flatbuf_BodyCompression? { let o = _accessor.offset(VTOFFSET.compression.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_BodyCompression(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + public static func startRecordBatch(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 4) } + public static func add(length: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: length, def: 0, at: VTOFFSET.length.p) } + public static func addVectorOf(nodes: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: nodes, at: VTOFFSET.nodes.p) } + public static func startVectorOfNodes(_ size: Int, in builder: inout FlatBufferBuilder) { + builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) + } + public static func addVectorOf(buffers: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: buffers, at: VTOFFSET.buffers.p) } + public static func startVectorOfBuffers(_ size: Int, in builder: inout FlatBufferBuilder) { + builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) + } + public static func add(compression: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: compression, at: VTOFFSET.compression.p) } + public static func endRecordBatch(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createRecordBatch( + _ fbb: inout FlatBufferBuilder, + length: Int64 = 0, + nodesVectorOffset nodes: Offset = Offset(), + buffersVectorOffset buffers: Offset = Offset(), + compressionOffset compression: Offset = Offset() + ) -> Offset { + let __start = org_apache_arrow_flatbuf_RecordBatch.startRecordBatch(&fbb) + org_apache_arrow_flatbuf_RecordBatch.add(length: length, &fbb) + org_apache_arrow_flatbuf_RecordBatch.addVectorOf(nodes: nodes, &fbb) + org_apache_arrow_flatbuf_RecordBatch.addVectorOf(buffers: buffers, &fbb) + org_apache_arrow_flatbuf_RecordBatch.add(compression: compression, &fbb) + return org_apache_arrow_flatbuf_RecordBatch.endRecordBatch(&fbb, start: __start) + } + + public 
static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.length.p, fieldName: "length", required: false, type: Int64.self) + try _v.visit(field: VTOFFSET.nodes.p, fieldName: "nodes", required: false, type: ForwardOffset>.self) + try _v.visit(field: VTOFFSET.buffers.p, fieldName: "buffers", required: false, type: ForwardOffset>.self) + try _v.visit(field: VTOFFSET.compression.p, fieldName: "compression", required: false, type: ForwardOffset.self) + _v.finish() + } } /// For sending dictionary encoding information. Any Field can be @@ -291,131 +291,131 @@ public struct org_apache_arrow_flatbuf_RecordBatch: FlatBufferObject, Verifiable /// flag public struct org_apache_arrow_flatbuf_DictionaryBatch: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsDictionaryBatch(bb: ByteBuffer) -> org_apache_arrow_flatbuf_DictionaryBatch { return org_apache_arrow_flatbuf_DictionaryBatch(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case id = 4 - case data = 6 - case isDelta = 8 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var id: Int64 { let o = _accessor.offset(VTOFFSET.id.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } - public var data: org_apache_arrow_flatbuf_RecordBatch? { let o = _accessor.offset(VTOFFSET.data.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_RecordBatch(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - /// If isDelta is true the values in the dictionary are to be appended to a - /// dictionary with the indicated id. If isDelta is false this dictionary - /// should replace the existing dictionary. - public var isDelta: Bool { let o = _accessor.offset(VTOFFSET.isDelta.v); return o == 0 ? false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } - public static func startDictionaryBatch(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 3) } - public static func add(id: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: id, def: 0, at: VTOFFSET.id.p) } - public static func add(data: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: data, at: VTOFFSET.data.p) } - public static func add(isDelta: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: isDelta, def: false, - at: VTOFFSET.isDelta.p) } - public static func endDictionaryBatch(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createDictionaryBatch( - _ fbb: inout FlatBufferBuilder, - id: Int64 = 0, - dataOffset data: Offset = Offset(), - isDelta: Bool = false - ) -> Offset { - let __start = org_apache_arrow_flatbuf_DictionaryBatch.startDictionaryBatch(&fbb) - org_apache_arrow_flatbuf_DictionaryBatch.add(id: id, &fbb) - org_apache_arrow_flatbuf_DictionaryBatch.add(data: data, &fbb) - org_apache_arrow_flatbuf_DictionaryBatch.add(isDelta: isDelta, &fbb) - return org_apache_arrow_flatbuf_DictionaryBatch.endDictionaryBatch(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.id.p, fieldName: "id", required: false, type: Int64.self) - try _v.visit(field: VTOFFSET.data.p, fieldName: "data", required: false, type: 
ForwardOffset.self) - try _v.visit(field: VTOFFSET.isDelta.p, fieldName: "isDelta", required: false, type: Bool.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsDictionaryBatch(bb: ByteBuffer) -> org_apache_arrow_flatbuf_DictionaryBatch { return org_apache_arrow_flatbuf_DictionaryBatch(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case id = 4 + case data = 6 + case isDelta = 8 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var id: Int64 { let o = _accessor.offset(VTOFFSET.id.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } + public var data: org_apache_arrow_flatbuf_RecordBatch? { let o = _accessor.offset(VTOFFSET.data.v); return o == 0 ? nil : org_apache_arrow_flatbuf_RecordBatch(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + /// If isDelta is true the values in the dictionary are to be appended to a + /// dictionary with the indicated id. If isDelta is false this dictionary + /// should replace the existing dictionary. + public var isDelta: Bool { let o = _accessor.offset(VTOFFSET.isDelta.v); return o == 0 ? 
false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } + public static func startDictionaryBatch(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 3) } + public static func add(id: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: id, def: 0, at: VTOFFSET.id.p) } + public static func add(data: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: data, at: VTOFFSET.data.p) } + public static func add(isDelta: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: isDelta, def: false, + at: VTOFFSET.isDelta.p) } + public static func endDictionaryBatch(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createDictionaryBatch( + _ fbb: inout FlatBufferBuilder, + id: Int64 = 0, + dataOffset data: Offset = Offset(), + isDelta: Bool = false + ) -> Offset { + let __start = org_apache_arrow_flatbuf_DictionaryBatch.startDictionaryBatch(&fbb) + org_apache_arrow_flatbuf_DictionaryBatch.add(id: id, &fbb) + org_apache_arrow_flatbuf_DictionaryBatch.add(data: data, &fbb) + org_apache_arrow_flatbuf_DictionaryBatch.add(isDelta: isDelta, &fbb) + return org_apache_arrow_flatbuf_DictionaryBatch.endDictionaryBatch(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.id.p, fieldName: "id", required: false, type: Int64.self) + try _v.visit(field: VTOFFSET.data.p, fieldName: "data", required: false, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.isDelta.p, fieldName: "isDelta", required: false, type: Bool.self) + _v.finish() + } } public struct org_apache_arrow_flatbuf_Message: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsMessage(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Message { return org_apache_arrow_flatbuf_Message(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case version = 4 - case headerType = 6 - case header = 8 - case bodyLength = 10 - case customMetadata = 12 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var version: org_apache_arrow_flatbuf_MetadataVersion { let o = _accessor.offset(VTOFFSET.version.v); return o == 0 ? .v1 : org_apache_arrow_flatbuf_MetadataVersion(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .v1 } - public var headerType: org_apache_arrow_flatbuf_MessageHeader { let o = _accessor.offset(VTOFFSET.headerType.v); return o == 0 ? .none_ : org_apache_arrow_flatbuf_MessageHeader(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } - public func header(type: T.Type) -> T? { let o = _accessor.offset(VTOFFSET.header.v); return o == 0 ? nil : _accessor.union(o) } - public var bodyLength: Int64 { let o = _accessor.offset(VTOFFSET.bodyLength.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } - public var hasCustomMetadata: Bool { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? false : true } - public var customMetadataCount: Int32 { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func customMetadata(at index: Int32) -> org_apache_arrow_flatbuf_KeyValue? { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_KeyValue(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } - public static func startMessage(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } - public static func add(version: org_apache_arrow_flatbuf_MetadataVersion, _ fbb: inout FlatBufferBuilder) { fbb.add(element: version.rawValue, def: 0, at: VTOFFSET.version.p) } - public static func add(headerType: org_apache_arrow_flatbuf_MessageHeader, _ fbb: inout FlatBufferBuilder) { fbb.add(element: headerType.rawValue, def: 0, at: VTOFFSET.headerType.p) } - public static func add(header: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: header, at: VTOFFSET.header.p) } - public static func add(bodyLength: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: bodyLength, def: 0, at: VTOFFSET.bodyLength.p) } - public static func addVectorOf(customMetadata: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: customMetadata, at: VTOFFSET.customMetadata.p) } - public static func endMessage(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createMessage( - _ fbb: inout FlatBufferBuilder, - version: org_apache_arrow_flatbuf_MetadataVersion = .v1, - headerType: org_apache_arrow_flatbuf_MessageHeader = .none_, - headerOffset header: Offset = Offset(), - bodyLength: Int64 = 0, - customMetadataVectorOffset customMetadata: Offset = Offset() - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Message.startMessage(&fbb) - org_apache_arrow_flatbuf_Message.add(version: version, &fbb) - org_apache_arrow_flatbuf_Message.add(headerType: headerType, &fbb) - org_apache_arrow_flatbuf_Message.add(header: header, &fbb) - org_apache_arrow_flatbuf_Message.add(bodyLength: bodyLength, &fbb) - org_apache_arrow_flatbuf_Message.addVectorOf(customMetadata: customMetadata, &fbb) - return org_apache_arrow_flatbuf_Message.endMessage(&fbb, start: __start) - } - 
- public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.version.p, fieldName: "version", required: false, type: org_apache_arrow_flatbuf_MetadataVersion.self) - try _v.visit(unionKey: VTOFFSET.headerType.p, unionField: VTOFFSET.header.p, unionKeyName: "headerType", fieldName: "header", required: false, completion: { (verifier, key: org_apache_arrow_flatbuf_MessageHeader, pos) in - switch key { - case .none_: - break // NOTE - SWIFT doesnt support none - case .schema: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Schema.self) - case .dictionarybatch: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_DictionaryBatch.self) - case .recordbatch: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_RecordBatch.self) - case .tensor: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Tensor.self) - case .sparsetensor: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_SparseTensor.self) - } - }) - try _v.visit(field: VTOFFSET.bodyLength.p, fieldName: "bodyLength", required: false, type: Int64.self) - try _v.visit(field: VTOFFSET.customMetadata.p, fieldName: "customMetadata", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_KeyValue>>.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsMessage(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Message { return org_apache_arrow_flatbuf_Message(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case version = 4 + case headerType = 6 + case header = 8 + case bodyLength = 10 + case customMetadata = 12 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var version: org_apache_arrow_flatbuf_MetadataVersion { let o = _accessor.offset(VTOFFSET.version.v); return o == 0 ? .v1 : org_apache_arrow_flatbuf_MetadataVersion(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .v1 } + public var headerType: org_apache_arrow_flatbuf_MessageHeader { let o = _accessor.offset(VTOFFSET.headerType.v); return o == 0 ? .none_ : org_apache_arrow_flatbuf_MessageHeader(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } + public func header(type: T.Type) -> T? { let o = _accessor.offset(VTOFFSET.header.v); return o == 0 ? nil : _accessor.union(o) } + public var bodyLength: Int64 { let o = _accessor.offset(VTOFFSET.bodyLength.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } + public var hasCustomMetadata: Bool { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? false : true } + public var customMetadataCount: Int32 { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func customMetadata(at index: Int32) -> org_apache_arrow_flatbuf_KeyValue? { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_KeyValue(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } + public static func startMessage(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } + public static func add(version: org_apache_arrow_flatbuf_MetadataVersion, _ fbb: inout FlatBufferBuilder) { fbb.add(element: version.rawValue, def: 0, at: VTOFFSET.version.p) } + public static func add(headerType: org_apache_arrow_flatbuf_MessageHeader, _ fbb: inout FlatBufferBuilder) { fbb.add(element: headerType.rawValue, def: 0, at: VTOFFSET.headerType.p) } + public static func add(header: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: header, at: VTOFFSET.header.p) } + public static func add(bodyLength: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: bodyLength, def: 0, at: VTOFFSET.bodyLength.p) } + public static func addVectorOf(customMetadata: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: customMetadata, at: VTOFFSET.customMetadata.p) } + public static func endMessage(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createMessage( + _ fbb: inout FlatBufferBuilder, + version: org_apache_arrow_flatbuf_MetadataVersion = .v1, + headerType: org_apache_arrow_flatbuf_MessageHeader = .none_, + headerOffset header: Offset = Offset(), + bodyLength: Int64 = 0, + customMetadataVectorOffset customMetadata: Offset = Offset() + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Message.startMessage(&fbb) + org_apache_arrow_flatbuf_Message.add(version: version, &fbb) + org_apache_arrow_flatbuf_Message.add(headerType: headerType, &fbb) + org_apache_arrow_flatbuf_Message.add(header: header, &fbb) + org_apache_arrow_flatbuf_Message.add(bodyLength: bodyLength, &fbb) + org_apache_arrow_flatbuf_Message.addVectorOf(customMetadata: customMetadata, &fbb) + return org_apache_arrow_flatbuf_Message.endMessage(&fbb, start: __start) + } + 
+ public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.version.p, fieldName: "version", required: false, type: org_apache_arrow_flatbuf_MetadataVersion.self) + try _v.visit(unionKey: VTOFFSET.headerType.p, unionField: VTOFFSET.header.p, unionKeyName: "headerType", fieldName: "header", required: false, completion: { (verifier, key: org_apache_arrow_flatbuf_MessageHeader, pos) in + switch key { + case .none_: + break // NOTE - SWIFT doesnt support none + case .schema: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Schema.self) + case .dictionarybatch: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_DictionaryBatch.self) + case .recordbatch: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_RecordBatch.self) + case .tensor: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Tensor.self) + case .sparsetensor: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_SparseTensor.self) + } + }) + try _v.visit(field: VTOFFSET.bodyLength.p, fieldName: "bodyLength", required: false, type: Int64.self) + try _v.visit(field: VTOFFSET.customMetadata.p, fieldName: "customMetadata", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_KeyValue>>.self) + _v.finish() + } } diff --git a/Sources/Arrow/Schema_generated.swift b/Sources/Arrow/Schema_generated.swift index d508444..a757c1e 100644 --- a/Sources/Arrow/Schema_generated.swift +++ b/Sources/Arrow/Schema_generated.swift @@ -22,28 +22,28 @@ import FlatBuffers public enum org_apache_arrow_flatbuf_MetadataVersion: Int16, Enum, Verifiable { - public typealias T = Int16 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int16 { return self.rawValue } - /// 0.1.0 (October 2016). - case v1 = 0 - /// 0.2.0 (February 2017). 
Non-backwards compatible with V1. - case v2 = 1 - /// 0.3.0 -> 0.7.1 (May - December 2017). Non-backwards compatible with V2. - case v3 = 2 - /// >= 0.8.0 (December 2017). Non-backwards compatible with V3. - case v4 = 3 - /// >= 1.0.0 (July 2020. Backwards compatible with V4 (V5 readers can read V4 - /// metadata and IPC messages). Implementations are recommended to provide a - /// V4 compatibility mode with V5 format changes disabled. - /// - /// Incompatible changes between V4 and V5: - /// - Union buffer layout has changed. In V5, Unions don't have a validity - /// bitmap buffer. - case v5 = 4 - - public static var max: org_apache_arrow_flatbuf_MetadataVersion { return .v5 } - public static var min: org_apache_arrow_flatbuf_MetadataVersion { return .v1 } + public typealias T = Int16 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int16 { return self.rawValue } + /// 0.1.0 (October 2016). + case v1 = 0 + /// 0.2.0 (February 2017). Non-backwards compatible with V1. + case v2 = 1 + /// 0.3.0 -> 0.7.1 (May - December 2017). Non-backwards compatible with V2. + case v3 = 2 + /// >= 0.8.0 (December 2017). Non-backwards compatible with V3. + case v4 = 3 + /// >= 1.0.0 (July 2020. Backwards compatible with V4 (V5 readers can read V4 + /// metadata and IPC messages). Implementations are recommended to provide a + /// V4 compatibility mode with V5 format changes disabled. + /// + /// Incompatible changes between V4 and V5: + /// - Union buffer layout has changed. In V5, Unions don't have a validity + /// bitmap buffer. 
+ case v5 = 4 + + public static var max: org_apache_arrow_flatbuf_MetadataVersion { return .v5 } + public static var min: org_apache_arrow_flatbuf_MetadataVersion { return .v1 } } @@ -60,90 +60,90 @@ public enum org_apache_arrow_flatbuf_MetadataVersion: Int16, Enum, Verifiable { /// values here are intented to represent higher level /// features, additional details maybe negotiated /// with key-value pairs specific to the protocol. -/// +/// /// Enums added to this list should be assigned power-of-two values /// to facilitate exchanging and comparing bitmaps for supported /// features. public enum org_apache_arrow_flatbuf_Feature: Int64, Enum, Verifiable { - public typealias T = Int64 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int64 { return self.rawValue } - /// Needed to make flatbuffers happy. - case unused = 0 - /// The stream makes use of multiple full dictionaries with the - /// same ID and assumes clients implement dictionary replacement - /// correctly. - case dictionaryReplacement = 1 - /// The stream makes use of compressed bodies as described - /// in Message.fbs. - case compressedBody = 2 - - public static var max: org_apache_arrow_flatbuf_Feature { return .compressedBody } - public static var min: org_apache_arrow_flatbuf_Feature { return .unused } + public typealias T = Int64 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int64 { return self.rawValue } + /// Needed to make flatbuffers happy. + case unused = 0 + /// The stream makes use of multiple full dictionaries with the + /// same ID and assumes clients implement dictionary replacement + /// correctly. + case dictionaryReplacement = 1 + /// The stream makes use of compressed bodies as described + /// in Message.fbs. 
+ case compressedBody = 2 + + public static var max: org_apache_arrow_flatbuf_Feature { return .compressedBody } + public static var min: org_apache_arrow_flatbuf_Feature { return .unused } } public enum org_apache_arrow_flatbuf_UnionMode: Int16, Enum, Verifiable { - public typealias T = Int16 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int16 { return self.rawValue } - case sparse = 0 - case dense = 1 - - public static var max: org_apache_arrow_flatbuf_UnionMode { return .dense } - public static var min: org_apache_arrow_flatbuf_UnionMode { return .sparse } + public typealias T = Int16 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int16 { return self.rawValue } + case sparse = 0 + case dense = 1 + + public static var max: org_apache_arrow_flatbuf_UnionMode { return .dense } + public static var min: org_apache_arrow_flatbuf_UnionMode { return .sparse } } public enum org_apache_arrow_flatbuf_Precision: Int16, Enum, Verifiable { - public typealias T = Int16 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int16 { return self.rawValue } - case half = 0 - case single = 1 - case double = 2 - - public static var max: org_apache_arrow_flatbuf_Precision { return .double } - public static var min: org_apache_arrow_flatbuf_Precision { return .half } + public typealias T = Int16 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int16 { return self.rawValue } + case half = 0 + case single = 1 + case double = 2 + + public static var max: org_apache_arrow_flatbuf_Precision { return .double } + public static var min: org_apache_arrow_flatbuf_Precision { return .half } } public enum org_apache_arrow_flatbuf_DateUnit: Int16, Enum, Verifiable { - public typealias T = Int16 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int16 { return self.rawValue } - case day = 0 - case millisecond = 1 - - public static var 
max: org_apache_arrow_flatbuf_DateUnit { return .millisecond } - public static var min: org_apache_arrow_flatbuf_DateUnit { return .day } + public typealias T = Int16 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int16 { return self.rawValue } + case day = 0 + case millisecond = 1 + + public static var max: org_apache_arrow_flatbuf_DateUnit { return .millisecond } + public static var min: org_apache_arrow_flatbuf_DateUnit { return .day } } public enum org_apache_arrow_flatbuf_TimeUnit: Int16, Enum, Verifiable { - public typealias T = Int16 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int16 { return self.rawValue } - case second = 0 - case millisecond = 1 - case microsecond = 2 - case nanosecond = 3 - - public static var max: org_apache_arrow_flatbuf_TimeUnit { return .nanosecond } - public static var min: org_apache_arrow_flatbuf_TimeUnit { return .second } + public typealias T = Int16 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int16 { return self.rawValue } + case second = 0 + case millisecond = 1 + case microsecond = 2 + case nanosecond = 3 + + public static var max: org_apache_arrow_flatbuf_TimeUnit { return .nanosecond } + public static var min: org_apache_arrow_flatbuf_TimeUnit { return .second } } public enum org_apache_arrow_flatbuf_IntervalUnit: Int16, Enum, Verifiable { - public typealias T = Int16 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int16 { return self.rawValue } - case yearMonth = 0 - case dayTime = 1 - case monthDayNano = 2 - - public static var max: org_apache_arrow_flatbuf_IntervalUnit { return .monthDayNano } - public static var min: org_apache_arrow_flatbuf_IntervalUnit { return .yearMonth } + public typealias T = Int16 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int16 { return self.rawValue } + case yearMonth = 0 + case dayTime = 1 + case monthDayNano 
= 2 + + public static var max: org_apache_arrow_flatbuf_IntervalUnit { return .monthDayNano } + public static var min: org_apache_arrow_flatbuf_IntervalUnit { return .yearMonth } } @@ -151,40 +151,40 @@ public enum org_apache_arrow_flatbuf_IntervalUnit: Int16, Enum, Verifiable { /// Top-level Type value, enabling extensible type-specific metadata. We can /// add new logical types to Type without breaking backwards compatibility public enum org_apache_arrow_flatbuf_Type_: UInt8, UnionEnum { - public typealias T = UInt8 - - public init?(value: T) { - self.init(rawValue: value) - } - - public static var byteSize: Int { return MemoryLayout.size } - public var value: UInt8 { return self.rawValue } - case none_ = 0 - case null = 1 - case int = 2 - case floatingpoint = 3 - case binary = 4 - case utf8 = 5 - case bool = 6 - case decimal = 7 - case date = 8 - case time = 9 - case timestamp = 10 - case interval = 11 - case list = 12 - case struct_ = 13 - case union = 14 - case fixedsizebinary = 15 - case fixedsizelist = 16 - case map = 17 - case duration = 18 - case largebinary = 19 - case largeutf8 = 20 - case largelist = 21 - case runendencoded = 22 - - public static var max: org_apache_arrow_flatbuf_Type_ { return .runendencoded } - public static var min: org_apache_arrow_flatbuf_Type_ { return .none_ } + public typealias T = UInt8 + + public init?(value: T) { + self.init(rawValue: value) + } + + public static var byteSize: Int { return MemoryLayout.size } + public var value: UInt8 { return self.rawValue } + case none_ = 0 + case null = 1 + case int = 2 + case floatingpoint = 3 + case binary = 4 + case utf8 = 5 + case bool = 6 + case decimal = 7 + case date = 8 + case time = 9 + case timestamp = 10 + case interval = 11 + case list = 12 + case struct_ = 13 + case union = 14 + case fixedsizebinary = 15 + case fixedsizelist = 16 + case map = 17 + case duration = 18 + case largebinary = 19 + case largeutf8 = 20 + case largelist = 21 + case runendencoded = 22 + + public static 
var max: org_apache_arrow_flatbuf_Type_ { return .runendencoded } + public static var min: org_apache_arrow_flatbuf_Type_ { return .none_ } } @@ -194,27 +194,27 @@ public enum org_apache_arrow_flatbuf_Type_: UInt8, UnionEnum { /// Dictionaries might be explicit maps between integers and values /// allowing for non-contiguous index values public enum org_apache_arrow_flatbuf_DictionaryKind: Int16, Enum, Verifiable { - public typealias T = Int16 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int16 { return self.rawValue } - case densearray = 0 + public typealias T = Int16 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int16 { return self.rawValue } + case densearray = 0 - public static var max: org_apache_arrow_flatbuf_DictionaryKind { return .densearray } - public static var min: org_apache_arrow_flatbuf_DictionaryKind { return .densearray } + public static var max: org_apache_arrow_flatbuf_DictionaryKind { return .densearray } + public static var min: org_apache_arrow_flatbuf_DictionaryKind { return .densearray } } /// ---------------------------------------------------------------------- /// Endianness of the platform producing the data public enum org_apache_arrow_flatbuf_Endianness: Int16, Enum, Verifiable { - public typealias T = Int16 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int16 { return self.rawValue } - case little = 0 - case big = 1 - - public static var max: org_apache_arrow_flatbuf_Endianness { return .big } - public static var min: org_apache_arrow_flatbuf_Endianness { return .little } + public typealias T = Int16 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int16 { return self.rawValue } + case little = 0 + case big = 1 + + public static var max: org_apache_arrow_flatbuf_Endianness { return .big } + public static var min: org_apache_arrow_flatbuf_Endianness { return .little } } @@ -222,82 +222,82 @@ 
public enum org_apache_arrow_flatbuf_Endianness: Int16, Enum, Verifiable { /// A Buffer represents a single contiguous memory segment public struct org_apache_arrow_flatbuf_Buffer: NativeStruct, Verifiable, FlatbuffersInitializable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - - /// The relative offset into the shared memory page where the bytes for this - /// buffer starts - private var _offset: Int64 - /// The absolute length (in bytes) of the memory buffer. The memory is found - /// from offset (inclusive) to offset + length (non-inclusive). When building - /// messages using the encapsulated IPC message, padding bytes may be written - /// after a buffer, but such padding bytes do not need to be accounted for in - /// the size here. - private var _length: Int64 - - public init(_ bb: ByteBuffer, o: Int32) { - let _accessor = Struct(bb: bb, position: o) - _offset = _accessor.readBuffer(of: Int64.self, at: 0) - _length = _accessor.readBuffer(of: Int64.self, at: 8) - } - - public init(offset: Int64, length: Int64) { - _offset = offset - _length = length - } - - public init() { - _offset = 0 - _length = 0 - } - - /// The relative offset into the shared memory page where the bytes for this - /// buffer starts - public var offset: Int64 { _offset } - /// The absolute length (in bytes) of the memory buffer. The memory is found - /// from offset (inclusive) to offset + length (non-inclusive). When building - /// messages using the encapsulated IPC message, padding bytes may be written - /// after a buffer, but such padding bytes do not need to be accounted for in - /// the size here. 
- public var length: Int64 { _length } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - try verifier.inBuffer(position: position, of: org_apache_arrow_flatbuf_Buffer.self) - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + + /// The relative offset into the shared memory page where the bytes for this + /// buffer starts + private var _offset: Int64 + /// The absolute length (in bytes) of the memory buffer. The memory is found + /// from offset (inclusive) to offset + length (non-inclusive). When building + /// messages using the encapsulated IPC message, padding bytes may be written + /// after a buffer, but such padding bytes do not need to be accounted for in + /// the size here. + private var _length: Int64 + + public init(_ bb: ByteBuffer, o: Int32) { + let _accessor = Struct(bb: bb, position: o) + _offset = _accessor.readBuffer(of: Int64.self, at: 0) + _length = _accessor.readBuffer(of: Int64.self, at: 8) + } + + public init(offset: Int64, length: Int64) { + _offset = offset + _length = length + } + + public init() { + _offset = 0 + _length = 0 + } + + /// The relative offset into the shared memory page where the bytes for this + /// buffer starts + public var offset: Int64 { _offset } + /// The absolute length (in bytes) of the memory buffer. The memory is found + /// from offset (inclusive) to offset + length (non-inclusive). When building + /// messages using the encapsulated IPC message, padding bytes may be written + /// after a buffer, but such padding bytes do not need to be accounted for in + /// the size here. 
+ public var length: Int64 { _length } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + try verifier.inBuffer(position: position, of: org_apache_arrow_flatbuf_Buffer.self) + } } /// ---------------------------------------------------------------------- /// A Buffer represents a single contiguous memory segment public struct org_apache_arrow_flatbuf_Buffer_Mutable: FlatBufferObject { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Struct + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Struct - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Struct(bb: bb, position: o) } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Struct(bb: bb, position: o) } - public var offset: Int64 { return _accessor.readBuffer(of: Int64.self, at: 0) } - public var length: Int64 { return _accessor.readBuffer(of: Int64.self, at: 8) } + public var offset: Int64 { return _accessor.readBuffer(of: Int64.self, at: 0) } + public var length: Int64 { return _accessor.readBuffer(of: Int64.self, at: 8) } } /// These are stored in the flatbuffer in the Type union below public struct org_apache_arrow_flatbuf_Null: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table - public static func getRootAsNull(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Null { return org_apache_arrow_flatbuf_Null(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsNull(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Null { return org_apache_arrow_flatbuf_Null(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startNull(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endNull(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startNull(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endNull(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + _v.finish() + } } /// A Struct_ in the flatbuffer metadata is the same as an Arrow Struct @@ -305,118 +305,118 @@ public struct org_apache_arrow_flatbuf_Null: FlatBufferObject, Verifiable { /// Struct is a reserved word in Flatbuffers public struct org_apache_arrow_flatbuf_Struct_: FlatBufferObject, Verifiable { - static func validateVersion() { 
FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table - public static func getRootAsStruct_(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Struct_ { return org_apache_arrow_flatbuf_Struct_(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsStruct_(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Struct_ { return org_apache_arrow_flatbuf_Struct_(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startStruct_(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endStruct_(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startStruct_(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endStruct_(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + _v.finish() + } } public struct org_apache_arrow_flatbuf_List: FlatBufferObject, Verifiable { - static func 
validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table - public static func getRootAsList(bb: ByteBuffer) -> org_apache_arrow_flatbuf_List { return org_apache_arrow_flatbuf_List(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsList(bb: ByteBuffer) -> org_apache_arrow_flatbuf_List { return org_apache_arrow_flatbuf_List(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startList(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endList(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startList(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endList(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + _v.finish() + } } /// Same as List, but with 64-bit offsets, allowing to represent /// extremely large data values. 
public struct org_apache_arrow_flatbuf_LargeList: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table - public static func getRootAsLargeList(bb: ByteBuffer) -> org_apache_arrow_flatbuf_LargeList { return org_apache_arrow_flatbuf_LargeList(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsLargeList(bb: ByteBuffer) -> org_apache_arrow_flatbuf_LargeList { return org_apache_arrow_flatbuf_LargeList(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startLargeList(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endLargeList(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startLargeList(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endLargeList(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try 
verifier.visitTable(at: position) + _v.finish() + } } public struct org_apache_arrow_flatbuf_FixedSizeList: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsFixedSizeList(bb: ByteBuffer) -> org_apache_arrow_flatbuf_FixedSizeList { return org_apache_arrow_flatbuf_FixedSizeList(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case listSize = 4 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// Number of list items per value - public var listSize: Int32 { let o = _accessor.offset(VTOFFSET.listSize.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) } - public static func startFixedSizeList(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } - public static func add(listSize: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: listSize, def: 0, at: VTOFFSET.listSize.p) } - public static func endFixedSizeList(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createFixedSizeList( - _ fbb: inout FlatBufferBuilder, - listSize: Int32 = 0 - ) -> Offset { - let __start = org_apache_arrow_flatbuf_FixedSizeList.startFixedSizeList(&fbb) - org_apache_arrow_flatbuf_FixedSizeList.add(listSize: listSize, &fbb) - return org_apache_arrow_flatbuf_FixedSizeList.endFixedSizeList(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.listSize.p, fieldName: 
"listSize", required: false, type: Int32.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsFixedSizeList(bb: ByteBuffer) -> org_apache_arrow_flatbuf_FixedSizeList { return org_apache_arrow_flatbuf_FixedSizeList(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case listSize = 4 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// Number of list items per value + public var listSize: Int32 { let o = _accessor.offset(VTOFFSET.listSize.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) } + public static func startFixedSizeList(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } + public static func add(listSize: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: listSize, def: 0, at: VTOFFSET.listSize.p) } + public static func endFixedSizeList(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createFixedSizeList( + _ fbb: inout FlatBufferBuilder, + listSize: Int32 = 0 + ) -> Offset { + let __start = org_apache_arrow_flatbuf_FixedSizeList.startFixedSizeList(&fbb) + org_apache_arrow_flatbuf_FixedSizeList.add(listSize: listSize, &fbb) + return org_apache_arrow_flatbuf_FixedSizeList.endFixedSizeList(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.listSize.p, fieldName: "listSize", required: false, type: Int32.self) + _v.finish() + } } /// A Map is a 
logical nested type that is represented as -/// +/// /// List> -/// +/// /// In this layout, the keys and values are each respectively contiguous. We do /// not constrain the key and value types, so the application is responsible /// for ensuring that the keys are hashable and unique. Whether the keys are sorted /// may be set in the metadata for this field. -/// +/// /// In a field with Map type, the field has a child Struct field, which then /// has two children: key type and the second the value type. The names of the /// child fields may be respectively "entries", "key", and "value", but this is /// not enforced. -/// +/// /// Map /// ```text /// - child[0] entries: Struct @@ -424,47 +424,47 @@ public struct org_apache_arrow_flatbuf_FixedSizeList: FlatBufferObject, Verifiab /// - child[1] value: V /// ``` /// Neither the "entries" field nor the "key" field may be nullable. -/// +/// /// The metadata is structured so that Arrow systems without special handling /// for Map can make Map an alias for List. The "layout" attribute for the Map /// field must have the same contents as a List. public struct org_apache_arrow_flatbuf_Map: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsMap(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Map { return org_apache_arrow_flatbuf_Map(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case keysSorted = 4 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// Set to true if the keys within each value are sorted - public var keysSorted: Bool { let o = _accessor.offset(VTOFFSET.keysSorted.v); return o == 0 ? 
false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } - public static func startMap(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } - public static func add(keysSorted: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: keysSorted, def: false, - at: VTOFFSET.keysSorted.p) } - public static func endMap(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createMap( - _ fbb: inout FlatBufferBuilder, - keysSorted: Bool = false - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Map.startMap(&fbb) - org_apache_arrow_flatbuf_Map.add(keysSorted: keysSorted, &fbb) - return org_apache_arrow_flatbuf_Map.endMap(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.keysSorted.p, fieldName: "keysSorted", required: false, type: Bool.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsMap(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Map { return org_apache_arrow_flatbuf_Map(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case keysSorted = 4 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// Set to true if the keys within each value are sorted + public var keysSorted: Bool { let o = _accessor.offset(VTOFFSET.keysSorted.v); return o == 0 ? 
false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } + public static func startMap(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } + public static func add(keysSorted: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: keysSorted, def: false, + at: VTOFFSET.keysSorted.p) } + public static func endMap(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createMap( + _ fbb: inout FlatBufferBuilder, + keysSorted: Bool = false + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Map.startMap(&fbb) + org_apache_arrow_flatbuf_Map.add(keysSorted: keysSorted, &fbb) + return org_apache_arrow_flatbuf_Map.endMap(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.keysSorted.p, fieldName: "keysSorted", required: false, type: Bool.self) + _v.finish() + } } /// A union is a complex type with children in Field @@ -473,298 +473,298 @@ public struct org_apache_arrow_flatbuf_Map: FlatBufferObject, Verifiable { /// for each child `typeIds[offset]` is the id used in the type vector public struct org_apache_arrow_flatbuf_Union: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsUnion(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Union { return org_apache_arrow_flatbuf_Union(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case mode = 4 - case typeIds = 6 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var mode: org_apache_arrow_flatbuf_UnionMode { let o = _accessor.offset(VTOFFSET.mode.v); return o == 0 ? .sparse : org_apache_arrow_flatbuf_UnionMode(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .sparse } - public var hasTypeIds: Bool { let o = _accessor.offset(VTOFFSET.typeIds.v); return o == 0 ? false : true } - public var typeIdsCount: Int32 { let o = _accessor.offset(VTOFFSET.typeIds.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func typeIds(at index: Int32) -> Int32 { let o = _accessor.offset(VTOFFSET.typeIds.v); return o == 0 ? 0 : _accessor.directRead(of: Int32.self, offset: _accessor.vector(at: o) + index * 4) } - public var typeIds: [Int32] { return _accessor.getVector(at: VTOFFSET.typeIds.v) ?? 
[] } - public static func startUnion(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } - public static func add(mode: org_apache_arrow_flatbuf_UnionMode, _ fbb: inout FlatBufferBuilder) { fbb.add(element: mode.rawValue, def: 0, at: VTOFFSET.mode.p) } - public static func addVectorOf(typeIds: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: typeIds, at: VTOFFSET.typeIds.p) } - public static func endUnion(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createUnion( - _ fbb: inout FlatBufferBuilder, - mode: org_apache_arrow_flatbuf_UnionMode = .sparse, - typeIdsVectorOffset typeIds: Offset = Offset() - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Union.startUnion(&fbb) - org_apache_arrow_flatbuf_Union.add(mode: mode, &fbb) - org_apache_arrow_flatbuf_Union.addVectorOf(typeIds: typeIds, &fbb) - return org_apache_arrow_flatbuf_Union.endUnion(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.mode.p, fieldName: "mode", required: false, type: org_apache_arrow_flatbuf_UnionMode.self) - try _v.visit(field: VTOFFSET.typeIds.p, fieldName: "typeIds", required: false, type: ForwardOffset>.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsUnion(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Union { return org_apache_arrow_flatbuf_Union(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case mode = 4 + case typeIds = 6 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var mode: org_apache_arrow_flatbuf_UnionMode { let o = _accessor.offset(VTOFFSET.mode.v); return o == 0 ? .sparse : org_apache_arrow_flatbuf_UnionMode(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .sparse } + public var hasTypeIds: Bool { let o = _accessor.offset(VTOFFSET.typeIds.v); return o == 0 ? false : true } + public var typeIdsCount: Int32 { let o = _accessor.offset(VTOFFSET.typeIds.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func typeIds(at index: Int32) -> Int32 { let o = _accessor.offset(VTOFFSET.typeIds.v); return o == 0 ? 0 : _accessor.directRead(of: Int32.self, offset: _accessor.vector(at: o) + index * 4) } + public var typeIds: [Int32] { return _accessor.getVector(at: VTOFFSET.typeIds.v) ?? 
[] } + public static func startUnion(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } + public static func add(mode: org_apache_arrow_flatbuf_UnionMode, _ fbb: inout FlatBufferBuilder) { fbb.add(element: mode.rawValue, def: 0, at: VTOFFSET.mode.p) } + public static func addVectorOf(typeIds: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: typeIds, at: VTOFFSET.typeIds.p) } + public static func endUnion(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createUnion( + _ fbb: inout FlatBufferBuilder, + mode: org_apache_arrow_flatbuf_UnionMode = .sparse, + typeIdsVectorOffset typeIds: Offset = Offset() + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Union.startUnion(&fbb) + org_apache_arrow_flatbuf_Union.add(mode: mode, &fbb) + org_apache_arrow_flatbuf_Union.addVectorOf(typeIds: typeIds, &fbb) + return org_apache_arrow_flatbuf_Union.endUnion(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.mode.p, fieldName: "mode", required: false, type: org_apache_arrow_flatbuf_UnionMode.self) + try _v.visit(field: VTOFFSET.typeIds.p, fieldName: "typeIds", required: false, type: ForwardOffset>.self) + _v.finish() + } } public struct org_apache_arrow_flatbuf_Int: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsInt(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Int { return org_apache_arrow_flatbuf_Int(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case bitWidth = 4 - case isSigned = 6 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var bitWidth: Int32 { let o = _accessor.offset(VTOFFSET.bitWidth.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) } - public var isSigned: Bool { let o = _accessor.offset(VTOFFSET.isSigned.v); return o == 0 ? false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } - public static func startInt(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } - public static func add(bitWidth: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: bitWidth, def: 0, at: VTOFFSET.bitWidth.p) } - public static func add(isSigned: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: isSigned, def: false, - at: VTOFFSET.isSigned.p) } - public static func endInt(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createInt( - _ fbb: inout FlatBufferBuilder, - bitWidth: Int32 = 0, - isSigned: Bool = false - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Int.startInt(&fbb) - org_apache_arrow_flatbuf_Int.add(bitWidth: bitWidth, &fbb) - org_apache_arrow_flatbuf_Int.add(isSigned: isSigned, &fbb) - return org_apache_arrow_flatbuf_Int.endInt(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.bitWidth.p, 
fieldName: "bitWidth", required: false, type: Int32.self) - try _v.visit(field: VTOFFSET.isSigned.p, fieldName: "isSigned", required: false, type: Bool.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsInt(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Int { return org_apache_arrow_flatbuf_Int(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case bitWidth = 4 + case isSigned = 6 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var bitWidth: Int32 { let o = _accessor.offset(VTOFFSET.bitWidth.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) } + public var isSigned: Bool { let o = _accessor.offset(VTOFFSET.isSigned.v); return o == 0 ? 
false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } + public static func startInt(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } + public static func add(bitWidth: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: bitWidth, def: 0, at: VTOFFSET.bitWidth.p) } + public static func add(isSigned: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: isSigned, def: false, + at: VTOFFSET.isSigned.p) } + public static func endInt(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createInt( + _ fbb: inout FlatBufferBuilder, + bitWidth: Int32 = 0, + isSigned: Bool = false + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Int.startInt(&fbb) + org_apache_arrow_flatbuf_Int.add(bitWidth: bitWidth, &fbb) + org_apache_arrow_flatbuf_Int.add(isSigned: isSigned, &fbb) + return org_apache_arrow_flatbuf_Int.endInt(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.bitWidth.p, fieldName: "bitWidth", required: false, type: Int32.self) + try _v.visit(field: VTOFFSET.isSigned.p, fieldName: "isSigned", required: false, type: Bool.self) + _v.finish() + } } public struct org_apache_arrow_flatbuf_FloatingPoint: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsFloatingPoint(bb: ByteBuffer) -> org_apache_arrow_flatbuf_FloatingPoint { return org_apache_arrow_flatbuf_FloatingPoint(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case precision = 4 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var precision: org_apache_arrow_flatbuf_Precision { let o = _accessor.offset(VTOFFSET.precision.v); return o == 0 ? .half : org_apache_arrow_flatbuf_Precision(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .half } - public static func startFloatingPoint(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } - public static func add(precision: org_apache_arrow_flatbuf_Precision, _ fbb: inout FlatBufferBuilder) { fbb.add(element: precision.rawValue, def: 0, at: VTOFFSET.precision.p) } - public static func endFloatingPoint(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createFloatingPoint( - _ fbb: inout FlatBufferBuilder, - precision: org_apache_arrow_flatbuf_Precision = .half - ) -> Offset { - let __start = org_apache_arrow_flatbuf_FloatingPoint.startFloatingPoint(&fbb) - org_apache_arrow_flatbuf_FloatingPoint.add(precision: precision, &fbb) - return org_apache_arrow_flatbuf_FloatingPoint.endFloatingPoint(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.precision.p, fieldName: "precision", required: false, type: org_apache_arrow_flatbuf_Precision.self) - _v.finish() - } + static func 
validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsFloatingPoint(bb: ByteBuffer) -> org_apache_arrow_flatbuf_FloatingPoint { return org_apache_arrow_flatbuf_FloatingPoint(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case precision = 4 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var precision: org_apache_arrow_flatbuf_Precision { let o = _accessor.offset(VTOFFSET.precision.v); return o == 0 ? .half : org_apache_arrow_flatbuf_Precision(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .half } + public static func startFloatingPoint(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } + public static func add(precision: org_apache_arrow_flatbuf_Precision, _ fbb: inout FlatBufferBuilder) { fbb.add(element: precision.rawValue, def: 0, at: VTOFFSET.precision.p) } + public static func endFloatingPoint(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createFloatingPoint( + _ fbb: inout FlatBufferBuilder, + precision: org_apache_arrow_flatbuf_Precision = .half + ) -> Offset { + let __start = org_apache_arrow_flatbuf_FloatingPoint.startFloatingPoint(&fbb) + org_apache_arrow_flatbuf_FloatingPoint.add(precision: precision, &fbb) + return org_apache_arrow_flatbuf_FloatingPoint.endFloatingPoint(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.precision.p, fieldName: "precision", required: false, 
type: org_apache_arrow_flatbuf_Precision.self) + _v.finish() + } } /// Unicode with UTF-8 encoding public struct org_apache_arrow_flatbuf_Utf8: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table - public static func getRootAsUtf8(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Utf8 { return org_apache_arrow_flatbuf_Utf8(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsUtf8(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Utf8 { return org_apache_arrow_flatbuf_Utf8(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startUtf8(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endUtf8(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startUtf8(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endUtf8(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: 
Verifiable { + var _v = try verifier.visitTable(at: position) + _v.finish() + } } /// Opaque binary data public struct org_apache_arrow_flatbuf_Binary: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table - public static func getRootAsBinary(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Binary { return org_apache_arrow_flatbuf_Binary(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsBinary(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Binary { return org_apache_arrow_flatbuf_Binary(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startBinary(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endBinary(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startBinary(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endBinary(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout Verifier, at position: Int, of 
type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + _v.finish() + } } /// Same as Utf8, but with 64-bit offsets, allowing to represent /// extremely large data values. public struct org_apache_arrow_flatbuf_LargeUtf8: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table - public static func getRootAsLargeUtf8(bb: ByteBuffer) -> org_apache_arrow_flatbuf_LargeUtf8 { return org_apache_arrow_flatbuf_LargeUtf8(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsLargeUtf8(bb: ByteBuffer) -> org_apache_arrow_flatbuf_LargeUtf8 { return org_apache_arrow_flatbuf_LargeUtf8(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startLargeUtf8(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endLargeUtf8(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startLargeUtf8(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endLargeUtf8(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = 
try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + _v.finish() + } } /// Same as Binary, but with 64-bit offsets, allowing to represent /// extremely large data values. public struct org_apache_arrow_flatbuf_LargeBinary: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table - public static func getRootAsLargeBinary(bb: ByteBuffer) -> org_apache_arrow_flatbuf_LargeBinary { return org_apache_arrow_flatbuf_LargeBinary(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsLargeBinary(bb: ByteBuffer) -> org_apache_arrow_flatbuf_LargeBinary { return org_apache_arrow_flatbuf_LargeBinary(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startLargeBinary(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endLargeBinary(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startLargeBinary(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endLargeBinary(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: 
fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + _v.finish() + } } public struct org_apache_arrow_flatbuf_FixedSizeBinary: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsFixedSizeBinary(bb: ByteBuffer) -> org_apache_arrow_flatbuf_FixedSizeBinary { return org_apache_arrow_flatbuf_FixedSizeBinary(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case byteWidth = 4 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// Number of bytes per value - public var byteWidth: Int32 { let o = _accessor.offset(VTOFFSET.byteWidth.v); return o == 0 ? 
0 : _accessor.readBuffer(of: Int32.self, at: o) } - public static func startFixedSizeBinary(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } - public static func add(byteWidth: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: byteWidth, def: 0, at: VTOFFSET.byteWidth.p) } - public static func endFixedSizeBinary(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createFixedSizeBinary( - _ fbb: inout FlatBufferBuilder, - byteWidth: Int32 = 0 - ) -> Offset { - let __start = org_apache_arrow_flatbuf_FixedSizeBinary.startFixedSizeBinary(&fbb) - org_apache_arrow_flatbuf_FixedSizeBinary.add(byteWidth: byteWidth, &fbb) - return org_apache_arrow_flatbuf_FixedSizeBinary.endFixedSizeBinary(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.byteWidth.p, fieldName: "byteWidth", required: false, type: Int32.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsFixedSizeBinary(bb: ByteBuffer) -> org_apache_arrow_flatbuf_FixedSizeBinary { return org_apache_arrow_flatbuf_FixedSizeBinary(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case byteWidth = 4 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// Number of bytes per value + public var byteWidth: Int32 { let o = _accessor.offset(VTOFFSET.byteWidth.v); return o == 0 ? 
0 : _accessor.readBuffer(of: Int32.self, at: o) } + public static func startFixedSizeBinary(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } + public static func add(byteWidth: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: byteWidth, def: 0, at: VTOFFSET.byteWidth.p) } + public static func endFixedSizeBinary(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createFixedSizeBinary( + _ fbb: inout FlatBufferBuilder, + byteWidth: Int32 = 0 + ) -> Offset { + let __start = org_apache_arrow_flatbuf_FixedSizeBinary.startFixedSizeBinary(&fbb) + org_apache_arrow_flatbuf_FixedSizeBinary.add(byteWidth: byteWidth, &fbb) + return org_apache_arrow_flatbuf_FixedSizeBinary.endFixedSizeBinary(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.byteWidth.p, fieldName: "byteWidth", required: false, type: Int32.self) + _v.finish() + } } public struct org_apache_arrow_flatbuf_Bool: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table - public static func getRootAsBool(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Bool { return org_apache_arrow_flatbuf_Bool(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsBool(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Bool { return org_apache_arrow_flatbuf_Bool(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startBool(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endBool(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startBool(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endBool(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + _v.finish() + } } /// Contains two child arrays, run_ends and values. /// The run_ends child array must be a 16/32/64-bit integer array -/// which encodes the indices at which the run with the value in +/// which encodes the indices at which the run with the value in /// each corresponding index in the values child array ends. 
/// Like list/struct types, the value array can be of any type. public struct org_apache_arrow_flatbuf_RunEndEncoded: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table - public static func getRootAsRunEndEncoded(bb: ByteBuffer) -> org_apache_arrow_flatbuf_RunEndEncoded { return org_apache_arrow_flatbuf_RunEndEncoded(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + public static func getRootAsRunEndEncoded(bb: ByteBuffer) -> org_apache_arrow_flatbuf_RunEndEncoded { return org_apache_arrow_flatbuf_RunEndEncoded(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - public static func startRunEndEncoded(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } - public static func endRunEndEncoded(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func startRunEndEncoded(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 0) } + public static func endRunEndEncoded(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - _v.finish() - } + public static func verify(_ verifier: inout 
Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + _v.finish() + } } /// Exact decimal value represented as an integer value in two's @@ -773,108 +773,108 @@ public struct org_apache_arrow_flatbuf_RunEndEncoded: FlatBufferObject, Verifiab /// in the Schema. public struct org_apache_arrow_flatbuf_Decimal: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsDecimal(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Decimal { return org_apache_arrow_flatbuf_Decimal(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case precision = 4 - case scale = 6 - case bitWidth = 8 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// Total number of decimal digits - public var precision: Int32 { let o = _accessor.offset(VTOFFSET.precision.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) } - /// Number of digits after the decimal point "." - public var scale: Int32 { let o = _accessor.offset(VTOFFSET.scale.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) } - /// Number of bits per value. The only accepted widths are 128 and 256. - /// We use bitWidth for consistency with Int::bitWidth. - public var bitWidth: Int32 { let o = _accessor.offset(VTOFFSET.bitWidth.v); return o == 0 ? 
128 : _accessor.readBuffer(of: Int32.self, at: o) } - public static func startDecimal(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 3) } - public static func add(precision: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: precision, def: 0, at: VTOFFSET.precision.p) } - public static func add(scale: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: scale, def: 0, at: VTOFFSET.scale.p) } - public static func add(bitWidth: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: bitWidth, def: 128, at: VTOFFSET.bitWidth.p) } - public static func endDecimal(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createDecimal( - _ fbb: inout FlatBufferBuilder, - precision: Int32 = 0, - scale: Int32 = 0, - bitWidth: Int32 = 128 - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Decimal.startDecimal(&fbb) - org_apache_arrow_flatbuf_Decimal.add(precision: precision, &fbb) - org_apache_arrow_flatbuf_Decimal.add(scale: scale, &fbb) - org_apache_arrow_flatbuf_Decimal.add(bitWidth: bitWidth, &fbb) - return org_apache_arrow_flatbuf_Decimal.endDecimal(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.precision.p, fieldName: "precision", required: false, type: Int32.self) - try _v.visit(field: VTOFFSET.scale.p, fieldName: "scale", required: false, type: Int32.self) - try _v.visit(field: VTOFFSET.bitWidth.p, fieldName: "bitWidth", required: false, type: Int32.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsDecimal(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Decimal { return org_apache_arrow_flatbuf_Decimal(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case precision = 4 + case scale = 6 + case bitWidth = 8 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// Total number of decimal digits + public var precision: Int32 { let o = _accessor.offset(VTOFFSET.precision.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) } + /// Number of digits after the decimal point "." + public var scale: Int32 { let o = _accessor.offset(VTOFFSET.scale.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) } + /// Number of bits per value. The only accepted widths are 128 and 256. + /// We use bitWidth for consistency with Int::bitWidth. + public var bitWidth: Int32 { let o = _accessor.offset(VTOFFSET.bitWidth.v); return o == 0 ? 
128 : _accessor.readBuffer(of: Int32.self, at: o) } + public static func startDecimal(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 3) } + public static func add(precision: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: precision, def: 0, at: VTOFFSET.precision.p) } + public static func add(scale: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: scale, def: 0, at: VTOFFSET.scale.p) } + public static func add(bitWidth: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: bitWidth, def: 128, at: VTOFFSET.bitWidth.p) } + public static func endDecimal(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createDecimal( + _ fbb: inout FlatBufferBuilder, + precision: Int32 = 0, + scale: Int32 = 0, + bitWidth: Int32 = 128 + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Decimal.startDecimal(&fbb) + org_apache_arrow_flatbuf_Decimal.add(precision: precision, &fbb) + org_apache_arrow_flatbuf_Decimal.add(scale: scale, &fbb) + org_apache_arrow_flatbuf_Decimal.add(bitWidth: bitWidth, &fbb) + return org_apache_arrow_flatbuf_Decimal.endDecimal(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.precision.p, fieldName: "precision", required: false, type: Int32.self) + try _v.visit(field: VTOFFSET.scale.p, fieldName: "scale", required: false, type: Int32.self) + try _v.visit(field: VTOFFSET.bitWidth.p, fieldName: "bitWidth", required: false, type: Int32.self) + _v.finish() + } } /// Date is either a 32-bit or 64-bit signed integer type representing an /// elapsed time since UNIX epoch (1970-01-01), stored in either of two units: -/// +/// /// * Milliseconds (64 bits) indicating UNIX time elapsed since the epoch (no /// leap seconds), where the values are evenly 
divisible by 86400000 /// * Days (32 bits) since the UNIX epoch public struct org_apache_arrow_flatbuf_Date: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsDate(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Date { return org_apache_arrow_flatbuf_Date(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case unit = 4 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var unit: org_apache_arrow_flatbuf_DateUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .millisecond : org_apache_arrow_flatbuf_DateUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? 
.millisecond } - public static func startDate(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } - public static func add(unit: org_apache_arrow_flatbuf_DateUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 1, at: VTOFFSET.unit.p) } - public static func endDate(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createDate( - _ fbb: inout FlatBufferBuilder, - unit: org_apache_arrow_flatbuf_DateUnit = .millisecond - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Date.startDate(&fbb) - org_apache_arrow_flatbuf_Date.add(unit: unit, &fbb) - return org_apache_arrow_flatbuf_Date.endDate(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_DateUnit.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsDate(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Date { return org_apache_arrow_flatbuf_Date(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case unit = 4 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var unit: org_apache_arrow_flatbuf_DateUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .millisecond : org_apache_arrow_flatbuf_DateUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? 
.millisecond } + public static func startDate(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } + public static func add(unit: org_apache_arrow_flatbuf_DateUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 1, at: VTOFFSET.unit.p) } + public static func endDate(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createDate( + _ fbb: inout FlatBufferBuilder, + unit: org_apache_arrow_flatbuf_DateUnit = .millisecond + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Date.startDate(&fbb) + org_apache_arrow_flatbuf_Date.add(unit: unit, &fbb) + return org_apache_arrow_flatbuf_Date.endDate(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_DateUnit.self) + _v.finish() + } } /// Time is either a 32-bit or 64-bit signed integer type representing an /// elapsed time since midnight, stored in either of four units: seconds, /// milliseconds, microseconds or nanoseconds. -/// +/// /// The integer `bitWidth` depends on the `unit` and must be one of the following: /// * SECOND and MILLISECOND: 32 bits /// * MICROSECOND and NANOSECOND: 64 bits -/// +/// /// The allowed values are between 0 (inclusive) and 86400 (=24*60*60) seconds /// (exclusive), adjusted for the time unit (for example, up to 86400000 /// exclusive for the MILLISECOND unit). @@ -883,102 +883,102 @@ public struct org_apache_arrow_flatbuf_Date: FlatBufferObject, Verifiable { /// into Arrow (for example by replacing the value 86400 with 86399). public struct org_apache_arrow_flatbuf_Time: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsTime(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Time { return org_apache_arrow_flatbuf_Time(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case unit = 4 - case bitWidth = 6 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var unit: org_apache_arrow_flatbuf_TimeUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .millisecond : org_apache_arrow_flatbuf_TimeUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .millisecond } - public var bitWidth: Int32 { let o = _accessor.offset(VTOFFSET.bitWidth.v); return o == 0 ? 32 : _accessor.readBuffer(of: Int32.self, at: o) } - public static func startTime(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } - public static func add(unit: org_apache_arrow_flatbuf_TimeUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 1, at: VTOFFSET.unit.p) } - public static func add(bitWidth: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: bitWidth, def: 32, at: VTOFFSET.bitWidth.p) } - public static func endTime(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createTime( - _ fbb: inout FlatBufferBuilder, - unit: org_apache_arrow_flatbuf_TimeUnit = .millisecond, - bitWidth: Int32 = 32 - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Time.startTime(&fbb) - org_apache_arrow_flatbuf_Time.add(unit: unit, &fbb) - org_apache_arrow_flatbuf_Time.add(bitWidth: bitWidth, &fbb) - return org_apache_arrow_flatbuf_Time.endTime(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of 
type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_TimeUnit.self) - try _v.visit(field: VTOFFSET.bitWidth.p, fieldName: "bitWidth", required: false, type: Int32.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsTime(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Time { return org_apache_arrow_flatbuf_Time(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case unit = 4 + case bitWidth = 6 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var unit: org_apache_arrow_flatbuf_TimeUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .millisecond : org_apache_arrow_flatbuf_TimeUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .millisecond } + public var bitWidth: Int32 { let o = _accessor.offset(VTOFFSET.bitWidth.v); return o == 0 ? 
32 : _accessor.readBuffer(of: Int32.self, at: o) } + public static func startTime(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } + public static func add(unit: org_apache_arrow_flatbuf_TimeUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 1, at: VTOFFSET.unit.p) } + public static func add(bitWidth: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: bitWidth, def: 32, at: VTOFFSET.bitWidth.p) } + public static func endTime(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createTime( + _ fbb: inout FlatBufferBuilder, + unit: org_apache_arrow_flatbuf_TimeUnit = .millisecond, + bitWidth: Int32 = 32 + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Time.startTime(&fbb) + org_apache_arrow_flatbuf_Time.add(unit: unit, &fbb) + org_apache_arrow_flatbuf_Time.add(bitWidth: bitWidth, &fbb) + return org_apache_arrow_flatbuf_Time.endTime(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_TimeUnit.self) + try _v.visit(field: VTOFFSET.bitWidth.p, fieldName: "bitWidth", required: false, type: Int32.self) + _v.finish() + } } /// Timestamp is a 64-bit signed integer representing an elapsed time since a /// fixed epoch, stored in either of four units: seconds, milliseconds, /// microseconds or nanoseconds, and is optionally annotated with a timezone. -/// +/// /// Timestamp values do not include any leap seconds (in other words, all /// days are considered 86400 seconds long). 
-/// +/// /// Timestamps with a non-empty timezone /// ------------------------------------ -/// +/// /// If a Timestamp column has a non-empty timezone value, its epoch is /// 1970-01-01 00:00:00 (January 1st 1970, midnight) in the *UTC* timezone /// (the Unix epoch), regardless of the Timestamp's own timezone. -/// +/// /// Therefore, timestamp values with a non-empty timezone correspond to /// physical points in time together with some additional information about /// how the data was obtained and/or how to display it (the timezone). -/// +/// /// For example, the timestamp value 0 with the timezone string "Europe/Paris" /// corresponds to "January 1st 1970, 00h00" in the UTC timezone, but the /// application may prefer to display it as "January 1st 1970, 01h00" in /// the Europe/Paris timezone (which is the same physical point in time). -/// +/// /// One consequence is that timestamp values with a non-empty timezone /// can be compared and ordered directly, since they all share the same /// well-known point of reference (the Unix epoch). -/// +/// /// Timestamps with an unset / empty timezone /// ----------------------------------------- -/// +/// /// If a Timestamp column has no timezone value, its epoch is /// 1970-01-01 00:00:00 (January 1st 1970, midnight) in an *unknown* timezone. -/// +/// /// Therefore, timestamp values without a timezone cannot be meaningfully /// interpreted as physical points in time, but only as calendar / clock /// indications ("wall clock time") in an unspecified timezone. -/// +/// /// For example, the timestamp value 0 with an empty timezone string /// corresponds to "January 1st 1970, 00h00" in an unknown timezone: there /// is not enough information to interpret it as a well-defined physical /// point in time. -/// +/// /// One consequence is that timestamp values without a timezone cannot /// be reliably compared or ordered, since they may have different points of /// reference. 
In particular, it is *not* possible to interpret an unset /// or empty timezone as the same as "UTC". -/// +/// /// Conversion between timezones /// ---------------------------- -/// +/// /// If a Timestamp column has a non-empty timezone, changing the timezone /// to a different non-empty value is a metadata-only operation: /// the timestamp values need not change as their point of reference remains /// the same (the Unix epoch). -/// +/// /// However, if a Timestamp column has no timezone value, changing it to a /// non-empty value requires to think about the desired semantics. /// One possibility is to assume that the original timestamp values are @@ -987,37 +987,37 @@ public struct org_apache_arrow_flatbuf_Time: FlatBufferObject, Verifiable { /// empty to "Europe/Paris" would require converting the timestamp values /// from "Europe/Paris" to "UTC", which seems counter-intuitive but is /// nevertheless correct). -/// +/// /// Guidelines for encoding data from external libraries /// ---------------------------------------------------- -/// +/// /// Date & time libraries often have multiple different data types for temporal /// data. In order to ease interoperability between different implementations the /// Arrow project has some recommendations for encoding these types into a Timestamp /// column. -/// +/// /// An "instant" represents a physical point in time that has no relevant timezone /// (for example, astronomical data). To encode an instant, use a Timestamp with /// the timezone string set to "UTC", and make sure the Timestamp values /// are relative to the UTC epoch (January 1st 1970, midnight). -/// +/// /// A "zoned date-time" represents a physical point in time annotated with an /// informative timezone (for example, the timezone in which the data was /// recorded). 
To encode a zoned date-time, use a Timestamp with the timezone /// string set to the name of the timezone, and make sure the Timestamp values /// are relative to the UTC epoch (January 1st 1970, midnight). -/// +/// /// (There is some ambiguity between an instant and a zoned date-time with the /// UTC timezone. Both of these are stored the same in Arrow. Typically, /// this distinction does not matter. If it does, then an application should /// use custom metadata or an extension type to distinguish between the two cases.) -/// +/// /// An "offset date-time" represents a physical point in time combined with an /// explicit offset from UTC. To encode an offset date-time, use a Timestamp /// with the timezone string set to the numeric timezone offset string /// (e.g. "+03:00"), and make sure the Timestamp values are relative to /// the UTC epoch (January 1st 1970, midnight). -/// +/// /// A "naive date-time" (also called "local date-time" in some libraries) /// represents a wall clock time combined with a calendar date, but with /// no indication of how to map this information to a physical point in time. @@ -1031,130 +1031,130 @@ public struct org_apache_arrow_flatbuf_Time: FlatBufferObject, Verifiable { /// be encoded as timestamp value 0. public struct org_apache_arrow_flatbuf_Timestamp: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsTimestamp(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Timestamp { return org_apache_arrow_flatbuf_Timestamp(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case unit = 4 - case timezone = 6 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var unit: org_apache_arrow_flatbuf_TimeUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .second : org_apache_arrow_flatbuf_TimeUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .second } - /// The timezone is an optional string indicating the name of a timezone, - /// one of: - /// - /// * As used in the Olson timezone database (the "tz database" or - /// "tzdata"), such as "America/New_York". - /// * An absolute timezone offset of the form "+XX:XX" or "-XX:XX", - /// such as "+07:30". - /// - /// Whether a timezone string is present indicates different semantics about - /// the data (see above). - public var timezone: String? { let o = _accessor.offset(VTOFFSET.timezone.v); return o == 0 ? nil : _accessor.string(at: o) } - public var timezoneSegmentArray: [UInt8]? 
{ return _accessor.getVector(at: VTOFFSET.timezone.v) } - public static func startTimestamp(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } - public static func add(unit: org_apache_arrow_flatbuf_TimeUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 0, at: VTOFFSET.unit.p) } - public static func add(timezone: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: timezone, at: VTOFFSET.timezone.p) } - public static func endTimestamp(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createTimestamp( - _ fbb: inout FlatBufferBuilder, - unit: org_apache_arrow_flatbuf_TimeUnit = .second, - timezoneOffset timezone: Offset = Offset() - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Timestamp.startTimestamp(&fbb) - org_apache_arrow_flatbuf_Timestamp.add(unit: unit, &fbb) - org_apache_arrow_flatbuf_Timestamp.add(timezone: timezone, &fbb) - return org_apache_arrow_flatbuf_Timestamp.endTimestamp(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_TimeUnit.self) - try _v.visit(field: VTOFFSET.timezone.p, fieldName: "timezone", required: false, type: ForwardOffset.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsTimestamp(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Timestamp { return org_apache_arrow_flatbuf_Timestamp(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case unit = 4 + case timezone = 6 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var unit: org_apache_arrow_flatbuf_TimeUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .second : org_apache_arrow_flatbuf_TimeUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .second } + /// The timezone is an optional string indicating the name of a timezone, + /// one of: + /// + /// * As used in the Olson timezone database (the "tz database" or + /// "tzdata"), such as "America/New_York". + /// * An absolute timezone offset of the form "+XX:XX" or "-XX:XX", + /// such as "+07:30". + /// + /// Whether a timezone string is present indicates different semantics about + /// the data (see above). + public var timezone: String? { let o = _accessor.offset(VTOFFSET.timezone.v); return o == 0 ? nil : _accessor.string(at: o) } + public var timezoneSegmentArray: [UInt8]? 
{ return _accessor.getVector(at: VTOFFSET.timezone.v) } + public static func startTimestamp(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } + public static func add(unit: org_apache_arrow_flatbuf_TimeUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 0, at: VTOFFSET.unit.p) } + public static func add(timezone: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: timezone, at: VTOFFSET.timezone.p) } + public static func endTimestamp(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createTimestamp( + _ fbb: inout FlatBufferBuilder, + unit: org_apache_arrow_flatbuf_TimeUnit = .second, + timezoneOffset timezone: Offset = Offset() + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Timestamp.startTimestamp(&fbb) + org_apache_arrow_flatbuf_Timestamp.add(unit: unit, &fbb) + org_apache_arrow_flatbuf_Timestamp.add(timezone: timezone, &fbb) + return org_apache_arrow_flatbuf_Timestamp.endTimestamp(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_TimeUnit.self) + try _v.visit(field: VTOFFSET.timezone.p, fieldName: "timezone", required: false, type: ForwardOffset.self) + _v.finish() + } } public struct org_apache_arrow_flatbuf_Interval: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsInterval(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Interval { return org_apache_arrow_flatbuf_Interval(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case unit = 4 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var unit: org_apache_arrow_flatbuf_IntervalUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .yearMonth : org_apache_arrow_flatbuf_IntervalUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .yearMonth } - public static func startInterval(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } - public static func add(unit: org_apache_arrow_flatbuf_IntervalUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 0, at: VTOFFSET.unit.p) } - public static func endInterval(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createInterval( - _ fbb: inout FlatBufferBuilder, - unit: org_apache_arrow_flatbuf_IntervalUnit = .yearMonth - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Interval.startInterval(&fbb) - org_apache_arrow_flatbuf_Interval.add(unit: unit, &fbb) - return org_apache_arrow_flatbuf_Interval.endInterval(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_IntervalUnit.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsInterval(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Interval { return org_apache_arrow_flatbuf_Interval(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case unit = 4 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var unit: org_apache_arrow_flatbuf_IntervalUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .yearMonth : org_apache_arrow_flatbuf_IntervalUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .yearMonth } + public static func startInterval(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } + public static func add(unit: org_apache_arrow_flatbuf_IntervalUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 0, at: VTOFFSET.unit.p) } + public static func endInterval(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createInterval( + _ fbb: inout FlatBufferBuilder, + unit: org_apache_arrow_flatbuf_IntervalUnit = .yearMonth + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Interval.startInterval(&fbb) + org_apache_arrow_flatbuf_Interval.add(unit: unit, &fbb) + return org_apache_arrow_flatbuf_Interval.endInterval(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_IntervalUnit.self) + _v.finish() + } } public struct org_apache_arrow_flatbuf_Duration: FlatBufferObject, Verifiable { - static func 
validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsDuration(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Duration { return org_apache_arrow_flatbuf_Duration(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case unit = 4 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var unit: org_apache_arrow_flatbuf_TimeUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .millisecond : org_apache_arrow_flatbuf_TimeUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .millisecond } - public static func startDuration(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } - public static func add(unit: org_apache_arrow_flatbuf_TimeUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 1, at: VTOFFSET.unit.p) } - public static func endDuration(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createDuration( - _ fbb: inout FlatBufferBuilder, - unit: org_apache_arrow_flatbuf_TimeUnit = .millisecond - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Duration.startDuration(&fbb) - org_apache_arrow_flatbuf_Duration.add(unit: unit, &fbb) - return org_apache_arrow_flatbuf_Duration.endDuration(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_TimeUnit.self) - _v.finish() - } + static func 
validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsDuration(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Duration { return org_apache_arrow_flatbuf_Duration(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case unit = 4 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var unit: org_apache_arrow_flatbuf_TimeUnit { let o = _accessor.offset(VTOFFSET.unit.v); return o == 0 ? .millisecond : org_apache_arrow_flatbuf_TimeUnit(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .millisecond } + public static func startDuration(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 1) } + public static func add(unit: org_apache_arrow_flatbuf_TimeUnit, _ fbb: inout FlatBufferBuilder) { fbb.add(element: unit.rawValue, def: 1, at: VTOFFSET.unit.p) } + public static func endDuration(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createDuration( + _ fbb: inout FlatBufferBuilder, + unit: org_apache_arrow_flatbuf_TimeUnit = .millisecond + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Duration.startDuration(&fbb) + org_apache_arrow_flatbuf_Duration.add(unit: unit, &fbb) + return org_apache_arrow_flatbuf_Duration.endDuration(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.unit.p, fieldName: "unit", required: false, type: org_apache_arrow_flatbuf_TimeUnit.self) + _v.finish() + } } /// 
---------------------------------------------------------------------- @@ -1162,115 +1162,115 @@ public struct org_apache_arrow_flatbuf_Duration: FlatBufferObject, Verifiable { /// key namespacing is the responsibility of the user public struct org_apache_arrow_flatbuf_KeyValue: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsKeyValue(bb: ByteBuffer) -> org_apache_arrow_flatbuf_KeyValue { return org_apache_arrow_flatbuf_KeyValue(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case key = 4 - case value = 6 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var key: String? { let o = _accessor.offset(VTOFFSET.key.v); return o == 0 ? nil : _accessor.string(at: o) } - public var keySegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.key.v) } - public var value: String? { let o = _accessor.offset(VTOFFSET.value.v); return o == 0 ? nil : _accessor.string(at: o) } - public var valueSegmentArray: [UInt8]? 
{ return _accessor.getVector(at: VTOFFSET.value.v) } - public static func startKeyValue(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } - public static func add(key: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: key, at: VTOFFSET.key.p) } - public static func add(value: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: value, at: VTOFFSET.value.p) } - public static func endKeyValue(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createKeyValue( - _ fbb: inout FlatBufferBuilder, - keyOffset key: Offset = Offset(), - valueOffset value: Offset = Offset() - ) -> Offset { - let __start = org_apache_arrow_flatbuf_KeyValue.startKeyValue(&fbb) - org_apache_arrow_flatbuf_KeyValue.add(key: key, &fbb) - org_apache_arrow_flatbuf_KeyValue.add(value: value, &fbb) - return org_apache_arrow_flatbuf_KeyValue.endKeyValue(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.key.p, fieldName: "key", required: false, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.value.p, fieldName: "value", required: false, type: ForwardOffset.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsKeyValue(bb: ByteBuffer) -> org_apache_arrow_flatbuf_KeyValue { return org_apache_arrow_flatbuf_KeyValue(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case key = 4 + case value = 6 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var key: String? { let o = _accessor.offset(VTOFFSET.key.v); return o == 0 ? nil : _accessor.string(at: o) } + public var keySegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.key.v) } + public var value: String? { let o = _accessor.offset(VTOFFSET.value.v); return o == 0 ? nil : _accessor.string(at: o) } + public var valueSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.value.v) } + public static func startKeyValue(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } + public static func add(key: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: key, at: VTOFFSET.key.p) } + public static func add(value: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: value, at: VTOFFSET.value.p) } + public static func endKeyValue(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createKeyValue( + _ fbb: inout FlatBufferBuilder, + keyOffset key: Offset = Offset(), + valueOffset value: Offset = Offset() + ) -> Offset { + let __start = org_apache_arrow_flatbuf_KeyValue.startKeyValue(&fbb) + org_apache_arrow_flatbuf_KeyValue.add(key: key, &fbb) + org_apache_arrow_flatbuf_KeyValue.add(value: value, &fbb) + return org_apache_arrow_flatbuf_KeyValue.endKeyValue(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: 
Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.key.p, fieldName: "key", required: false, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.value.p, fieldName: "value", required: false, type: ForwardOffset.self) + _v.finish() + } } public struct org_apache_arrow_flatbuf_DictionaryEncoding: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsDictionaryEncoding(bb: ByteBuffer) -> org_apache_arrow_flatbuf_DictionaryEncoding { return org_apache_arrow_flatbuf_DictionaryEncoding(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case id = 4 - case indexType = 6 - case isOrdered = 8 - case dictionaryKind = 10 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// The known dictionary id in the application where this data is used. In - /// the file or streaming formats, the dictionary ids are found in the - /// DictionaryBatch messages - public var id: Int64 { let o = _accessor.offset(VTOFFSET.id.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } - /// The dictionary indices are constrained to be non-negative integers. If - /// this field is null, the indices must be signed int32. To maximize - /// cross-language compatibility and performance, implementations are - /// recommended to prefer signed integer types over unsigned integer types - /// and to avoid uint64 indices unless they are required by an application. - public var indexType: org_apache_arrow_flatbuf_Int? { let o = _accessor.offset(VTOFFSET.indexType.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - /// By default, dictionaries are not ordered, or the order does not have - /// semantic meaning. In some statistical, applications, dictionary-encoding - /// is used to represent ordered categorical data, and we provide a way to - /// preserve that metadata here - public var isOrdered: Bool { let o = _accessor.offset(VTOFFSET.isOrdered.v); return o == 0 ? false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } - public var dictionaryKind: org_apache_arrow_flatbuf_DictionaryKind { let o = _accessor.offset(VTOFFSET.dictionaryKind.v); return o == 0 ? .densearray : org_apache_arrow_flatbuf_DictionaryKind(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .densearray } - public static func startDictionaryEncoding(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 4) } - public static func add(id: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: id, def: 0, at: VTOFFSET.id.p) } - public static func add(indexType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indexType, at: VTOFFSET.indexType.p) } - public static func add(isOrdered: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: isOrdered, def: false, - at: VTOFFSET.isOrdered.p) } - public static func add(dictionaryKind: org_apache_arrow_flatbuf_DictionaryKind, _ fbb: inout FlatBufferBuilder) { fbb.add(element: dictionaryKind.rawValue, def: 0, at: VTOFFSET.dictionaryKind.p) } - public static func endDictionaryEncoding(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createDictionaryEncoding( - _ fbb: inout FlatBufferBuilder, - id: Int64 = 0, - indexTypeOffset indexType: Offset = Offset(), - isOrdered: Bool = false, - dictionaryKind: org_apache_arrow_flatbuf_DictionaryKind = .densearray - ) -> Offset { - let __start = 
org_apache_arrow_flatbuf_DictionaryEncoding.startDictionaryEncoding(&fbb) - org_apache_arrow_flatbuf_DictionaryEncoding.add(id: id, &fbb) - org_apache_arrow_flatbuf_DictionaryEncoding.add(indexType: indexType, &fbb) - org_apache_arrow_flatbuf_DictionaryEncoding.add(isOrdered: isOrdered, &fbb) - org_apache_arrow_flatbuf_DictionaryEncoding.add(dictionaryKind: dictionaryKind, &fbb) - return org_apache_arrow_flatbuf_DictionaryEncoding.endDictionaryEncoding(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.id.p, fieldName: "id", required: false, type: Int64.self) - try _v.visit(field: VTOFFSET.indexType.p, fieldName: "indexType", required: false, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.isOrdered.p, fieldName: "isOrdered", required: false, type: Bool.self) - try _v.visit(field: VTOFFSET.dictionaryKind.p, fieldName: "dictionaryKind", required: false, type: org_apache_arrow_flatbuf_DictionaryKind.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsDictionaryEncoding(bb: ByteBuffer) -> org_apache_arrow_flatbuf_DictionaryEncoding { return org_apache_arrow_flatbuf_DictionaryEncoding(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case id = 4 + case indexType = 6 + case isOrdered = 8 + case dictionaryKind = 10 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// The known dictionary id in the application where this data is used. 
In + /// the file or streaming formats, the dictionary ids are found in the + /// DictionaryBatch messages + public var id: Int64 { let o = _accessor.offset(VTOFFSET.id.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } + /// The dictionary indices are constrained to be non-negative integers. If + /// this field is null, the indices must be signed int32. To maximize + /// cross-language compatibility and performance, implementations are + /// recommended to prefer signed integer types over unsigned integer types + /// and to avoid uint64 indices unless they are required by an application. + public var indexType: org_apache_arrow_flatbuf_Int? { let o = _accessor.offset(VTOFFSET.indexType.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + /// By default, dictionaries are not ordered, or the order does not have + /// semantic meaning. In some statistical, applications, dictionary-encoding + /// is used to represent ordered categorical data, and we provide a way to + /// preserve that metadata here + public var isOrdered: Bool { let o = _accessor.offset(VTOFFSET.isOrdered.v); return o == 0 ? false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } + public var dictionaryKind: org_apache_arrow_flatbuf_DictionaryKind { let o = _accessor.offset(VTOFFSET.dictionaryKind.v); return o == 0 ? .densearray : org_apache_arrow_flatbuf_DictionaryKind(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? 
.densearray } + public static func startDictionaryEncoding(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 4) } + public static func add(id: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: id, def: 0, at: VTOFFSET.id.p) } + public static func add(indexType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indexType, at: VTOFFSET.indexType.p) } + public static func add(isOrdered: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: isOrdered, def: false, + at: VTOFFSET.isOrdered.p) } + public static func add(dictionaryKind: org_apache_arrow_flatbuf_DictionaryKind, _ fbb: inout FlatBufferBuilder) { fbb.add(element: dictionaryKind.rawValue, def: 0, at: VTOFFSET.dictionaryKind.p) } + public static func endDictionaryEncoding(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createDictionaryEncoding( + _ fbb: inout FlatBufferBuilder, + id: Int64 = 0, + indexTypeOffset indexType: Offset = Offset(), + isOrdered: Bool = false, + dictionaryKind: org_apache_arrow_flatbuf_DictionaryKind = .densearray + ) -> Offset { + let __start = org_apache_arrow_flatbuf_DictionaryEncoding.startDictionaryEncoding(&fbb) + org_apache_arrow_flatbuf_DictionaryEncoding.add(id: id, &fbb) + org_apache_arrow_flatbuf_DictionaryEncoding.add(indexType: indexType, &fbb) + org_apache_arrow_flatbuf_DictionaryEncoding.add(isOrdered: isOrdered, &fbb) + org_apache_arrow_flatbuf_DictionaryEncoding.add(dictionaryKind: dictionaryKind, &fbb) + return org_apache_arrow_flatbuf_DictionaryEncoding.endDictionaryEncoding(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.id.p, fieldName: "id", required: false, type: Int64.self) + try _v.visit(field: VTOFFSET.indexType.p, fieldName: "indexType", required: false, 
type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.isOrdered.p, fieldName: "isOrdered", required: false, type: Bool.self) + try _v.visit(field: VTOFFSET.dictionaryKind.p, fieldName: "dictionaryKind", required: false, type: org_apache_arrow_flatbuf_DictionaryKind.self) + _v.finish() + } } /// ---------------------------------------------------------------------- @@ -1278,202 +1278,202 @@ public struct org_apache_arrow_flatbuf_DictionaryEncoding: FlatBufferObject, Ver /// nested type. public struct org_apache_arrow_flatbuf_Field: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsField(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Field { return org_apache_arrow_flatbuf_Field(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case name = 4 - case nullable = 6 - case typeType = 8 - case type = 10 - case dictionary = 12 - case children = 14 - case customMetadata = 16 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// Name is not required, in i.e. a List - public var name: String? { let o = _accessor.offset(VTOFFSET.name.v); return o == 0 ? nil : _accessor.string(at: o) } - public var nameSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.name.v) } - /// Whether or not this field can contain nulls. Should be true in general. - public var nullable: Bool { let o = _accessor.offset(VTOFFSET.nullable.v); return o == 0 ? false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } - public var typeType: org_apache_arrow_flatbuf_Type_ { let o = _accessor.offset(VTOFFSET.typeType.v); return o == 0 ? 
.none_ : org_apache_arrow_flatbuf_Type_(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } - /// This is the type of the decoded value if the field is dictionary encoded. - public func type(type: T.Type) -> T? { let o = _accessor.offset(VTOFFSET.type.v); return o == 0 ? nil : _accessor.union(o) } - /// Present only if the field is dictionary encoded. - public var dictionary: org_apache_arrow_flatbuf_DictionaryEncoding? { let o = _accessor.offset(VTOFFSET.dictionary.v); return o == 0 ? nil : org_apache_arrow_flatbuf_DictionaryEncoding(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - /// children apply only to nested data types like Struct, List and Union. For - /// primitive types children will have length 0. - public var hasChildren: Bool { let o = _accessor.offset(VTOFFSET.children.v); return o == 0 ? false : true } - public var childrenCount: Int32 { let o = _accessor.offset(VTOFFSET.children.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func children(at index: Int32) -> org_apache_arrow_flatbuf_Field? { let o = _accessor.offset(VTOFFSET.children.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Field(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } - /// User-defined metadata - public var hasCustomMetadata: Bool { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? false : true } - public var customMetadataCount: Int32 { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func customMetadata(at index: Int32) -> org_apache_arrow_flatbuf_KeyValue? { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_KeyValue(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } - public static func startField(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 7) } - public static func add(name: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: name, at: VTOFFSET.name.p) } - public static func add(nullable: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: nullable, def: false, - at: VTOFFSET.nullable.p) } - public static func add(typeType: org_apache_arrow_flatbuf_Type_, _ fbb: inout FlatBufferBuilder) { fbb.add(element: typeType.rawValue, def: 0, at: VTOFFSET.typeType.p) } - public static func add(type: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: type, at: VTOFFSET.type.p) } - public static func add(dictionary: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: dictionary, at: VTOFFSET.dictionary.p) } - public static func addVectorOf(children: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: children, at: VTOFFSET.children.p) } - public static func addVectorOf(customMetadata: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: customMetadata, at: VTOFFSET.customMetadata.p) } - public static func endField(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createField( - _ fbb: inout FlatBufferBuilder, - nameOffset name: Offset = Offset(), - nullable: Bool = false, - typeType: org_apache_arrow_flatbuf_Type_ = .none_, - typeOffset type: Offset = Offset(), - dictionaryOffset dictionary: Offset = Offset(), - childrenVectorOffset children: Offset = Offset(), - customMetadataVectorOffset customMetadata: Offset = Offset() - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Field.startField(&fbb) - org_apache_arrow_flatbuf_Field.add(name: name, &fbb) - org_apache_arrow_flatbuf_Field.add(nullable: nullable, &fbb) - org_apache_arrow_flatbuf_Field.add(typeType: typeType, 
&fbb) - org_apache_arrow_flatbuf_Field.add(type: type, &fbb) - org_apache_arrow_flatbuf_Field.add(dictionary: dictionary, &fbb) - org_apache_arrow_flatbuf_Field.addVectorOf(children: children, &fbb) - org_apache_arrow_flatbuf_Field.addVectorOf(customMetadata: customMetadata, &fbb) - return org_apache_arrow_flatbuf_Field.endField(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.name.p, fieldName: "name", required: false, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.nullable.p, fieldName: "nullable", required: false, type: Bool.self) - try _v.visit(unionKey: VTOFFSET.typeType.p, unionField: VTOFFSET.type.p, unionKeyName: "typeType", fieldName: "type", required: false, completion: { (verifier, key: org_apache_arrow_flatbuf_Type_, pos) in - switch key { - case .none_: - break // NOTE - SWIFT doesnt support none - case .null: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Null.self) - case .int: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Int.self) - case .floatingpoint: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FloatingPoint.self) - case .binary: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Binary.self) - case .utf8: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Utf8.self) - case .bool: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Bool.self) - case .decimal: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Decimal.self) - case .date: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Date.self) - case .time: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Time.self) - case .timestamp: - try 
ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Timestamp.self) - case .interval: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Interval.self) - case .list: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_List.self) - case .struct_: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Struct_.self) - case .union: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Union.self) - case .fixedsizebinary: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeBinary.self) - case .fixedsizelist: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeList.self) - case .map: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Map.self) - case .duration: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Duration.self) - case .largebinary: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeBinary.self) - case .largeutf8: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeUtf8.self) - case .largelist: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeList.self) - case .runendencoded: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_RunEndEncoded.self) - } - }) - try _v.visit(field: VTOFFSET.dictionary.p, fieldName: "dictionary", required: false, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.children.p, fieldName: "children", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_Field>>.self) - try _v.visit(field: VTOFFSET.customMetadata.p, fieldName: "customMetadata", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_KeyValue>>.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsField(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Field { return org_apache_arrow_flatbuf_Field(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case name = 4 + case nullable = 6 + case typeType = 8 + case type = 10 + case dictionary = 12 + case children = 14 + case customMetadata = 16 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// Name is not required, in i.e. a List + public var name: String? { let o = _accessor.offset(VTOFFSET.name.v); return o == 0 ? nil : _accessor.string(at: o) } + public var nameSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.name.v) } + /// Whether or not this field can contain nulls. Should be true in general. + public var nullable: Bool { let o = _accessor.offset(VTOFFSET.nullable.v); return o == 0 ? false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } + public var typeType: org_apache_arrow_flatbuf_Type_ { let o = _accessor.offset(VTOFFSET.typeType.v); return o == 0 ? .none_ : org_apache_arrow_flatbuf_Type_(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } + /// This is the type of the decoded value if the field is dictionary encoded. + public func type(type: T.Type) -> T? { let o = _accessor.offset(VTOFFSET.type.v); return o == 0 ? nil : _accessor.union(o) } + /// Present only if the field is dictionary encoded. + public var dictionary: org_apache_arrow_flatbuf_DictionaryEncoding? { let o = _accessor.offset(VTOFFSET.dictionary.v); return o == 0 ? nil : org_apache_arrow_flatbuf_DictionaryEncoding(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + /// children apply only to nested data types like Struct, List and Union. 
For + /// primitive types children will have length 0. + public var hasChildren: Bool { let o = _accessor.offset(VTOFFSET.children.v); return o == 0 ? false : true } + public var childrenCount: Int32 { let o = _accessor.offset(VTOFFSET.children.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func children(at index: Int32) -> org_apache_arrow_flatbuf_Field? { let o = _accessor.offset(VTOFFSET.children.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Field(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } + /// User-defined metadata + public var hasCustomMetadata: Bool { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? false : true } + public var customMetadataCount: Int32 { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func customMetadata(at index: Int32) -> org_apache_arrow_flatbuf_KeyValue? { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? nil : org_apache_arrow_flatbuf_KeyValue(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } + public static func startField(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 7) } + public static func add(name: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: name, at: VTOFFSET.name.p) } + public static func add(nullable: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: nullable, def: false, + at: VTOFFSET.nullable.p) } + public static func add(typeType: org_apache_arrow_flatbuf_Type_, _ fbb: inout FlatBufferBuilder) { fbb.add(element: typeType.rawValue, def: 0, at: VTOFFSET.typeType.p) } + public static func add(type: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: type, at: VTOFFSET.type.p) } + public static func add(dictionary: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: dictionary, at: VTOFFSET.dictionary.p) } + public static func addVectorOf(children: Offset, _ fbb: inout FlatBufferBuilder) { 
fbb.add(offset: children, at: VTOFFSET.children.p) } + public static func addVectorOf(customMetadata: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: customMetadata, at: VTOFFSET.customMetadata.p) } + public static func endField(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createField( + _ fbb: inout FlatBufferBuilder, + nameOffset name: Offset = Offset(), + nullable: Bool = false, + typeType: org_apache_arrow_flatbuf_Type_ = .none_, + typeOffset type: Offset = Offset(), + dictionaryOffset dictionary: Offset = Offset(), + childrenVectorOffset children: Offset = Offset(), + customMetadataVectorOffset customMetadata: Offset = Offset() + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Field.startField(&fbb) + org_apache_arrow_flatbuf_Field.add(name: name, &fbb) + org_apache_arrow_flatbuf_Field.add(nullable: nullable, &fbb) + org_apache_arrow_flatbuf_Field.add(typeType: typeType, &fbb) + org_apache_arrow_flatbuf_Field.add(type: type, &fbb) + org_apache_arrow_flatbuf_Field.add(dictionary: dictionary, &fbb) + org_apache_arrow_flatbuf_Field.addVectorOf(children: children, &fbb) + org_apache_arrow_flatbuf_Field.addVectorOf(customMetadata: customMetadata, &fbb) + return org_apache_arrow_flatbuf_Field.endField(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.name.p, fieldName: "name", required: false, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.nullable.p, fieldName: "nullable", required: false, type: Bool.self) + try _v.visit(unionKey: VTOFFSET.typeType.p, unionField: VTOFFSET.type.p, unionKeyName: "typeType", fieldName: "type", required: false, completion: { (verifier, key: org_apache_arrow_flatbuf_Type_, pos) in + switch key { + case .none_: + break // NOTE - SWIFT 
doesnt support none + case .null: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Null.self) + case .int: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Int.self) + case .floatingpoint: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FloatingPoint.self) + case .binary: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Binary.self) + case .utf8: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Utf8.self) + case .bool: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Bool.self) + case .decimal: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Decimal.self) + case .date: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Date.self) + case .time: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Time.self) + case .timestamp: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Timestamp.self) + case .interval: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Interval.self) + case .list: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_List.self) + case .struct_: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Struct_.self) + case .union: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Union.self) + case .fixedsizebinary: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeBinary.self) + case .fixedsizelist: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeList.self) + case .map: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Map.self) + case .duration: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Duration.self) + case .largebinary: + try ForwardOffset.verify(&verifier, at: 
pos, of: org_apache_arrow_flatbuf_LargeBinary.self) + case .largeutf8: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeUtf8.self) + case .largelist: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeList.self) + case .runendencoded: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_RunEndEncoded.self) + } + }) + try _v.visit(field: VTOFFSET.dictionary.p, fieldName: "dictionary", required: false, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.children.p, fieldName: "children", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_Field>>.self) + try _v.visit(field: VTOFFSET.customMetadata.p, fieldName: "customMetadata", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_KeyValue>>.self) + _v.finish() + } } /// ---------------------------------------------------------------------- /// A Schema describes the columns in a row batch public struct org_apache_arrow_flatbuf_Schema: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsSchema(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Schema { return org_apache_arrow_flatbuf_Schema(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case endianness = 4 - case fields = 6 - case customMetadata = 8 - case features = 10 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// endianness of the buffer - /// it is Little Endian by default - /// if endianness doesn't match the underlying system then the vectors need to be converted - public var endianness: org_apache_arrow_flatbuf_Endianness { let o = _accessor.offset(VTOFFSET.endianness.v); return o == 0 ? .little : org_apache_arrow_flatbuf_Endianness(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .little } - public var hasFields: Bool { let o = _accessor.offset(VTOFFSET.fields.v); return o == 0 ? false : true } - public var fieldsCount: Int32 { let o = _accessor.offset(VTOFFSET.fields.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func fields(at index: Int32) -> org_apache_arrow_flatbuf_Field? { let o = _accessor.offset(VTOFFSET.fields.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Field(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } - public var hasCustomMetadata: Bool { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? false : true } - public var customMetadataCount: Int32 { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func customMetadata(at index: Int32) -> org_apache_arrow_flatbuf_KeyValue? { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_KeyValue(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } - /// Features used in the stream/file. - public var hasFeatures: Bool { let o = _accessor.offset(VTOFFSET.features.v); return o == 0 ? false : true } - public var featuresCount: Int32 { let o = _accessor.offset(VTOFFSET.features.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func features(at index: Int32) -> org_apache_arrow_flatbuf_Feature? { let o = _accessor.offset(VTOFFSET.features.v); return o == 0 ? org_apache_arrow_flatbuf_Feature.unused : org_apache_arrow_flatbuf_Feature(rawValue: _accessor.directRead(of: Int64.self, offset: _accessor.vector(at: o) + index * 8)) } - public static func startSchema(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 4) } - public static func add(endianness: org_apache_arrow_flatbuf_Endianness, _ fbb: inout FlatBufferBuilder) { fbb.add(element: endianness.rawValue, def: 0, at: VTOFFSET.endianness.p) } - public static func addVectorOf(fields: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: fields, at: VTOFFSET.fields.p) } - public static func addVectorOf(customMetadata: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: customMetadata, at: VTOFFSET.customMetadata.p) } - public static func addVectorOf(features: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: features, at: VTOFFSET.features.p) } - public static func endSchema(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createSchema( - _ fbb: inout FlatBufferBuilder, - endianness: org_apache_arrow_flatbuf_Endianness = .little, - fieldsVectorOffset fields: Offset = Offset(), - customMetadataVectorOffset customMetadata: Offset = Offset(), - featuresVectorOffset features: Offset = Offset() - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Schema.startSchema(&fbb) - 
org_apache_arrow_flatbuf_Schema.add(endianness: endianness, &fbb) - org_apache_arrow_flatbuf_Schema.addVectorOf(fields: fields, &fbb) - org_apache_arrow_flatbuf_Schema.addVectorOf(customMetadata: customMetadata, &fbb) - org_apache_arrow_flatbuf_Schema.addVectorOf(features: features, &fbb) - return org_apache_arrow_flatbuf_Schema.endSchema(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.endianness.p, fieldName: "endianness", required: false, type: org_apache_arrow_flatbuf_Endianness.self) - try _v.visit(field: VTOFFSET.fields.p, fieldName: "fields", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_Field>>.self) - try _v.visit(field: VTOFFSET.customMetadata.p, fieldName: "customMetadata", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_KeyValue>>.self) - try _v.visit(field: VTOFFSET.features.p, fieldName: "features", required: false, type: ForwardOffset>.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsSchema(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Schema { return org_apache_arrow_flatbuf_Schema(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case endianness = 4 + case fields = 6 + case customMetadata = 8 + case features = 10 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// endianness of the buffer + /// it is Little Endian by default + /// if endianness doesn't match the underlying system then the vectors need to be converted + public var endianness: org_apache_arrow_flatbuf_Endianness { let o = _accessor.offset(VTOFFSET.endianness.v); return o == 0 ? .little : org_apache_arrow_flatbuf_Endianness(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .little } + public var hasFields: Bool { let o = _accessor.offset(VTOFFSET.fields.v); return o == 0 ? false : true } + public var fieldsCount: Int32 { let o = _accessor.offset(VTOFFSET.fields.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func fields(at index: Int32) -> org_apache_arrow_flatbuf_Field? { let o = _accessor.offset(VTOFFSET.fields.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Field(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } + public var hasCustomMetadata: Bool { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? false : true } + public var customMetadataCount: Int32 { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func customMetadata(at index: Int32) -> org_apache_arrow_flatbuf_KeyValue? { let o = _accessor.offset(VTOFFSET.customMetadata.v); return o == 0 ? 
nil : org_apache_arrow_flatbuf_KeyValue(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } + /// Features used in the stream/file. + public var hasFeatures: Bool { let o = _accessor.offset(VTOFFSET.features.v); return o == 0 ? false : true } + public var featuresCount: Int32 { let o = _accessor.offset(VTOFFSET.features.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func features(at index: Int32) -> org_apache_arrow_flatbuf_Feature? { let o = _accessor.offset(VTOFFSET.features.v); return o == 0 ? org_apache_arrow_flatbuf_Feature.unused : org_apache_arrow_flatbuf_Feature(rawValue: _accessor.directRead(of: Int64.self, offset: _accessor.vector(at: o) + index * 8)) } + public static func startSchema(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 4) } + public static func add(endianness: org_apache_arrow_flatbuf_Endianness, _ fbb: inout FlatBufferBuilder) { fbb.add(element: endianness.rawValue, def: 0, at: VTOFFSET.endianness.p) } + public static func addVectorOf(fields: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: fields, at: VTOFFSET.fields.p) } + public static func addVectorOf(customMetadata: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: customMetadata, at: VTOFFSET.customMetadata.p) } + public static func addVectorOf(features: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: features, at: VTOFFSET.features.p) } + public static func endSchema(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createSchema( + _ fbb: inout FlatBufferBuilder, + endianness: org_apache_arrow_flatbuf_Endianness = .little, + fieldsVectorOffset fields: Offset = Offset(), + customMetadataVectorOffset customMetadata: Offset = Offset(), + featuresVectorOffset features: Offset = Offset() + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Schema.startSchema(&fbb) + 
org_apache_arrow_flatbuf_Schema.add(endianness: endianness, &fbb) + org_apache_arrow_flatbuf_Schema.addVectorOf(fields: fields, &fbb) + org_apache_arrow_flatbuf_Schema.addVectorOf(customMetadata: customMetadata, &fbb) + org_apache_arrow_flatbuf_Schema.addVectorOf(features: features, &fbb) + return org_apache_arrow_flatbuf_Schema.endSchema(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.endianness.p, fieldName: "endianness", required: false, type: org_apache_arrow_flatbuf_Endianness.self) + try _v.visit(field: VTOFFSET.fields.p, fieldName: "fields", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_Field>>.self) + try _v.visit(field: VTOFFSET.customMetadata.p, fieldName: "customMetadata", required: false, type: ForwardOffset, org_apache_arrow_flatbuf_KeyValue>>.self) + try _v.visit(field: VTOFFSET.features.p, fieldName: "features", required: false, type: ForwardOffset>.self) + _v.finish() + } } diff --git a/Sources/Arrow/SparseTensor_generated.swift b/Sources/Arrow/SparseTensor_generated.swift index a2dfbdb..d3b52bd 100644 --- a/Sources/Arrow/SparseTensor_generated.swift +++ b/Sources/Arrow/SparseTensor_generated.swift @@ -22,48 +22,48 @@ import FlatBuffers public enum org_apache_arrow_flatbuf_SparseMatrixCompressedAxis: Int16, Enum, Verifiable { - public typealias T = Int16 - public static var byteSize: Int { return MemoryLayout.size } - public var value: Int16 { return self.rawValue } - case row = 0 - case column = 1 - - public static var max: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis { return .column } - public static var min: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis { return .row } + public typealias T = Int16 + public static var byteSize: Int { return MemoryLayout.size } + public var value: Int16 { return self.rawValue } + case row = 0 + case column = 
1 + + public static var max: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis { return .column } + public static var min: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis { return .row } } public enum org_apache_arrow_flatbuf_SparseTensorIndex: UInt8, UnionEnum { - public typealias T = UInt8 + public typealias T = UInt8 - public init?(value: T) { - self.init(rawValue: value) - } + public init?(value: T) { + self.init(rawValue: value) + } - public static var byteSize: Int { return MemoryLayout.size } - public var value: UInt8 { return self.rawValue } - case none_ = 0 - case sparsetensorindexcoo = 1 - case sparsematrixindexcsx = 2 - case sparsetensorindexcsf = 3 + public static var byteSize: Int { return MemoryLayout.size } + public var value: UInt8 { return self.rawValue } + case none_ = 0 + case sparsetensorindexcoo = 1 + case sparsematrixindexcsx = 2 + case sparsetensorindexcsf = 3 - public static var max: org_apache_arrow_flatbuf_SparseTensorIndex { return .sparsetensorindexcsf } - public static var min: org_apache_arrow_flatbuf_SparseTensorIndex { return .none_ } + public static var max: org_apache_arrow_flatbuf_SparseTensorIndex { return .sparsetensorindexcsf } + public static var min: org_apache_arrow_flatbuf_SparseTensorIndex { return .none_ } } /// ---------------------------------------------------------------------- /// EXPERIMENTAL: Data structures for sparse tensors /// Coordinate (COO) format of sparse tensor index. -/// +/// /// COO's index list are represented as a NxM matrix, /// where N is the number of non-zero values, /// and M is the number of dimensions of a sparse tensor. -/// +/// /// indicesBuffer stores the location and size of the data of this indices /// matrix. The value type and the stride of the indices matrix is /// specified in indicesType and indicesStrides fields. 
-/// +/// /// For example, let X be a 2x3x4x5 tensor, and it has the following /// 6 non-zero values: /// ```text @@ -86,450 +86,450 @@ public enum org_apache_arrow_flatbuf_SparseTensorIndex: UInt8, UnionEnum { /// the indices may not be sorted, or may have duplicated entries. public struct org_apache_arrow_flatbuf_SparseTensorIndexCOO: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsSparseTensorIndexCOO(bb: ByteBuffer) -> org_apache_arrow_flatbuf_SparseTensorIndexCOO { return org_apache_arrow_flatbuf_SparseTensorIndexCOO(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case indicesType = 4 - case indicesStrides = 6 - case indicesBuffer = 8 - case isCanonical = 10 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// The type of values in indicesBuffer - public var indicesType: org_apache_arrow_flatbuf_Int! { let o = _accessor.offset(VTOFFSET.indicesType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - /// Non-negative byte offsets to advance one value cell along each dimension - /// If omitted, default to row-major order (C-like). - public var hasIndicesStrides: Bool { let o = _accessor.offset(VTOFFSET.indicesStrides.v); return o == 0 ? false : true } - public var indicesStridesCount: Int32 { let o = _accessor.offset(VTOFFSET.indicesStrides.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func indicesStrides(at index: Int32) -> Int64 { let o = _accessor.offset(VTOFFSET.indicesStrides.v); return o == 0 ? 
0 : _accessor.directRead(of: Int64.self, offset: _accessor.vector(at: o) + index * 8) } - public var indicesStrides: [Int64] { return _accessor.getVector(at: VTOFFSET.indicesStrides.v) ?? [] } - /// The location and size of the indices matrix's data - public var indicesBuffer: org_apache_arrow_flatbuf_Buffer! { let o = _accessor.offset(VTOFFSET.indicesBuffer.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } - public var mutableIndicesBuffer: org_apache_arrow_flatbuf_Buffer_Mutable! { let o = _accessor.offset(VTOFFSET.indicesBuffer.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } - /// This flag is true if and only if the indices matrix is sorted in - /// row-major order, and does not have duplicated entries. - /// This sort order is the same as of Tensorflow's SparseTensor, - /// but it is inverse order of SciPy's canonical coo_matrix - /// (SciPy employs column-major order for its coo_matrix). - public var isCanonical: Bool { let o = _accessor.offset(VTOFFSET.isCanonical.v); return o == 0 ? 
false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } - public static func startSparseTensorIndexCOO(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 4) } - public static func add(indicesType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesType, at: VTOFFSET.indicesType.p) } - public static func addVectorOf(indicesStrides: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesStrides, at: VTOFFSET.indicesStrides.p) } - public static func add(indicesBuffer: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout FlatBufferBuilder) { guard let indicesBuffer = indicesBuffer else { return }; fbb.create(struct: indicesBuffer, position: VTOFFSET.indicesBuffer.p) } - public static func add(isCanonical: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: isCanonical, def: false, - at: VTOFFSET.isCanonical.p) } - public static func endSparseTensorIndexCOO(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [4, 8]); return end } - public static func createSparseTensorIndexCOO( - _ fbb: inout FlatBufferBuilder, - indicesTypeOffset indicesType: Offset, - indicesStridesVectorOffset indicesStrides: Offset = Offset(), - indicesBuffer: org_apache_arrow_flatbuf_Buffer, - isCanonical: Bool = false - ) -> Offset { - let __start = org_apache_arrow_flatbuf_SparseTensorIndexCOO.startSparseTensorIndexCOO(&fbb) - org_apache_arrow_flatbuf_SparseTensorIndexCOO.add(indicesType: indicesType, &fbb) - org_apache_arrow_flatbuf_SparseTensorIndexCOO.addVectorOf(indicesStrides: indicesStrides, &fbb) - org_apache_arrow_flatbuf_SparseTensorIndexCOO.add(indicesBuffer: indicesBuffer, &fbb) - org_apache_arrow_flatbuf_SparseTensorIndexCOO.add(isCanonical: isCanonical, &fbb) - return org_apache_arrow_flatbuf_SparseTensorIndexCOO.endSparseTensorIndexCOO(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) 
throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.indicesType.p, fieldName: "indicesType", required: true, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.indicesStrides.p, fieldName: "indicesStrides", required: false, type: ForwardOffset>.self) - try _v.visit(field: VTOFFSET.indicesBuffer.p, fieldName: "indicesBuffer", required: true, type: org_apache_arrow_flatbuf_Buffer.self) - try _v.visit(field: VTOFFSET.isCanonical.p, fieldName: "isCanonical", required: false, type: Bool.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsSparseTensorIndexCOO(bb: ByteBuffer) -> org_apache_arrow_flatbuf_SparseTensorIndexCOO { return org_apache_arrow_flatbuf_SparseTensorIndexCOO(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case indicesType = 4 + case indicesStrides = 6 + case indicesBuffer = 8 + case isCanonical = 10 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// The type of values in indicesBuffer + public var indicesType: org_apache_arrow_flatbuf_Int! { let o = _accessor.offset(VTOFFSET.indicesType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + /// Non-negative byte offsets to advance one value cell along each dimension + /// If omitted, default to row-major order (C-like). + public var hasIndicesStrides: Bool { let o = _accessor.offset(VTOFFSET.indicesStrides.v); return o == 0 ? false : true } + public var indicesStridesCount: Int32 { let o = _accessor.offset(VTOFFSET.indicesStrides.v); return o == 0 ? 
0 : _accessor.vector(count: o) } + public func indicesStrides(at index: Int32) -> Int64 { let o = _accessor.offset(VTOFFSET.indicesStrides.v); return o == 0 ? 0 : _accessor.directRead(of: Int64.self, offset: _accessor.vector(at: o) + index * 8) } + public var indicesStrides: [Int64] { return _accessor.getVector(at: VTOFFSET.indicesStrides.v) ?? [] } + /// The location and size of the indices matrix's data + public var indicesBuffer: org_apache_arrow_flatbuf_Buffer! { let o = _accessor.offset(VTOFFSET.indicesBuffer.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } + public var mutableIndicesBuffer: org_apache_arrow_flatbuf_Buffer_Mutable! { let o = _accessor.offset(VTOFFSET.indicesBuffer.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } + /// This flag is true if and only if the indices matrix is sorted in + /// row-major order, and does not have duplicated entries. + /// This sort order is the same as of Tensorflow's SparseTensor, + /// but it is inverse order of SciPy's canonical coo_matrix + /// (SciPy employs column-major order for its coo_matrix). + public var isCanonical: Bool { let o = _accessor.offset(VTOFFSET.isCanonical.v); return o == 0 ? 
false : 0 != _accessor.readBuffer(of: Byte.self, at: o) } + public static func startSparseTensorIndexCOO(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 4) } + public static func add(indicesType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesType, at: VTOFFSET.indicesType.p) } + public static func addVectorOf(indicesStrides: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesStrides, at: VTOFFSET.indicesStrides.p) } + public static func add(indicesBuffer: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout FlatBufferBuilder) { guard let indicesBuffer = indicesBuffer else { return }; fbb.create(struct: indicesBuffer, position: VTOFFSET.indicesBuffer.p) } + public static func add(isCanonical: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: isCanonical, def: false, + at: VTOFFSET.isCanonical.p) } + public static func endSparseTensorIndexCOO(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [4, 8]); return end } + public static func createSparseTensorIndexCOO( + _ fbb: inout FlatBufferBuilder, + indicesTypeOffset indicesType: Offset, + indicesStridesVectorOffset indicesStrides: Offset = Offset(), + indicesBuffer: org_apache_arrow_flatbuf_Buffer, + isCanonical: Bool = false + ) -> Offset { + let __start = org_apache_arrow_flatbuf_SparseTensorIndexCOO.startSparseTensorIndexCOO(&fbb) + org_apache_arrow_flatbuf_SparseTensorIndexCOO.add(indicesType: indicesType, &fbb) + org_apache_arrow_flatbuf_SparseTensorIndexCOO.addVectorOf(indicesStrides: indicesStrides, &fbb) + org_apache_arrow_flatbuf_SparseTensorIndexCOO.add(indicesBuffer: indicesBuffer, &fbb) + org_apache_arrow_flatbuf_SparseTensorIndexCOO.add(isCanonical: isCanonical, &fbb) + return org_apache_arrow_flatbuf_SparseTensorIndexCOO.endSparseTensorIndexCOO(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) 
throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.indicesType.p, fieldName: "indicesType", required: true, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.indicesStrides.p, fieldName: "indicesStrides", required: false, type: ForwardOffset>.self) + try _v.visit(field: VTOFFSET.indicesBuffer.p, fieldName: "indicesBuffer", required: true, type: org_apache_arrow_flatbuf_Buffer.self) + try _v.visit(field: VTOFFSET.isCanonical.p, fieldName: "isCanonical", required: false, type: Bool.self) + _v.finish() + } } /// Compressed Sparse format, that is matrix-specific. public struct org_apache_arrow_flatbuf_SparseMatrixIndexCSX: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! { return _accessor.bb } - private var _accessor: Table - - public static func getRootAsSparseMatrixIndexCSX(bb: ByteBuffer) -> org_apache_arrow_flatbuf_SparseMatrixIndexCSX { return org_apache_arrow_flatbuf_SparseMatrixIndexCSX(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case compressedAxis = 4 - case indptrType = 6 - case indptrBuffer = 8 - case indicesType = 10 - case indicesBuffer = 12 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// Which axis, row or column, is compressed - public var compressedAxis: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis { let o = _accessor.offset(VTOFFSET.compressedAxis.v); return o == 0 ? .row : org_apache_arrow_flatbuf_SparseMatrixCompressedAxis(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .row } - /// The type of values in indptrBuffer - public var indptrType: org_apache_arrow_flatbuf_Int! 
{ let o = _accessor.offset(VTOFFSET.indptrType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - /// indptrBuffer stores the location and size of indptr array that - /// represents the range of the rows. - /// The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. - /// The length of this array is 1 + (the number of rows), and the type - /// of index value is long. - /// - /// For example, let X be the following 6x4 matrix: - /// ```text - /// X := [[0, 1, 2, 0], - /// [0, 0, 3, 0], - /// [0, 4, 0, 5], - /// [0, 0, 0, 0], - /// [6, 0, 7, 8], - /// [0, 9, 0, 0]]. - /// ``` - /// The array of non-zero values in X is: - /// ```text - /// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. - /// ``` - /// And the indptr of X is: - /// ```text - /// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. - /// ``` - public var indptrBuffer: org_apache_arrow_flatbuf_Buffer! { let o = _accessor.offset(VTOFFSET.indptrBuffer.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } - public var mutableIndptrBuffer: org_apache_arrow_flatbuf_Buffer_Mutable! { let o = _accessor.offset(VTOFFSET.indptrBuffer.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } - /// The type of values in indicesBuffer - public var indicesType: org_apache_arrow_flatbuf_Int! { let o = _accessor.offset(VTOFFSET.indicesType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - /// indicesBuffer stores the location and size of the array that - /// contains the column indices of the corresponding non-zero values. - /// The type of index value is long. - /// - /// For example, the indices of the above X is: - /// ```text - /// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. - /// ``` - /// Note that the indices are sorted in lexicographical order for each row. - public var indicesBuffer: org_apache_arrow_flatbuf_Buffer! 
{ let o = _accessor.offset(VTOFFSET.indicesBuffer.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } - public var mutableIndicesBuffer: org_apache_arrow_flatbuf_Buffer_Mutable! { let o = _accessor.offset(VTOFFSET.indicesBuffer.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } - public static func startSparseMatrixIndexCSX(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } - public static func add(compressedAxis: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis, _ fbb: inout FlatBufferBuilder) { fbb.add(element: compressedAxis.rawValue, def: 0, at: VTOFFSET.compressedAxis.p) } - public static func add(indptrType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indptrType, at: VTOFFSET.indptrType.p) } - public static func add(indptrBuffer: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout FlatBufferBuilder) { guard let indptrBuffer = indptrBuffer else { return }; fbb.create(struct: indptrBuffer, position: VTOFFSET.indptrBuffer.p) } - public static func add(indicesType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesType, at: VTOFFSET.indicesType.p) } - public static func add(indicesBuffer: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout FlatBufferBuilder) { guard let indicesBuffer = indicesBuffer else { return }; fbb.create(struct: indicesBuffer, position: VTOFFSET.indicesBuffer.p) } - public static func endSparseMatrixIndexCSX(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [6, 8, 10, 12]); return end } - public static func createSparseMatrixIndexCSX( - _ fbb: inout FlatBufferBuilder, - compressedAxis: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis = .row, - indptrTypeOffset indptrType: Offset, - indptrBuffer: org_apache_arrow_flatbuf_Buffer, - indicesTypeOffset indicesType: Offset, - indicesBuffer: org_apache_arrow_flatbuf_Buffer - ) -> Offset { 
- let __start = org_apache_arrow_flatbuf_SparseMatrixIndexCSX.startSparseMatrixIndexCSX(&fbb) - org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(compressedAxis: compressedAxis, &fbb) - org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(indptrType: indptrType, &fbb) - org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(indptrBuffer: indptrBuffer, &fbb) - org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(indicesType: indicesType, &fbb) - org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(indicesBuffer: indicesBuffer, &fbb) - return org_apache_arrow_flatbuf_SparseMatrixIndexCSX.endSparseMatrixIndexCSX(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.compressedAxis.p, fieldName: "compressedAxis", required: false, type: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis.self) - try _v.visit(field: VTOFFSET.indptrType.p, fieldName: "indptrType", required: true, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.indptrBuffer.p, fieldName: "indptrBuffer", required: true, type: org_apache_arrow_flatbuf_Buffer.self) - try _v.visit(field: VTOFFSET.indicesType.p, fieldName: "indicesType", required: true, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.indicesBuffer.p, fieldName: "indicesBuffer", required: true, type: org_apache_arrow_flatbuf_Buffer.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsSparseMatrixIndexCSX(bb: ByteBuffer) -> org_apache_arrow_flatbuf_SparseMatrixIndexCSX { return org_apache_arrow_flatbuf_SparseMatrixIndexCSX(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case compressedAxis = 4 + case indptrType = 6 + case indptrBuffer = 8 + case indicesType = 10 + case indicesBuffer = 12 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// Which axis, row or column, is compressed + public var compressedAxis: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis { let o = _accessor.offset(VTOFFSET.compressedAxis.v); return o == 0 ? .row : org_apache_arrow_flatbuf_SparseMatrixCompressedAxis(rawValue: _accessor.readBuffer(of: Int16.self, at: o)) ?? .row } + /// The type of values in indptrBuffer + public var indptrType: org_apache_arrow_flatbuf_Int! { let o = _accessor.offset(VTOFFSET.indptrType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + /// indptrBuffer stores the location and size of indptr array that + /// represents the range of the rows. + /// The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. + /// The length of this array is 1 + (the number of rows), and the type + /// of index value is long. + /// + /// For example, let X be the following 6x4 matrix: + /// ```text + /// X := [[0, 1, 2, 0], + /// [0, 0, 3, 0], + /// [0, 4, 0, 5], + /// [0, 0, 0, 0], + /// [6, 0, 7, 8], + /// [0, 9, 0, 0]]. + /// ``` + /// The array of non-zero values in X is: + /// ```text + /// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. + /// ``` + /// And the indptr of X is: + /// ```text + /// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. 
+ /// ``` + public var indptrBuffer: org_apache_arrow_flatbuf_Buffer! { let o = _accessor.offset(VTOFFSET.indptrBuffer.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } + public var mutableIndptrBuffer: org_apache_arrow_flatbuf_Buffer_Mutable! { let o = _accessor.offset(VTOFFSET.indptrBuffer.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } + /// The type of values in indicesBuffer + public var indicesType: org_apache_arrow_flatbuf_Int! { let o = _accessor.offset(VTOFFSET.indicesType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + /// indicesBuffer stores the location and size of the array that + /// contains the column indices of the corresponding non-zero values. + /// The type of index value is long. + /// + /// For example, the indices of the above X is: + /// ```text + /// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. + /// ``` + /// Note that the indices are sorted in lexicographical order for each row. + public var indicesBuffer: org_apache_arrow_flatbuf_Buffer! { let o = _accessor.offset(VTOFFSET.indicesBuffer.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } + public var mutableIndicesBuffer: org_apache_arrow_flatbuf_Buffer_Mutable! 
{ let o = _accessor.offset(VTOFFSET.indicesBuffer.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } + public static func startSparseMatrixIndexCSX(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } + public static func add(compressedAxis: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis, _ fbb: inout FlatBufferBuilder) { fbb.add(element: compressedAxis.rawValue, def: 0, at: VTOFFSET.compressedAxis.p) } + public static func add(indptrType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indptrType, at: VTOFFSET.indptrType.p) } + public static func add(indptrBuffer: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout FlatBufferBuilder) { guard let indptrBuffer = indptrBuffer else { return }; fbb.create(struct: indptrBuffer, position: VTOFFSET.indptrBuffer.p) } + public static func add(indicesType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesType, at: VTOFFSET.indicesType.p) } + public static func add(indicesBuffer: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout FlatBufferBuilder) { guard let indicesBuffer = indicesBuffer else { return }; fbb.create(struct: indicesBuffer, position: VTOFFSET.indicesBuffer.p) } + public static func endSparseMatrixIndexCSX(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [6, 8, 10, 12]); return end } + public static func createSparseMatrixIndexCSX( + _ fbb: inout FlatBufferBuilder, + compressedAxis: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis = .row, + indptrTypeOffset indptrType: Offset, + indptrBuffer: org_apache_arrow_flatbuf_Buffer, + indicesTypeOffset indicesType: Offset, + indicesBuffer: org_apache_arrow_flatbuf_Buffer + ) -> Offset { + let __start = org_apache_arrow_flatbuf_SparseMatrixIndexCSX.startSparseMatrixIndexCSX(&fbb) + org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(compressedAxis: compressedAxis, &fbb) + 
org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(indptrType: indptrType, &fbb) + org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(indptrBuffer: indptrBuffer, &fbb) + org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(indicesType: indicesType, &fbb) + org_apache_arrow_flatbuf_SparseMatrixIndexCSX.add(indicesBuffer: indicesBuffer, &fbb) + return org_apache_arrow_flatbuf_SparseMatrixIndexCSX.endSparseMatrixIndexCSX(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.compressedAxis.p, fieldName: "compressedAxis", required: false, type: org_apache_arrow_flatbuf_SparseMatrixCompressedAxis.self) + try _v.visit(field: VTOFFSET.indptrType.p, fieldName: "indptrType", required: true, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.indptrBuffer.p, fieldName: "indptrBuffer", required: true, type: org_apache_arrow_flatbuf_Buffer.self) + try _v.visit(field: VTOFFSET.indicesType.p, fieldName: "indicesType", required: true, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.indicesBuffer.p, fieldName: "indicesBuffer", required: true, type: org_apache_arrow_flatbuf_Buffer.self) + _v.finish() + } } /// Compressed Sparse Fiber (CSF) sparse tensor index. public struct org_apache_arrow_flatbuf_SparseTensorIndexCSF: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsSparseTensorIndexCSF(bb: ByteBuffer) -> org_apache_arrow_flatbuf_SparseTensorIndexCSF { return org_apache_arrow_flatbuf_SparseTensorIndexCSF(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case indptrType = 4 - case indptrBuffers = 6 - case indicesType = 8 - case indicesBuffers = 10 - case axisOrder = 12 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// CSF is a generalization of compressed sparse row (CSR) index. - /// See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf) - /// - /// CSF index recursively compresses each dimension of a tensor into a set - /// of prefix trees. Each path from a root to leaf forms one tensor - /// non-zero index. CSF is implemented with two arrays of buffers and one - /// arrays of integers. - /// - /// For example, let X be a 2x3x4x5 tensor and let it have the following - /// 8 non-zero values: - /// ```text - /// X[0, 0, 0, 1] := 1 - /// X[0, 0, 0, 2] := 2 - /// X[0, 1, 0, 0] := 3 - /// X[0, 1, 0, 2] := 4 - /// X[0, 1, 1, 0] := 5 - /// X[1, 1, 1, 0] := 6 - /// X[1, 1, 1, 1] := 7 - /// X[1, 1, 1, 2] := 8 - /// ``` - /// As a prefix tree this would be represented as: - /// ```text - /// 0 1 - /// / \ | - /// 0 1 1 - /// / / \ | - /// 0 0 1 1 - /// /| /| | /| | - /// 1 2 0 2 0 0 1 2 - /// ``` - /// The type of values in indptrBuffers - public var indptrType: org_apache_arrow_flatbuf_Int! { let o = _accessor.offset(VTOFFSET.indptrType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - /// indptrBuffers stores the sparsity structure. - /// Each two consecutive dimensions in a tensor correspond to a buffer in - /// indptrBuffers. 
A pair of consecutive values at `indptrBuffers[dim][i]` - /// and `indptrBuffers[dim][i + 1]` signify a range of nodes in - /// `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node. - /// - /// For example, the indptrBuffers for the above X is: - /// ```text - /// indptrBuffer(X) = [ - /// [0, 2, 3], - /// [0, 1, 3, 4], - /// [0, 2, 4, 5, 8] - /// ]. - /// ``` - public var hasIndptrBuffers: Bool { let o = _accessor.offset(VTOFFSET.indptrBuffers.v); return o == 0 ? false : true } - public var indptrBuffersCount: Int32 { let o = _accessor.offset(VTOFFSET.indptrBuffers.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func indptrBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer? { let o = _accessor.offset(VTOFFSET.indptrBuffers.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Buffer.self, offset: _accessor.vector(at: o) + index * 16) } - public func mutableIndptrBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer_Mutable? { let o = _accessor.offset(VTOFFSET.indptrBuffers.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 16) } - /// The type of values in indicesBuffers - public var indicesType: org_apache_arrow_flatbuf_Int! { let o = _accessor.offset(VTOFFSET.indicesType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } - /// indicesBuffers stores values of nodes. - /// Each tensor dimension corresponds to a buffer in indicesBuffers. - /// For example, the indicesBuffers for the above X is: - /// ```text - /// indicesBuffer(X) = [ - /// [0, 1], - /// [0, 1, 1], - /// [0, 0, 1, 1], - /// [1, 2, 0, 2, 0, 0, 1, 2] - /// ]. - /// ``` - public var hasIndicesBuffers: Bool { let o = _accessor.offset(VTOFFSET.indicesBuffers.v); return o == 0 ? false : true } - public var indicesBuffersCount: Int32 { let o = _accessor.offset(VTOFFSET.indicesBuffers.v); return o == 0 ? 
0 : _accessor.vector(count: o) } - public func indicesBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer? { let o = _accessor.offset(VTOFFSET.indicesBuffers.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Buffer.self, offset: _accessor.vector(at: o) + index * 16) } - public func mutableIndicesBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer_Mutable? { let o = _accessor.offset(VTOFFSET.indicesBuffers.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 16) } - /// axisOrder stores the sequence in which dimensions were traversed to - /// produce the prefix tree. - /// For example, the axisOrder for the above X is: - /// ```text - /// axisOrder(X) = [0, 1, 2, 3]. - /// ``` - public var hasAxisOrder: Bool { let o = _accessor.offset(VTOFFSET.axisOrder.v); return o == 0 ? false : true } - public var axisOrderCount: Int32 { let o = _accessor.offset(VTOFFSET.axisOrder.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func axisOrder(at index: Int32) -> Int32 { let o = _accessor.offset(VTOFFSET.axisOrder.v); return o == 0 ? 0 : _accessor.directRead(of: Int32.self, offset: _accessor.vector(at: o) + index * 4) } - public var axisOrder: [Int32] { return _accessor.getVector(at: VTOFFSET.axisOrder.v) ?? 
[] } - public static func startSparseTensorIndexCSF(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } - public static func add(indptrType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indptrType, at: VTOFFSET.indptrType.p) } - public static func addVectorOf(indptrBuffers: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indptrBuffers, at: VTOFFSET.indptrBuffers.p) } - public static func startVectorOfIndptrBuffers(_ size: Int, in builder: inout FlatBufferBuilder) { - builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) - } - public static func add(indicesType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesType, at: VTOFFSET.indicesType.p) } - public static func addVectorOf(indicesBuffers: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesBuffers, at: VTOFFSET.indicesBuffers.p) } - public static func startVectorOfIndicesBuffers(_ size: Int, in builder: inout FlatBufferBuilder) { - builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) - } - public static func addVectorOf(axisOrder: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: axisOrder, at: VTOFFSET.axisOrder.p) } - public static func endSparseTensorIndexCSF(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [4, 6, 8, 10, 12]); return end } - public static func createSparseTensorIndexCSF( - _ fbb: inout FlatBufferBuilder, - indptrTypeOffset indptrType: Offset, - indptrBuffersVectorOffset indptrBuffers: Offset, - indicesTypeOffset indicesType: Offset, - indicesBuffersVectorOffset indicesBuffers: Offset, - axisOrderVectorOffset axisOrder: Offset - ) -> Offset { - let __start = org_apache_arrow_flatbuf_SparseTensorIndexCSF.startSparseTensorIndexCSF(&fbb) - org_apache_arrow_flatbuf_SparseTensorIndexCSF.add(indptrType: indptrType, &fbb) - 
org_apache_arrow_flatbuf_SparseTensorIndexCSF.addVectorOf(indptrBuffers: indptrBuffers, &fbb) - org_apache_arrow_flatbuf_SparseTensorIndexCSF.add(indicesType: indicesType, &fbb) - org_apache_arrow_flatbuf_SparseTensorIndexCSF.addVectorOf(indicesBuffers: indicesBuffers, &fbb) - org_apache_arrow_flatbuf_SparseTensorIndexCSF.addVectorOf(axisOrder: axisOrder, &fbb) - return org_apache_arrow_flatbuf_SparseTensorIndexCSF.endSparseTensorIndexCSF(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.indptrType.p, fieldName: "indptrType", required: true, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.indptrBuffers.p, fieldName: "indptrBuffers", required: true, type: ForwardOffset>.self) - try _v.visit(field: VTOFFSET.indicesType.p, fieldName: "indicesType", required: true, type: ForwardOffset.self) - try _v.visit(field: VTOFFSET.indicesBuffers.p, fieldName: "indicesBuffers", required: true, type: ForwardOffset>.self) - try _v.visit(field: VTOFFSET.axisOrder.p, fieldName: "axisOrder", required: true, type: ForwardOffset>.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsSparseTensorIndexCSF(bb: ByteBuffer) -> org_apache_arrow_flatbuf_SparseTensorIndexCSF { return org_apache_arrow_flatbuf_SparseTensorIndexCSF(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case indptrType = 4 + case indptrBuffers = 6 + case indicesType = 8 + case indicesBuffers = 10 + case axisOrder = 12 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// CSF is a generalization of compressed sparse row (CSR) index. + /// See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf) + /// + /// CSF index recursively compresses each dimension of a tensor into a set + /// of prefix trees. Each path from a root to leaf forms one tensor + /// non-zero index. CSF is implemented with two arrays of buffers and one + /// arrays of integers. + /// + /// For example, let X be a 2x3x4x5 tensor and let it have the following + /// 8 non-zero values: + /// ```text + /// X[0, 0, 0, 1] := 1 + /// X[0, 0, 0, 2] := 2 + /// X[0, 1, 0, 0] := 3 + /// X[0, 1, 0, 2] := 4 + /// X[0, 1, 1, 0] := 5 + /// X[1, 1, 1, 0] := 6 + /// X[1, 1, 1, 1] := 7 + /// X[1, 1, 1, 2] := 8 + /// ``` + /// As a prefix tree this would be represented as: + /// ```text + /// 0 1 + /// / \ | + /// 0 1 1 + /// / / \ | + /// 0 0 1 1 + /// /| /| | /| | + /// 1 2 0 2 0 0 1 2 + /// ``` + /// The type of values in indptrBuffers + public var indptrType: org_apache_arrow_flatbuf_Int! { let o = _accessor.offset(VTOFFSET.indptrType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + /// indptrBuffers stores the sparsity structure. + /// Each two consecutive dimensions in a tensor correspond to a buffer in + /// indptrBuffers. 
A pair of consecutive values at `indptrBuffers[dim][i]` + /// and `indptrBuffers[dim][i + 1]` signify a range of nodes in + /// `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node. + /// + /// For example, the indptrBuffers for the above X is: + /// ```text + /// indptrBuffer(X) = [ + /// [0, 2, 3], + /// [0, 1, 3, 4], + /// [0, 2, 4, 5, 8] + /// ]. + /// ``` + public var hasIndptrBuffers: Bool { let o = _accessor.offset(VTOFFSET.indptrBuffers.v); return o == 0 ? false : true } + public var indptrBuffersCount: Int32 { let o = _accessor.offset(VTOFFSET.indptrBuffers.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func indptrBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer? { let o = _accessor.offset(VTOFFSET.indptrBuffers.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Buffer.self, offset: _accessor.vector(at: o) + index * 16) } + public func mutableIndptrBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer_Mutable? { let o = _accessor.offset(VTOFFSET.indptrBuffers.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 16) } + /// The type of values in indicesBuffers + public var indicesType: org_apache_arrow_flatbuf_Int! { let o = _accessor.offset(VTOFFSET.indicesType.v); return org_apache_arrow_flatbuf_Int(_accessor.bb, o: _accessor.indirect(o + _accessor.position)) } + /// indicesBuffers stores values of nodes. + /// Each tensor dimension corresponds to a buffer in indicesBuffers. + /// For example, the indicesBuffers for the above X is: + /// ```text + /// indicesBuffer(X) = [ + /// [0, 1], + /// [0, 1, 1], + /// [0, 0, 1, 1], + /// [1, 2, 0, 2, 0, 0, 1, 2] + /// ]. + /// ``` + public var hasIndicesBuffers: Bool { let o = _accessor.offset(VTOFFSET.indicesBuffers.v); return o == 0 ? false : true } + public var indicesBuffersCount: Int32 { let o = _accessor.offset(VTOFFSET.indicesBuffers.v); return o == 0 ? 
0 : _accessor.vector(count: o) } + public func indicesBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer? { let o = _accessor.offset(VTOFFSET.indicesBuffers.v); return o == 0 ? nil : _accessor.directRead(of: org_apache_arrow_flatbuf_Buffer.self, offset: _accessor.vector(at: o) + index * 16) } + public func mutableIndicesBuffers(at index: Int32) -> org_apache_arrow_flatbuf_Buffer_Mutable? { let o = _accessor.offset(VTOFFSET.indicesBuffers.v); return o == 0 ? nil : org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: _accessor.vector(at: o) + index * 16) } + /// axisOrder stores the sequence in which dimensions were traversed to + /// produce the prefix tree. + /// For example, the axisOrder for the above X is: + /// ```text + /// axisOrder(X) = [0, 1, 2, 3]. + /// ``` + public var hasAxisOrder: Bool { let o = _accessor.offset(VTOFFSET.axisOrder.v); return o == 0 ? false : true } + public var axisOrderCount: Int32 { let o = _accessor.offset(VTOFFSET.axisOrder.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func axisOrder(at index: Int32) -> Int32 { let o = _accessor.offset(VTOFFSET.axisOrder.v); return o == 0 ? 0 : _accessor.directRead(of: Int32.self, offset: _accessor.vector(at: o) + index * 4) } + public var axisOrder: [Int32] { return _accessor.getVector(at: VTOFFSET.axisOrder.v) ?? 
[] } + public static func startSparseTensorIndexCSF(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } + public static func add(indptrType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indptrType, at: VTOFFSET.indptrType.p) } + public static func addVectorOf(indptrBuffers: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indptrBuffers, at: VTOFFSET.indptrBuffers.p) } + public static func startVectorOfIndptrBuffers(_ size: Int, in builder: inout FlatBufferBuilder) { + builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) + } + public static func add(indicesType: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesType, at: VTOFFSET.indicesType.p) } + public static func addVectorOf(indicesBuffers: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: indicesBuffers, at: VTOFFSET.indicesBuffers.p) } + public static func startVectorOfIndicesBuffers(_ size: Int, in builder: inout FlatBufferBuilder) { + builder.startVector(size * MemoryLayout.size, elementSize: MemoryLayout.alignment) + } + public static func addVectorOf(axisOrder: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: axisOrder, at: VTOFFSET.axisOrder.p) } + public static func endSparseTensorIndexCSF(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [4, 6, 8, 10, 12]); return end } + public static func createSparseTensorIndexCSF( + _ fbb: inout FlatBufferBuilder, + indptrTypeOffset indptrType: Offset, + indptrBuffersVectorOffset indptrBuffers: Offset, + indicesTypeOffset indicesType: Offset, + indicesBuffersVectorOffset indicesBuffers: Offset, + axisOrderVectorOffset axisOrder: Offset + ) -> Offset { + let __start = org_apache_arrow_flatbuf_SparseTensorIndexCSF.startSparseTensorIndexCSF(&fbb) + org_apache_arrow_flatbuf_SparseTensorIndexCSF.add(indptrType: indptrType, &fbb) + 
org_apache_arrow_flatbuf_SparseTensorIndexCSF.addVectorOf(indptrBuffers: indptrBuffers, &fbb) + org_apache_arrow_flatbuf_SparseTensorIndexCSF.add(indicesType: indicesType, &fbb) + org_apache_arrow_flatbuf_SparseTensorIndexCSF.addVectorOf(indicesBuffers: indicesBuffers, &fbb) + org_apache_arrow_flatbuf_SparseTensorIndexCSF.addVectorOf(axisOrder: axisOrder, &fbb) + return org_apache_arrow_flatbuf_SparseTensorIndexCSF.endSparseTensorIndexCSF(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.indptrType.p, fieldName: "indptrType", required: true, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.indptrBuffers.p, fieldName: "indptrBuffers", required: true, type: ForwardOffset>.self) + try _v.visit(field: VTOFFSET.indicesType.p, fieldName: "indicesType", required: true, type: ForwardOffset.self) + try _v.visit(field: VTOFFSET.indicesBuffers.p, fieldName: "indicesBuffers", required: true, type: ForwardOffset>.self) + try _v.visit(field: VTOFFSET.axisOrder.p, fieldName: "axisOrder", required: true, type: ForwardOffset>.self) + _v.finish() + } } public struct org_apache_arrow_flatbuf_SparseTensor: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsSparseTensor(bb: ByteBuffer) -> org_apache_arrow_flatbuf_SparseTensor { return org_apache_arrow_flatbuf_SparseTensor(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case typeType = 4 - case type = 6 - case shape = 8 - case nonZeroLength = 10 - case sparseIndexType = 12 - case sparseIndex = 14 - case data = 16 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var typeType: org_apache_arrow_flatbuf_Type_ { let o = _accessor.offset(VTOFFSET.typeType.v); return o == 0 ? .none_ : org_apache_arrow_flatbuf_Type_(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } - /// The type of data contained in a value cell. - /// Currently only fixed-width value types are supported, - /// no strings or nested types. - public func type(type: T.Type) -> T! { let o = _accessor.offset(VTOFFSET.type.v); return _accessor.union(o) } - /// The dimensions of the tensor, optionally named. - public var hasShape: Bool { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? false : true } - public var shapeCount: Int32 { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func shape(at index: Int32) -> org_apache_arrow_flatbuf_TensorDim? { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? nil : org_apache_arrow_flatbuf_TensorDim(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } - /// The number of non-zero values in a sparse tensor. - public var nonZeroLength: Int64 { let o = _accessor.offset(VTOFFSET.nonZeroLength.v); return o == 0 ? 
0 : _accessor.readBuffer(of: Int64.self, at: o) } - public var sparseIndexType: org_apache_arrow_flatbuf_SparseTensorIndex { let o = _accessor.offset(VTOFFSET.sparseIndexType.v); return o == 0 ? .none_ : org_apache_arrow_flatbuf_SparseTensorIndex(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } - /// Sparse tensor index - public func sparseIndex(type: T.Type) -> T! { let o = _accessor.offset(VTOFFSET.sparseIndex.v); return _accessor.union(o) } - /// The location and size of the tensor's data - public var data: org_apache_arrow_flatbuf_Buffer! { let o = _accessor.offset(VTOFFSET.data.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } - public var mutableData: org_apache_arrow_flatbuf_Buffer_Mutable! { let o = _accessor.offset(VTOFFSET.data.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } - public static func startSparseTensor(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 7) } - public static func add(typeType: org_apache_arrow_flatbuf_Type_, _ fbb: inout FlatBufferBuilder) { fbb.add(element: typeType.rawValue, def: 0, at: VTOFFSET.typeType.p) } - public static func add(type: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: type, at: VTOFFSET.type.p) } - public static func addVectorOf(shape: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: shape, at: VTOFFSET.shape.p) } - public static func add(nonZeroLength: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: nonZeroLength, def: 0, at: VTOFFSET.nonZeroLength.p) } - public static func add(sparseIndexType: org_apache_arrow_flatbuf_SparseTensorIndex, _ fbb: inout FlatBufferBuilder) { fbb.add(element: sparseIndexType.rawValue, def: 0, at: VTOFFSET.sparseIndexType.p) } - public static func add(sparseIndex: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: sparseIndex, at: VTOFFSET.sparseIndex.p) } - public static func add(data: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout 
FlatBufferBuilder) { guard let data = data else { return }; fbb.create(struct: data, position: VTOFFSET.data.p) } - public static func endSparseTensor(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [6, 8, 14, 16]); return end } - public static func createSparseTensor( - _ fbb: inout FlatBufferBuilder, - typeType: org_apache_arrow_flatbuf_Type_ = .none_, - typeOffset type: Offset, - shapeVectorOffset shape: Offset, - nonZeroLength: Int64 = 0, - sparseIndexType: org_apache_arrow_flatbuf_SparseTensorIndex = .none_, - sparseIndexOffset sparseIndex: Offset, - data: org_apache_arrow_flatbuf_Buffer - ) -> Offset { - let __start = org_apache_arrow_flatbuf_SparseTensor.startSparseTensor(&fbb) - org_apache_arrow_flatbuf_SparseTensor.add(typeType: typeType, &fbb) - org_apache_arrow_flatbuf_SparseTensor.add(type: type, &fbb) - org_apache_arrow_flatbuf_SparseTensor.addVectorOf(shape: shape, &fbb) - org_apache_arrow_flatbuf_SparseTensor.add(nonZeroLength: nonZeroLength, &fbb) - org_apache_arrow_flatbuf_SparseTensor.add(sparseIndexType: sparseIndexType, &fbb) - org_apache_arrow_flatbuf_SparseTensor.add(sparseIndex: sparseIndex, &fbb) - org_apache_arrow_flatbuf_SparseTensor.add(data: data, &fbb) - return org_apache_arrow_flatbuf_SparseTensor.endSparseTensor(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(unionKey: VTOFFSET.typeType.p, unionField: VTOFFSET.type.p, unionKeyName: "typeType", fieldName: "type", required: true, completion: { (verifier, key: org_apache_arrow_flatbuf_Type_, pos) in - switch key { - case .none_: - break // NOTE - SWIFT doesnt support none - case .null: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Null.self) - case .int: - try ForwardOffset.verify(&verifier, at: pos, of: 
org_apache_arrow_flatbuf_Int.self) - case .floatingpoint: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FloatingPoint.self) - case .binary: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Binary.self) - case .utf8: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Utf8.self) - case .bool: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Bool.self) - case .decimal: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Decimal.self) - case .date: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Date.self) - case .time: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Time.self) - case .timestamp: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Timestamp.self) - case .interval: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Interval.self) - case .list: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_List.self) - case .struct_: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Struct_.self) - case .union: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Union.self) - case .fixedsizebinary: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeBinary.self) - case .fixedsizelist: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeList.self) - case .map: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Map.self) - case .duration: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Duration.self) - case .largebinary: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeBinary.self) - case .largeutf8: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeUtf8.self) - case .largelist: - 
try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeList.self) - case .runendencoded: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_RunEndEncoded.self) - } - }) - try _v.visit(field: VTOFFSET.shape.p, fieldName: "shape", required: true, type: ForwardOffset, org_apache_arrow_flatbuf_TensorDim>>.self) - try _v.visit(field: VTOFFSET.nonZeroLength.p, fieldName: "nonZeroLength", required: false, type: Int64.self) - try _v.visit(unionKey: VTOFFSET.sparseIndexType.p, unionField: VTOFFSET.sparseIndex.p, unionKeyName: "sparseIndexType", fieldName: "sparseIndex", required: true, completion: { (verifier, key: org_apache_arrow_flatbuf_SparseTensorIndex, pos) in - switch key { - case .none_: - break // NOTE - SWIFT doesnt support none - case .sparsetensorindexcoo: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_SparseTensorIndexCOO.self) - case .sparsematrixindexcsx: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_SparseMatrixIndexCSX.self) - case .sparsetensorindexcsf: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_SparseTensorIndexCSF.self) - } - }) - try _v.visit(field: VTOFFSET.data.p, fieldName: "data", required: true, type: org_apache_arrow_flatbuf_Buffer.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsSparseTensor(bb: ByteBuffer) -> org_apache_arrow_flatbuf_SparseTensor { return org_apache_arrow_flatbuf_SparseTensor(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case typeType = 4 + case type = 6 + case shape = 8 + case nonZeroLength = 10 + case sparseIndexType = 12 + case sparseIndex = 14 + case data = 16 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var typeType: org_apache_arrow_flatbuf_Type_ { let o = _accessor.offset(VTOFFSET.typeType.v); return o == 0 ? .none_ : org_apache_arrow_flatbuf_Type_(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } + /// The type of data contained in a value cell. + /// Currently only fixed-width value types are supported, + /// no strings or nested types. + public func type(type: T.Type) -> T! { let o = _accessor.offset(VTOFFSET.type.v); return _accessor.union(o) } + /// The dimensions of the tensor, optionally named. + public var hasShape: Bool { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? false : true } + public var shapeCount: Int32 { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func shape(at index: Int32) -> org_apache_arrow_flatbuf_TensorDim? { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? nil : org_apache_arrow_flatbuf_TensorDim(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } + /// The number of non-zero values in a sparse tensor. + public var nonZeroLength: Int64 { let o = _accessor.offset(VTOFFSET.nonZeroLength.v); return o == 0 ? 
0 : _accessor.readBuffer(of: Int64.self, at: o) } + public var sparseIndexType: org_apache_arrow_flatbuf_SparseTensorIndex { let o = _accessor.offset(VTOFFSET.sparseIndexType.v); return o == 0 ? .none_ : org_apache_arrow_flatbuf_SparseTensorIndex(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } + /// Sparse tensor index + public func sparseIndex(type: T.Type) -> T! { let o = _accessor.offset(VTOFFSET.sparseIndex.v); return _accessor.union(o) } + /// The location and size of the tensor's data + public var data: org_apache_arrow_flatbuf_Buffer! { let o = _accessor.offset(VTOFFSET.data.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } + public var mutableData: org_apache_arrow_flatbuf_Buffer_Mutable! { let o = _accessor.offset(VTOFFSET.data.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } + public static func startSparseTensor(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 7) } + public static func add(typeType: org_apache_arrow_flatbuf_Type_, _ fbb: inout FlatBufferBuilder) { fbb.add(element: typeType.rawValue, def: 0, at: VTOFFSET.typeType.p) } + public static func add(type: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: type, at: VTOFFSET.type.p) } + public static func addVectorOf(shape: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: shape, at: VTOFFSET.shape.p) } + public static func add(nonZeroLength: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: nonZeroLength, def: 0, at: VTOFFSET.nonZeroLength.p) } + public static func add(sparseIndexType: org_apache_arrow_flatbuf_SparseTensorIndex, _ fbb: inout FlatBufferBuilder) { fbb.add(element: sparseIndexType.rawValue, def: 0, at: VTOFFSET.sparseIndexType.p) } + public static func add(sparseIndex: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: sparseIndex, at: VTOFFSET.sparseIndex.p) } + public static func add(data: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout 
FlatBufferBuilder) { guard let data = data else { return }; fbb.create(struct: data, position: VTOFFSET.data.p) } + public static func endSparseTensor(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [6, 8, 14, 16]); return end } + public static func createSparseTensor( + _ fbb: inout FlatBufferBuilder, + typeType: org_apache_arrow_flatbuf_Type_ = .none_, + typeOffset type: Offset, + shapeVectorOffset shape: Offset, + nonZeroLength: Int64 = 0, + sparseIndexType: org_apache_arrow_flatbuf_SparseTensorIndex = .none_, + sparseIndexOffset sparseIndex: Offset, + data: org_apache_arrow_flatbuf_Buffer + ) -> Offset { + let __start = org_apache_arrow_flatbuf_SparseTensor.startSparseTensor(&fbb) + org_apache_arrow_flatbuf_SparseTensor.add(typeType: typeType, &fbb) + org_apache_arrow_flatbuf_SparseTensor.add(type: type, &fbb) + org_apache_arrow_flatbuf_SparseTensor.addVectorOf(shape: shape, &fbb) + org_apache_arrow_flatbuf_SparseTensor.add(nonZeroLength: nonZeroLength, &fbb) + org_apache_arrow_flatbuf_SparseTensor.add(sparseIndexType: sparseIndexType, &fbb) + org_apache_arrow_flatbuf_SparseTensor.add(sparseIndex: sparseIndex, &fbb) + org_apache_arrow_flatbuf_SparseTensor.add(data: data, &fbb) + return org_apache_arrow_flatbuf_SparseTensor.endSparseTensor(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(unionKey: VTOFFSET.typeType.p, unionField: VTOFFSET.type.p, unionKeyName: "typeType", fieldName: "type", required: true, completion: { (verifier, key: org_apache_arrow_flatbuf_Type_, pos) in + switch key { + case .none_: + break // NOTE - SWIFT doesnt support none + case .null: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Null.self) + case .int: + try ForwardOffset.verify(&verifier, at: pos, of: 
org_apache_arrow_flatbuf_Int.self) + case .floatingpoint: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FloatingPoint.self) + case .binary: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Binary.self) + case .utf8: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Utf8.self) + case .bool: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Bool.self) + case .decimal: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Decimal.self) + case .date: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Date.self) + case .time: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Time.self) + case .timestamp: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Timestamp.self) + case .interval: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Interval.self) + case .list: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_List.self) + case .struct_: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Struct_.self) + case .union: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Union.self) + case .fixedsizebinary: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeBinary.self) + case .fixedsizelist: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeList.self) + case .map: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Map.self) + case .duration: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Duration.self) + case .largebinary: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeBinary.self) + case .largeutf8: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeUtf8.self) + case .largelist: + 
try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeList.self) + case .runendencoded: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_RunEndEncoded.self) + } + }) + try _v.visit(field: VTOFFSET.shape.p, fieldName: "shape", required: true, type: ForwardOffset, org_apache_arrow_flatbuf_TensorDim>>.self) + try _v.visit(field: VTOFFSET.nonZeroLength.p, fieldName: "nonZeroLength", required: false, type: Int64.self) + try _v.visit(unionKey: VTOFFSET.sparseIndexType.p, unionField: VTOFFSET.sparseIndex.p, unionKeyName: "sparseIndexType", fieldName: "sparseIndex", required: true, completion: { (verifier, key: org_apache_arrow_flatbuf_SparseTensorIndex, pos) in + switch key { + case .none_: + break // NOTE - SWIFT doesnt support none + case .sparsetensorindexcoo: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_SparseTensorIndexCOO.self) + case .sparsematrixindexcsx: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_SparseMatrixIndexCSX.self) + case .sparsetensorindexcsf: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_SparseTensorIndexCSF.self) + } + }) + try _v.visit(field: VTOFFSET.data.p, fieldName: "data", required: true, type: org_apache_arrow_flatbuf_Buffer.self) + _v.finish() + } } diff --git a/Sources/Arrow/Tensor_generated.swift b/Sources/Arrow/Tensor_generated.swift index e9778d0..9e588e1 100644 --- a/Sources/Arrow/Tensor_generated.swift +++ b/Sources/Arrow/Tensor_generated.swift @@ -26,168 +26,168 @@ import FlatBuffers /// Shape data for a single axis in a tensor public struct org_apache_arrow_flatbuf_TensorDim: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsTensorDim(bb: ByteBuffer) -> org_apache_arrow_flatbuf_TensorDim { return org_apache_arrow_flatbuf_TensorDim(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case size = 4 - case name = 6 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - /// Length of dimension - public var size: Int64 { let o = _accessor.offset(VTOFFSET.size.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } - /// Name of the dimension, optional - public var name: String? { let o = _accessor.offset(VTOFFSET.name.v); return o == 0 ? nil : _accessor.string(at: o) } - public var nameSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.name.v) } - public static func startTensorDim(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } - public static func add(size: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: size, def: 0, at: VTOFFSET.size.p) } - public static func add(name: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: name, at: VTOFFSET.name.p) } - public static func endTensorDim(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } - public static func createTensorDim( - _ fbb: inout FlatBufferBuilder, - size: Int64 = 0, - nameOffset name: Offset = Offset() - ) -> Offset { - let __start = org_apache_arrow_flatbuf_TensorDim.startTensorDim(&fbb) - org_apache_arrow_flatbuf_TensorDim.add(size: size, &fbb) - org_apache_arrow_flatbuf_TensorDim.add(name: name, &fbb) - return org_apache_arrow_flatbuf_TensorDim.endTensorDim(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of 
type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(field: VTOFFSET.size.p, fieldName: "size", required: false, type: Int64.self) - try _v.visit(field: VTOFFSET.name.p, fieldName: "name", required: false, type: ForwardOffset.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! { return _accessor.bb } + private var _accessor: Table + + public static func getRootAsTensorDim(bb: ByteBuffer) -> org_apache_arrow_flatbuf_TensorDim { return org_apache_arrow_flatbuf_TensorDim(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case size = 4 + case name = 6 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + /// Length of dimension + public var size: Int64 { let o = _accessor.offset(VTOFFSET.size.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int64.self, at: o) } + /// Name of the dimension, optional + public var name: String? { let o = _accessor.offset(VTOFFSET.name.v); return o == 0 ? nil : _accessor.string(at: o) } + public var nameSegmentArray: [UInt8]? 
{ return _accessor.getVector(at: VTOFFSET.name.v) } + public static func startTensorDim(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 2) } + public static func add(size: Int64, _ fbb: inout FlatBufferBuilder) { fbb.add(element: size, def: 0, at: VTOFFSET.size.p) } + public static func add(name: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: name, at: VTOFFSET.name.p) } + public static func endTensorDim(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end } + public static func createTensorDim( + _ fbb: inout FlatBufferBuilder, + size: Int64 = 0, + nameOffset name: Offset = Offset() + ) -> Offset { + let __start = org_apache_arrow_flatbuf_TensorDim.startTensorDim(&fbb) + org_apache_arrow_flatbuf_TensorDim.add(size: size, &fbb) + org_apache_arrow_flatbuf_TensorDim.add(name: name, &fbb) + return org_apache_arrow_flatbuf_TensorDim.endTensorDim(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(field: VTOFFSET.size.p, fieldName: "size", required: false, type: Int64.self) + try _v.visit(field: VTOFFSET.name.p, fieldName: "name", required: false, type: ForwardOffset.self) + _v.finish() + } } public struct org_apache_arrow_flatbuf_Tensor: FlatBufferObject, Verifiable { - static func validateVersion() { FlatBuffersVersion_23_1_4() } - public var __buffer: ByteBuffer! 
{ return _accessor.bb } - private var _accessor: Table - - public static func getRootAsTensor(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Tensor { return org_apache_arrow_flatbuf_Tensor(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } - - private init(_ t: Table) { _accessor = t } - public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } - - private enum VTOFFSET: VOffset { - case typeType = 4 - case type = 6 - case shape = 8 - case strides = 10 - case data = 12 - var v: Int32 { Int32(self.rawValue) } - var p: VOffset { self.rawValue } - } - - public var typeType: org_apache_arrow_flatbuf_Type_ { let o = _accessor.offset(VTOFFSET.typeType.v); return o == 0 ? .none_ : org_apache_arrow_flatbuf_Type_(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } - /// The type of data contained in a value cell. Currently only fixed-width - /// value types are supported, no strings or nested types - public func type(type: T.Type) -> T! { let o = _accessor.offset(VTOFFSET.type.v); return _accessor.union(o) } - /// The dimensions of the tensor, optionally named - public var hasShape: Bool { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? false : true } - public var shapeCount: Int32 { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? 0 : _accessor.vector(count: o) } - public func shape(at index: Int32) -> org_apache_arrow_flatbuf_TensorDim? { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? nil : org_apache_arrow_flatbuf_TensorDim(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } - /// Non-negative byte offsets to advance one value cell along each dimension - /// If omitted, default to row-major order (C-like). - public var hasStrides: Bool { let o = _accessor.offset(VTOFFSET.strides.v); return o == 0 ? false : true } - public var stridesCount: Int32 { let o = _accessor.offset(VTOFFSET.strides.v); return o == 0 ? 
0 : _accessor.vector(count: o) } - public func strides(at index: Int32) -> Int64 { let o = _accessor.offset(VTOFFSET.strides.v); return o == 0 ? 0 : _accessor.directRead(of: Int64.self, offset: _accessor.vector(at: o) + index * 8) } - public var strides: [Int64] { return _accessor.getVector(at: VTOFFSET.strides.v) ?? [] } - /// The location and size of the tensor's data - public var data: org_apache_arrow_flatbuf_Buffer! { let o = _accessor.offset(VTOFFSET.data.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } - public var mutableData: org_apache_arrow_flatbuf_Buffer_Mutable! { let o = _accessor.offset(VTOFFSET.data.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } - public static func startTensor(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } - public static func add(typeType: org_apache_arrow_flatbuf_Type_, _ fbb: inout FlatBufferBuilder) { fbb.add(element: typeType.rawValue, def: 0, at: VTOFFSET.typeType.p) } - public static func add(type: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: type, at: VTOFFSET.type.p) } - public static func addVectorOf(shape: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: shape, at: VTOFFSET.shape.p) } - public static func addVectorOf(strides: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: strides, at: VTOFFSET.strides.p) } - public static func add(data: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout FlatBufferBuilder) { guard let data = data else { return }; fbb.create(struct: data, position: VTOFFSET.data.p) } - public static func endTensor(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [6, 8, 12]); return end } - public static func createTensor( - _ fbb: inout FlatBufferBuilder, - typeType: org_apache_arrow_flatbuf_Type_ = .none_, - typeOffset type: Offset, - shapeVectorOffset shape: Offset, - 
stridesVectorOffset strides: Offset = Offset(), - data: org_apache_arrow_flatbuf_Buffer - ) -> Offset { - let __start = org_apache_arrow_flatbuf_Tensor.startTensor(&fbb) - org_apache_arrow_flatbuf_Tensor.add(typeType: typeType, &fbb) - org_apache_arrow_flatbuf_Tensor.add(type: type, &fbb) - org_apache_arrow_flatbuf_Tensor.addVectorOf(shape: shape, &fbb) - org_apache_arrow_flatbuf_Tensor.addVectorOf(strides: strides, &fbb) - org_apache_arrow_flatbuf_Tensor.add(data: data, &fbb) - return org_apache_arrow_flatbuf_Tensor.endTensor(&fbb, start: __start) - } - - public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { - var _v = try verifier.visitTable(at: position) - try _v.visit(unionKey: VTOFFSET.typeType.p, unionField: VTOFFSET.type.p, unionKeyName: "typeType", fieldName: "type", required: true, completion: { (verifier, key: org_apache_arrow_flatbuf_Type_, pos) in - switch key { - case .none_: - break // NOTE - SWIFT doesnt support none - case .null: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Null.self) - case .int: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Int.self) - case .floatingpoint: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FloatingPoint.self) - case .binary: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Binary.self) - case .utf8: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Utf8.self) - case .bool: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Bool.self) - case .decimal: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Decimal.self) - case .date: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Date.self) - case .time: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Time.self) - case .timestamp: - try 
ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Timestamp.self) - case .interval: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Interval.self) - case .list: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_List.self) - case .struct_: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Struct_.self) - case .union: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Union.self) - case .fixedsizebinary: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeBinary.self) - case .fixedsizelist: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeList.self) - case .map: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Map.self) - case .duration: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Duration.self) - case .largebinary: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeBinary.self) - case .largeutf8: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeUtf8.self) - case .largelist: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeList.self) - case .runendencoded: - try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_RunEndEncoded.self) - } - }) - try _v.visit(field: VTOFFSET.shape.p, fieldName: "shape", required: true, type: ForwardOffset, org_apache_arrow_flatbuf_TensorDim>>.self) - try _v.visit(field: VTOFFSET.strides.p, fieldName: "strides", required: false, type: ForwardOffset>.self) - try _v.visit(field: VTOFFSET.data.p, fieldName: "data", required: true, type: org_apache_arrow_flatbuf_Buffer.self) - _v.finish() - } + static func validateVersion() { FlatBuffersVersion_23_1_4() } + public var __buffer: ByteBuffer! 
{ return _accessor.bb } + private var _accessor: Table + + public static func getRootAsTensor(bb: ByteBuffer) -> org_apache_arrow_flatbuf_Tensor { return org_apache_arrow_flatbuf_Tensor(Table(bb: bb, position: Int32(bb.read(def: UOffset.self, position: bb.reader)) + Int32(bb.reader))) } + + private init(_ t: Table) { _accessor = t } + public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) } + + private enum VTOFFSET: VOffset { + case typeType = 4 + case type = 6 + case shape = 8 + case strides = 10 + case data = 12 + var v: Int32 { Int32(self.rawValue) } + var p: VOffset { self.rawValue } + } + + public var typeType: org_apache_arrow_flatbuf_Type_ { let o = _accessor.offset(VTOFFSET.typeType.v); return o == 0 ? .none_ : org_apache_arrow_flatbuf_Type_(rawValue: _accessor.readBuffer(of: UInt8.self, at: o)) ?? .none_ } + /// The type of data contained in a value cell. Currently only fixed-width + /// value types are supported, no strings or nested types + public func type(type: T.Type) -> T! { let o = _accessor.offset(VTOFFSET.type.v); return _accessor.union(o) } + /// The dimensions of the tensor, optionally named + public var hasShape: Bool { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? false : true } + public var shapeCount: Int32 { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? 0 : _accessor.vector(count: o) } + public func shape(at index: Int32) -> org_apache_arrow_flatbuf_TensorDim? { let o = _accessor.offset(VTOFFSET.shape.v); return o == 0 ? nil : org_apache_arrow_flatbuf_TensorDim(_accessor.bb, o: _accessor.indirect(_accessor.vector(at: o) + index * 4)) } + /// Non-negative byte offsets to advance one value cell along each dimension + /// If omitted, default to row-major order (C-like). + public var hasStrides: Bool { let o = _accessor.offset(VTOFFSET.strides.v); return o == 0 ? false : true } + public var stridesCount: Int32 { let o = _accessor.offset(VTOFFSET.strides.v); return o == 0 ? 
0 : _accessor.vector(count: o) } + public func strides(at index: Int32) -> Int64 { let o = _accessor.offset(VTOFFSET.strides.v); return o == 0 ? 0 : _accessor.directRead(of: Int64.self, offset: _accessor.vector(at: o) + index * 8) } + public var strides: [Int64] { return _accessor.getVector(at: VTOFFSET.strides.v) ?? [] } + /// The location and size of the tensor's data + public var data: org_apache_arrow_flatbuf_Buffer! { let o = _accessor.offset(VTOFFSET.data.v); return _accessor.readBuffer(of: org_apache_arrow_flatbuf_Buffer.self, at: o) } + public var mutableData: org_apache_arrow_flatbuf_Buffer_Mutable! { let o = _accessor.offset(VTOFFSET.data.v); return org_apache_arrow_flatbuf_Buffer_Mutable(_accessor.bb, o: o + _accessor.position) } + public static func startTensor(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 5) } + public static func add(typeType: org_apache_arrow_flatbuf_Type_, _ fbb: inout FlatBufferBuilder) { fbb.add(element: typeType.rawValue, def: 0, at: VTOFFSET.typeType.p) } + public static func add(type: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: type, at: VTOFFSET.type.p) } + public static func addVectorOf(shape: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: shape, at: VTOFFSET.shape.p) } + public static func addVectorOf(strides: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: strides, at: VTOFFSET.strides.p) } + public static func add(data: org_apache_arrow_flatbuf_Buffer?, _ fbb: inout FlatBufferBuilder) { guard let data = data else { return }; fbb.create(struct: data, position: VTOFFSET.data.p) } + public static func endTensor(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); fbb.require(table: end, fields: [6, 8, 12]); return end } + public static func createTensor( + _ fbb: inout FlatBufferBuilder, + typeType: org_apache_arrow_flatbuf_Type_ = .none_, + typeOffset type: Offset, + shapeVectorOffset shape: Offset, + 
stridesVectorOffset strides: Offset = Offset(), + data: org_apache_arrow_flatbuf_Buffer + ) -> Offset { + let __start = org_apache_arrow_flatbuf_Tensor.startTensor(&fbb) + org_apache_arrow_flatbuf_Tensor.add(typeType: typeType, &fbb) + org_apache_arrow_flatbuf_Tensor.add(type: type, &fbb) + org_apache_arrow_flatbuf_Tensor.addVectorOf(shape: shape, &fbb) + org_apache_arrow_flatbuf_Tensor.addVectorOf(strides: strides, &fbb) + org_apache_arrow_flatbuf_Tensor.add(data: data, &fbb) + return org_apache_arrow_flatbuf_Tensor.endTensor(&fbb, start: __start) + } + + public static func verify(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable { + var _v = try verifier.visitTable(at: position) + try _v.visit(unionKey: VTOFFSET.typeType.p, unionField: VTOFFSET.type.p, unionKeyName: "typeType", fieldName: "type", required: true, completion: { (verifier, key: org_apache_arrow_flatbuf_Type_, pos) in + switch key { + case .none_: + break // NOTE - SWIFT doesnt support none + case .null: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Null.self) + case .int: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Int.self) + case .floatingpoint: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FloatingPoint.self) + case .binary: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Binary.self) + case .utf8: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Utf8.self) + case .bool: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Bool.self) + case .decimal: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Decimal.self) + case .date: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Date.self) + case .time: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Time.self) + case .timestamp: + try 
ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Timestamp.self) + case .interval: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Interval.self) + case .list: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_List.self) + case .struct_: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Struct_.self) + case .union: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Union.self) + case .fixedsizebinary: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeBinary.self) + case .fixedsizelist: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_FixedSizeList.self) + case .map: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Map.self) + case .duration: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_Duration.self) + case .largebinary: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeBinary.self) + case .largeutf8: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeUtf8.self) + case .largelist: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_LargeList.self) + case .runendencoded: + try ForwardOffset.verify(&verifier, at: pos, of: org_apache_arrow_flatbuf_RunEndEncoded.self) + } + }) + try _v.visit(field: VTOFFSET.shape.p, fieldName: "shape", required: true, type: ForwardOffset, org_apache_arrow_flatbuf_TensorDim>>.self) + try _v.visit(field: VTOFFSET.strides.p, fieldName: "strides", required: false, type: ForwardOffset>.self) + try _v.visit(field: VTOFFSET.data.p, fieldName: "data", required: true, type: org_apache_arrow_flatbuf_Buffer.self) + _v.finish() + } } diff --git a/Sources/ArrowFlight/Flight.grpc.swift b/Sources/ArrowFlight/Flight.grpc.swift index 45317c1..6534ff3 100644 --- a/Sources/ArrowFlight/Flight.grpc.swift +++ 
b/Sources/ArrowFlight/Flight.grpc.swift @@ -754,7 +754,7 @@ enum Arrow_Flight_Protocol_FlightServiceClientMetadata { Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doPut, Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doExchange, Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.doAction, - Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions, + Arrow_Flight_Protocol_FlightServiceClientMetadata.Methods.listActions ] ) @@ -1279,7 +1279,7 @@ enum Arrow_Flight_Protocol_FlightServiceServerMetadata { Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doPut, Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doExchange, Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.doAction, - Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.listActions, + Arrow_Flight_Protocol_FlightServiceServerMetadata.Methods.listActions ] ) diff --git a/Sources/ArrowFlight/Flight.pb.swift b/Sources/ArrowFlight/Flight.pb.swift index 175bd86..fd9d76b 100644 --- a/Sources/ArrowFlight/Flight.pb.swift +++ b/Sources/ArrowFlight/Flight.pb.swift @@ -95,7 +95,7 @@ extension Arrow_Flight_Protocol_CancelStatus: CaseIterable { .unspecified, .cancelled, .cancelling, - .notCancellable, + .notCancellable ] } @@ -118,7 +118,6 @@ struct Arrow_Flight_Protocol_HandshakeRequest { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } struct Arrow_Flight_Protocol_HandshakeResponse { @@ -136,7 +135,6 @@ struct Arrow_Flight_Protocol_HandshakeResponse { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// @@ -152,7 +150,6 @@ struct Arrow_Flight_Protocol_BasicAuth { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } struct Arrow_Flight_Protocol_Empty { @@ -162,7 +159,6 @@ struct Arrow_Flight_Protocol_Empty { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// @@ -179,7 +175,6 @@ struct Arrow_Flight_Protocol_ActionType { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// 
@@ -194,7 +189,6 @@ struct Arrow_Flight_Protocol_Criteria { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// @@ -210,7 +204,6 @@ struct Arrow_Flight_Protocol_Action { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// @@ -234,8 +227,6 @@ struct Arrow_Flight_Protocol_CancelFlightInfoRequest { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _info: Arrow_Flight_Protocol_FlightInfo? } @@ -260,8 +251,6 @@ struct Arrow_Flight_Protocol_RenewFlightEndpointRequest { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _endpoint: Arrow_Flight_Protocol_FlightEndpoint? } @@ -276,7 +265,6 @@ struct Arrow_Flight_Protocol_Result { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// @@ -292,7 +280,6 @@ struct Arrow_Flight_Protocol_CancelFlightInfoResult { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// @@ -310,7 +297,6 @@ struct Arrow_Flight_Protocol_SchemaResult { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// @@ -377,7 +363,6 @@ struct Arrow_Flight_Protocol_FlightDescriptor { } } - init() {} } #if swift(>=4.2) @@ -387,7 +372,7 @@ extension Arrow_Flight_Protocol_FlightDescriptor.DescriptorType: CaseIterable { static var allCases: [Arrow_Flight_Protocol_FlightDescriptor.DescriptorType] = [ .unknown, .path, - .cmd, + .cmd ] } @@ -455,8 +440,6 @@ struct Arrow_Flight_Protocol_FlightInfo { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor? } @@ -512,8 +495,6 @@ struct Arrow_Flight_Protocol_FlightEndpoint { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _ticket: Arrow_Flight_Protocol_Ticket? fileprivate var _expirationTime: SwiftProtobuf.Google_Protobuf_Timestamp? 
} @@ -530,7 +511,6 @@ struct Arrow_Flight_Protocol_Location { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// @@ -548,7 +528,6 @@ struct Arrow_Flight_Protocol_Ticket { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } /// @@ -588,8 +567,6 @@ struct Arrow_Flight_Protocol_FlightData { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _flightDescriptor: Arrow_Flight_Protocol_FlightDescriptor? } @@ -604,7 +581,6 @@ struct Arrow_Flight_Protocol_PutResult { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} } #if swift(>=5.5) && canImport(_Concurrency) @@ -640,7 +616,7 @@ extension Arrow_Flight_Protocol_CancelStatus: SwiftProtobuf._ProtoNameProviding 0: .same(proto: "CANCEL_STATUS_UNSPECIFIED"), 1: .same(proto: "CANCEL_STATUS_CANCELLED"), 2: .same(proto: "CANCEL_STATUS_CANCELLING"), - 3: .same(proto: "CANCEL_STATUS_NOT_CANCELLABLE"), + 3: .same(proto: "CANCEL_STATUS_NOT_CANCELLABLE") ] } @@ -648,7 +624,7 @@ extension Arrow_Flight_Protocol_HandshakeRequest: SwiftProtobuf.Message, SwiftPr static let protoMessageName: String = _protobuf_package + ".HandshakeRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .standard(proto: "protocol_version"), - 2: .same(proto: "payload"), + 2: .same(proto: "payload") ] mutating func decodeMessage(decoder: inout D) throws { @@ -686,7 +662,7 @@ extension Arrow_Flight_Protocol_HandshakeResponse: SwiftProtobuf.Message, SwiftP static let protoMessageName: String = _protobuf_package + ".HandshakeResponse" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .standard(proto: "protocol_version"), - 2: .same(proto: "payload"), + 2: .same(proto: "payload") ] mutating func decodeMessage(decoder: inout D) throws { @@ -724,7 +700,7 @@ extension Arrow_Flight_Protocol_BasicAuth: SwiftProtobuf.Message, SwiftProtobuf. 
static let protoMessageName: String = _protobuf_package + ".BasicAuth" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 2: .same(proto: "username"), - 3: .same(proto: "password"), + 3: .same(proto: "password") ] mutating func decodeMessage(decoder: inout D) throws { @@ -780,7 +756,7 @@ extension Arrow_Flight_Protocol_ActionType: SwiftProtobuf.Message, SwiftProtobuf static let protoMessageName: String = _protobuf_package + ".ActionType" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "type"), - 2: .same(proto: "description"), + 2: .same(proto: "description") ] mutating func decodeMessage(decoder: inout D) throws { @@ -817,7 +793,7 @@ extension Arrow_Flight_Protocol_ActionType: SwiftProtobuf.Message, SwiftProtobuf extension Arrow_Flight_Protocol_Criteria: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".Criteria" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "expression"), + 1: .same(proto: "expression") ] mutating func decodeMessage(decoder: inout D) throws { @@ -850,7 +826,7 @@ extension Arrow_Flight_Protocol_Action: SwiftProtobuf.Message, SwiftProtobuf._Me static let protoMessageName: String = _protobuf_package + ".Action" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "type"), - 2: .same(proto: "body"), + 2: .same(proto: "body") ] mutating func decodeMessage(decoder: inout D) throws { @@ -887,7 +863,7 @@ extension Arrow_Flight_Protocol_Action: SwiftProtobuf.Message, SwiftProtobuf._Me extension Arrow_Flight_Protocol_CancelFlightInfoRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".CancelFlightInfoRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "info"), + 1: .same(proto: "info") ] mutating func 
decodeMessage(decoder: inout D) throws { @@ -923,7 +899,7 @@ extension Arrow_Flight_Protocol_CancelFlightInfoRequest: SwiftProtobuf.Message, extension Arrow_Flight_Protocol_RenewFlightEndpointRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".RenewFlightEndpointRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "endpoint"), + 1: .same(proto: "endpoint") ] mutating func decodeMessage(decoder: inout D) throws { @@ -959,7 +935,7 @@ extension Arrow_Flight_Protocol_RenewFlightEndpointRequest: SwiftProtobuf.Messag extension Arrow_Flight_Protocol_Result: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".Result" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "body"), + 1: .same(proto: "body") ] mutating func decodeMessage(decoder: inout D) throws { @@ -991,7 +967,7 @@ extension Arrow_Flight_Protocol_Result: SwiftProtobuf.Message, SwiftProtobuf._Me extension Arrow_Flight_Protocol_CancelFlightInfoResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".CancelFlightInfoResult" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "status"), + 1: .same(proto: "status") ] mutating func decodeMessage(decoder: inout D) throws { @@ -1023,7 +999,7 @@ extension Arrow_Flight_Protocol_CancelFlightInfoResult: SwiftProtobuf.Message, S extension Arrow_Flight_Protocol_SchemaResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".SchemaResult" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "schema"), + 1: .same(proto: "schema") ] 
mutating func decodeMessage(decoder: inout D) throws { @@ -1057,7 +1033,7 @@ extension Arrow_Flight_Protocol_FlightDescriptor: SwiftProtobuf.Message, SwiftPr static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "type"), 2: .same(proto: "cmd"), - 3: .same(proto: "path"), + 3: .same(proto: "path") ] mutating func decodeMessage(decoder: inout D) throws { @@ -1100,7 +1076,7 @@ extension Arrow_Flight_Protocol_FlightDescriptor.DescriptorType: SwiftProtobuf._ static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "UNKNOWN"), 1: .same(proto: "PATH"), - 2: .same(proto: "CMD"), + 2: .same(proto: "CMD") ] } @@ -1112,7 +1088,7 @@ extension Arrow_Flight_Protocol_FlightInfo: SwiftProtobuf.Message, SwiftProtobuf 3: .same(proto: "endpoint"), 4: .standard(proto: "total_records"), 5: .standard(proto: "total_bytes"), - 6: .same(proto: "ordered"), + 6: .same(proto: "ordered") ] mutating func decodeMessage(decoder: inout D) throws { @@ -1175,7 +1151,7 @@ extension Arrow_Flight_Protocol_FlightEndpoint: SwiftProtobuf.Message, SwiftProt static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "ticket"), 2: .same(proto: "location"), - 3: .standard(proto: "expiration_time"), + 3: .standard(proto: "expiration_time") ] mutating func decodeMessage(decoder: inout D) throws { @@ -1221,7 +1197,7 @@ extension Arrow_Flight_Protocol_FlightEndpoint: SwiftProtobuf.Message, SwiftProt extension Arrow_Flight_Protocol_Location: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".Location" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "uri"), + 1: .same(proto: "uri") ] mutating func decodeMessage(decoder: inout D) throws { @@ -1253,7 +1229,7 @@ extension Arrow_Flight_Protocol_Location: SwiftProtobuf.Message, SwiftProtobuf._ extension Arrow_Flight_Protocol_Ticket: SwiftProtobuf.Message, 
SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".Ticket" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "ticket"), + 1: .same(proto: "ticket") ] mutating func decodeMessage(decoder: inout D) throws { @@ -1288,7 +1264,7 @@ extension Arrow_Flight_Protocol_FlightData: SwiftProtobuf.Message, SwiftProtobuf 1: .standard(proto: "flight_descriptor"), 2: .standard(proto: "data_header"), 3: .standard(proto: "app_metadata"), - 1000: .standard(proto: "data_body"), + 1000: .standard(proto: "data_body") ] mutating func decodeMessage(decoder: inout D) throws { @@ -1339,7 +1315,7 @@ extension Arrow_Flight_Protocol_FlightData: SwiftProtobuf.Message, SwiftProtobuf extension Arrow_Flight_Protocol_PutResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".PutResult" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "app_metadata"), + 1: .standard(proto: "app_metadata") ] mutating func decodeMessage(decoder: inout D) throws { diff --git a/Sources/ArrowFlight/FlightSql.pb.swift b/Sources/ArrowFlight/FlightSql.pb.swift index 629a0b2..16e0465 100644 --- a/Sources/ArrowFlight/FlightSql.pb.swift +++ b/Sources/ArrowFlight/FlightSql.pb.swift @@ -1014,7 +1014,7 @@ extension Arrow_Flight_Protocol_Sql_SqlInfo: CaseIterable { .sqlSavepointsSupported, .sqlNamedParametersSupported, .sqlLocatorsUpdateCopy, - .sqlStoredFunctionsUsingCallSyntaxSupported, + .sqlStoredFunctionsUsingCallSyntaxSupported ] } @@ -1066,7 +1066,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedTransaction: CaseIterable { static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedTransaction] = [ .none, .transaction, - .savepoint, + .savepoint ] } @@ -1113,7 +1113,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedCaseSensitivity: CaseIterable { 
.sqlCaseSensitivityUnknown, .sqlCaseSensitivityCaseInsensitive, .sqlCaseSensitivityUppercase, - .sqlCaseSensitivityLowercase, + .sqlCaseSensitivityLowercase ] } @@ -1160,7 +1160,7 @@ extension Arrow_Flight_Protocol_Sql_SqlNullOrdering: CaseIterable { .sqlNullsSortedHigh, .sqlNullsSortedLow, .sqlNullsSortedAtStart, - .sqlNullsSortedAtEnd, + .sqlNullsSortedAtEnd ] } @@ -1203,7 +1203,7 @@ extension Arrow_Flight_Protocol_Sql_SupportedSqlGrammar: CaseIterable { static var allCases: [Arrow_Flight_Protocol_Sql_SupportedSqlGrammar] = [ .sqlMinimumGrammar, .sqlCoreGrammar, - .sqlExtendedGrammar, + .sqlExtendedGrammar ] } @@ -1246,7 +1246,7 @@ extension Arrow_Flight_Protocol_Sql_SupportedAnsi92SqlGrammarLevel: CaseIterable static var allCases: [Arrow_Flight_Protocol_Sql_SupportedAnsi92SqlGrammarLevel] = [ .ansi92EntrySql, .ansi92IntermediateSql, - .ansi92FullSql, + .ansi92FullSql ] } @@ -1289,7 +1289,7 @@ extension Arrow_Flight_Protocol_Sql_SqlOuterJoinsSupportLevel: CaseIterable { static var allCases: [Arrow_Flight_Protocol_Sql_SqlOuterJoinsSupportLevel] = [ .sqlJoinsUnsupported, .sqlLimitedOuterJoins, - .sqlFullOuterJoins, + .sqlFullOuterJoins ] } @@ -1328,7 +1328,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedGroupBy: CaseIterable { // The compiler won't synthesize support with the UNRECOGNIZED case. static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedGroupBy] = [ .sqlGroupByUnrelated, - .sqlGroupByBeyondSelect, + .sqlGroupByBeyondSelect ] } @@ -1371,7 +1371,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedElementActions: CaseIterable { static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedElementActions] = [ .sqlElementInProcedureCalls, .sqlElementInIndexDefinitions, - .sqlElementInPrivilegeDefinitions, + .sqlElementInPrivilegeDefinitions ] } @@ -1410,7 +1410,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedPositionedCommands: CaseIterable // The compiler won't synthesize support with the UNRECOGNIZED case. 
static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedPositionedCommands] = [ .sqlPositionedDelete, - .sqlPositionedUpdate, + .sqlPositionedUpdate ] } @@ -1457,7 +1457,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedSubqueries: CaseIterable { .sqlSubqueriesInComparisons, .sqlSubqueriesInExists, .sqlSubqueriesInIns, - .sqlSubqueriesInQuantifieds, + .sqlSubqueriesInQuantifieds ] } @@ -1496,7 +1496,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedUnions: CaseIterable { // The compiler won't synthesize support with the UNRECOGNIZED case. static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedUnions] = [ .sqlUnion, - .sqlUnionAll, + .sqlUnionAll ] } @@ -1547,7 +1547,7 @@ extension Arrow_Flight_Protocol_Sql_SqlTransactionIsolationLevel: CaseIterable { .sqlTransactionReadUncommitted, .sqlTransactionReadCommitted, .sqlTransactionRepeatableRead, - .sqlTransactionSerializable, + .sqlTransactionSerializable ] } @@ -1590,7 +1590,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedTransactions: CaseIterable { static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedTransactions] = [ .sqlTransactionUnspecified, .sqlDataDefinitionTransactions, - .sqlDataManipulationTransactions, + .sqlDataManipulationTransactions ] } @@ -1637,7 +1637,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedResultSetType: CaseIterable { .sqlResultSetTypeUnspecified, .sqlResultSetTypeForwardOnly, .sqlResultSetTypeScrollInsensitive, - .sqlResultSetTypeScrollSensitive, + .sqlResultSetTypeScrollSensitive ] } @@ -1680,7 +1680,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedResultSetConcurrency: CaseIterab static var allCases: [Arrow_Flight_Protocol_Sql_SqlSupportedResultSetConcurrency] = [ .sqlResultSetConcurrencyUnspecified, .sqlResultSetConcurrencyReadOnly, - .sqlResultSetConcurrencyUpdatable, + .sqlResultSetConcurrencyUpdatable ] } @@ -1791,7 +1791,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportsConvert: CaseIterable { .sqlConvertTimestamp, .sqlConvertTinyint, 
.sqlConvertVarbinary, - .sqlConvertVarchar, + .sqlConvertVarchar ] } @@ -1921,7 +1921,7 @@ extension Arrow_Flight_Protocol_Sql_XdbcDataType: CaseIterable { .xdbcTinyint, .xdbcBit, .xdbcWchar, - .xdbcWvarchar, + .xdbcWvarchar ] } @@ -2067,7 +2067,7 @@ extension Arrow_Flight_Protocol_Sql_XdbcDatetimeSubcode: CaseIterable { .xdbcSubcodeIntervalDayToSecond, .xdbcSubcodeIntervalHourToMinute, .xdbcSubcodeIntervalHourToSecond, - .xdbcSubcodeIntervalMinuteToSecond, + .xdbcSubcodeIntervalMinuteToSecond ] } @@ -2119,7 +2119,7 @@ extension Arrow_Flight_Protocol_Sql_Nullable: CaseIterable { static var allCases: [Arrow_Flight_Protocol_Sql_Nullable] = [ .nullabilityNoNulls, .nullabilityNullable, - .nullabilityUnknown, + .nullabilityUnknown ] } @@ -2183,7 +2183,7 @@ extension Arrow_Flight_Protocol_Sql_Searchable: CaseIterable { .none, .char, .basic, - .full, + .full ] } @@ -2234,7 +2234,7 @@ extension Arrow_Flight_Protocol_Sql_UpdateDeleteRules: CaseIterable { .restrict, .setNull, .noAction, - .setDefault, + .setDefault ] } @@ -2284,8 +2284,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetSqlInfo { var info: [UInt32] = [] var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -2364,8 +2362,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetXdbcTypeInfo { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _dataType: Int32? } @@ -2387,8 +2383,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetCatalogs { // methods supported on all messages. var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -2441,8 +2435,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetDbSchemas { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _catalog: String? fileprivate var _dbSchemaFilterPattern: String? } @@ -2536,8 +2528,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetTables { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _catalog: String? 
fileprivate var _dbSchemaFilterPattern: String? fileprivate var _tableNameFilterPattern: String? @@ -2562,8 +2552,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetTableTypes { // methods supported on all messages. var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -2620,8 +2608,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetPrimaryKeys { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _catalog: String? fileprivate var _dbSchema: String? } @@ -2689,8 +2675,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetExportedKeys { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _catalog: String? fileprivate var _dbSchema: String? } @@ -2762,8 +2746,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetImportedKeys { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _catalog: String? fileprivate var _dbSchema: String? } @@ -2870,8 +2852,6 @@ struct Arrow_Flight_Protocol_Sql_CommandGetCrossReference { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _pkCatalog: String? fileprivate var _pkDbSchema: String? fileprivate var _fkCatalog: String? @@ -2902,8 +2882,6 @@ struct Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementRequest { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _transactionID: Data? } @@ -2926,8 +2904,6 @@ struct Arrow_Flight_Protocol_Sql_SubstraitPlan { var version: String = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -2962,8 +2938,6 @@ struct Arrow_Flight_Protocol_Sql_ActionCreatePreparedSubstraitPlanRequest { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _plan: Arrow_Flight_Protocol_Sql_SubstraitPlan? fileprivate var _transactionID: Data? 
} @@ -2993,8 +2967,6 @@ struct Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementResult { var parameterSchema: Data = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3009,8 +2981,6 @@ struct Arrow_Flight_Protocol_Sql_ActionClosePreparedStatementRequest { var preparedStatementHandle: Data = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3022,8 +2992,6 @@ struct Arrow_Flight_Protocol_Sql_ActionBeginTransactionRequest { // methods supported on all messages. var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3044,8 +3012,6 @@ struct Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest { var name: String = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3065,8 +3031,6 @@ struct Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult { var transactionID: Data = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3086,8 +3050,6 @@ struct Arrow_Flight_Protocol_Sql_ActionBeginSavepointResult { var savepointID: Data = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3143,8 +3105,6 @@ struct Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest { } } } - - init() {} } #if swift(>=4.2) @@ -3154,7 +3114,7 @@ extension Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction: static var allCases: [Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction] = [ .unspecified, .commit, - .rollback, + .rollback ] } @@ -3215,8 +3175,6 @@ struct Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest { } } } - - init() {} } #if swift(>=4.2) @@ -3226,7 +3184,7 @@ extension Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint: Case static var allCases: [Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint] = [ .unspecified, .release, - .rollback, + .rollback ] } @@ -3269,8 +3227,6 @@ struct Arrow_Flight_Protocol_Sql_CommandStatementQuery { var 
unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _transactionID: Data? } @@ -3320,8 +3276,6 @@ struct Arrow_Flight_Protocol_Sql_CommandStatementSubstraitPlan { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _plan: Arrow_Flight_Protocol_Sql_SubstraitPlan? fileprivate var _transactionID: Data? } @@ -3338,8 +3292,6 @@ struct Arrow_Flight_Protocol_Sql_TicketStatementQuery { var statementHandle: Data = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3368,8 +3320,6 @@ struct Arrow_Flight_Protocol_Sql_CommandPreparedStatementQuery { var preparedStatementHandle: Data = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3396,8 +3346,6 @@ struct Arrow_Flight_Protocol_Sql_CommandStatementUpdate { var unknownFields = SwiftProtobuf.UnknownStorage() - init() {} - fileprivate var _transactionID: Data? } @@ -3414,8 +3362,6 @@ struct Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate { var preparedStatementHandle: Data = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3432,8 +3378,6 @@ struct Arrow_Flight_Protocol_Sql_DoPutUpdateResult { var recordCount: Int64 = 0 var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3464,8 +3408,6 @@ struct Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest { var info: Data = .init() var unknownFields = SwiftProtobuf.UnknownStorage() - - init() {} } /// @@ -3529,8 +3471,6 @@ struct Arrow_Flight_Protocol_Sql_ActionCancelQueryResult { } } } - - init() {} } #if swift(>=4.2) @@ -3541,7 +3481,7 @@ extension Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult: CaseIt .unspecified, .cancelled, .cancelling, - .notCancellable, + .notCancellable ] } @@ -3640,7 +3580,7 @@ extension SwiftProtobuf.Google_Protobuf_MessageOptions { /// in parsing, or it can be combined with other `SwiftProtobuf.SimpleExtensionMap`s to create /// a larger 
`SwiftProtobuf.SimpleExtensionMap`. let Arrow_Flight_Protocol_Sql_FlightSql_Extensions: SwiftProtobuf.SimpleExtensionMap = [ - Arrow_Flight_Protocol_Sql_Extensions_experimental, + Arrow_Flight_Protocol_Sql_Extensions_experimental ] // Extension Objects - The only reason these might be needed is when manually @@ -3746,7 +3686,7 @@ extension Arrow_Flight_Protocol_Sql_SqlInfo: SwiftProtobuf._ProtoNameProviding { 573: .same(proto: "SQL_SAVEPOINTS_SUPPORTED"), 574: .same(proto: "SQL_NAMED_PARAMETERS_SUPPORTED"), 575: .same(proto: "SQL_LOCATORS_UPDATE_COPY"), - 576: .same(proto: "SQL_STORED_FUNCTIONS_USING_CALL_SYNTAX_SUPPORTED"), + 576: .same(proto: "SQL_STORED_FUNCTIONS_USING_CALL_SYNTAX_SUPPORTED") ] } @@ -3754,7 +3694,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedTransaction: SwiftProtobuf._Prot static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "SQL_SUPPORTED_TRANSACTION_NONE"), 1: .same(proto: "SQL_SUPPORTED_TRANSACTION_TRANSACTION"), - 2: .same(proto: "SQL_SUPPORTED_TRANSACTION_SAVEPOINT"), + 2: .same(proto: "SQL_SUPPORTED_TRANSACTION_SAVEPOINT") ] } @@ -3763,7 +3703,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedCaseSensitivity: SwiftProtobuf._ 0: .same(proto: "SQL_CASE_SENSITIVITY_UNKNOWN"), 1: .same(proto: "SQL_CASE_SENSITIVITY_CASE_INSENSITIVE"), 2: .same(proto: "SQL_CASE_SENSITIVITY_UPPERCASE"), - 3: .same(proto: "SQL_CASE_SENSITIVITY_LOWERCASE"), + 3: .same(proto: "SQL_CASE_SENSITIVITY_LOWERCASE") ] } @@ -3772,7 +3712,7 @@ extension Arrow_Flight_Protocol_Sql_SqlNullOrdering: SwiftProtobuf._ProtoNamePro 0: .same(proto: "SQL_NULLS_SORTED_HIGH"), 1: .same(proto: "SQL_NULLS_SORTED_LOW"), 2: .same(proto: "SQL_NULLS_SORTED_AT_START"), - 3: .same(proto: "SQL_NULLS_SORTED_AT_END"), + 3: .same(proto: "SQL_NULLS_SORTED_AT_END") ] } @@ -3780,7 +3720,7 @@ extension Arrow_Flight_Protocol_Sql_SupportedSqlGrammar: SwiftProtobuf._ProtoNam static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "SQL_MINIMUM_GRAMMAR"), 1: 
.same(proto: "SQL_CORE_GRAMMAR"), - 2: .same(proto: "SQL_EXTENDED_GRAMMAR"), + 2: .same(proto: "SQL_EXTENDED_GRAMMAR") ] } @@ -3788,7 +3728,7 @@ extension Arrow_Flight_Protocol_Sql_SupportedAnsi92SqlGrammarLevel: SwiftProtobu static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "ANSI92_ENTRY_SQL"), 1: .same(proto: "ANSI92_INTERMEDIATE_SQL"), - 2: .same(proto: "ANSI92_FULL_SQL"), + 2: .same(proto: "ANSI92_FULL_SQL") ] } @@ -3796,14 +3736,14 @@ extension Arrow_Flight_Protocol_Sql_SqlOuterJoinsSupportLevel: SwiftProtobuf._Pr static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "SQL_JOINS_UNSUPPORTED"), 1: .same(proto: "SQL_LIMITED_OUTER_JOINS"), - 2: .same(proto: "SQL_FULL_OUTER_JOINS"), + 2: .same(proto: "SQL_FULL_OUTER_JOINS") ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedGroupBy: SwiftProtobuf._ProtoNameProviding { static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "SQL_GROUP_BY_UNRELATED"), - 1: .same(proto: "SQL_GROUP_BY_BEYOND_SELECT"), + 1: .same(proto: "SQL_GROUP_BY_BEYOND_SELECT") ] } @@ -3811,14 +3751,14 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedElementActions: SwiftProtobuf._P static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "SQL_ELEMENT_IN_PROCEDURE_CALLS"), 1: .same(proto: "SQL_ELEMENT_IN_INDEX_DEFINITIONS"), - 2: .same(proto: "SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS"), + 2: .same(proto: "SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS") ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedPositionedCommands: SwiftProtobuf._ProtoNameProviding { static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "SQL_POSITIONED_DELETE"), - 1: .same(proto: "SQL_POSITIONED_UPDATE"), + 1: .same(proto: "SQL_POSITIONED_UPDATE") ] } @@ -3827,14 +3767,14 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedSubqueries: SwiftProtobuf._Proto 0: .same(proto: "SQL_SUBQUERIES_IN_COMPARISONS"), 1: .same(proto: "SQL_SUBQUERIES_IN_EXISTS"), 2: .same(proto: "SQL_SUBQUERIES_IN_INS"), - 
3: .same(proto: "SQL_SUBQUERIES_IN_QUANTIFIEDS"), + 3: .same(proto: "SQL_SUBQUERIES_IN_QUANTIFIEDS") ] } extension Arrow_Flight_Protocol_Sql_SqlSupportedUnions: SwiftProtobuf._ProtoNameProviding { static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "SQL_UNION"), - 1: .same(proto: "SQL_UNION_ALL"), + 1: .same(proto: "SQL_UNION_ALL") ] } @@ -3844,7 +3784,7 @@ extension Arrow_Flight_Protocol_Sql_SqlTransactionIsolationLevel: SwiftProtobuf. 1: .same(proto: "SQL_TRANSACTION_READ_UNCOMMITTED"), 2: .same(proto: "SQL_TRANSACTION_READ_COMMITTED"), 3: .same(proto: "SQL_TRANSACTION_REPEATABLE_READ"), - 4: .same(proto: "SQL_TRANSACTION_SERIALIZABLE"), + 4: .same(proto: "SQL_TRANSACTION_SERIALIZABLE") ] } @@ -3852,7 +3792,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedTransactions: SwiftProtobuf._Pro static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "SQL_TRANSACTION_UNSPECIFIED"), 1: .same(proto: "SQL_DATA_DEFINITION_TRANSACTIONS"), - 2: .same(proto: "SQL_DATA_MANIPULATION_TRANSACTIONS"), + 2: .same(proto: "SQL_DATA_MANIPULATION_TRANSACTIONS") ] } @@ -3861,7 +3801,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedResultSetType: SwiftProtobuf._Pr 0: .same(proto: "SQL_RESULT_SET_TYPE_UNSPECIFIED"), 1: .same(proto: "SQL_RESULT_SET_TYPE_FORWARD_ONLY"), 2: .same(proto: "SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE"), - 3: .same(proto: "SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE"), + 3: .same(proto: "SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE") ] } @@ -3869,7 +3809,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportedResultSetConcurrency: SwiftProto static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED"), 1: .same(proto: "SQL_RESULT_SET_CONCURRENCY_READ_ONLY"), - 2: .same(proto: "SQL_RESULT_SET_CONCURRENCY_UPDATABLE"), + 2: .same(proto: "SQL_RESULT_SET_CONCURRENCY_UPDATABLE") ] } @@ -3894,7 +3834,7 @@ extension Arrow_Flight_Protocol_Sql_SqlSupportsConvert: SwiftProtobuf._ProtoName 16: 
.same(proto: "SQL_CONVERT_TIMESTAMP"), 17: .same(proto: "SQL_CONVERT_TINYINT"), 18: .same(proto: "SQL_CONVERT_VARBINARY"), - 19: .same(proto: "SQL_CONVERT_VARCHAR"), + 19: .same(proto: "SQL_CONVERT_VARCHAR") ] } @@ -3923,7 +3863,7 @@ extension Arrow_Flight_Protocol_Sql_XdbcDataType: SwiftProtobuf._ProtoNameProvid 12: .same(proto: "XDBC_VARCHAR"), 91: .same(proto: "XDBC_DATE"), 92: .same(proto: "XDBC_TIME"), - 93: .same(proto: "XDBC_TIMESTAMP"), + 93: .same(proto: "XDBC_TIMESTAMP") ] } @@ -3955,7 +3895,7 @@ extension Arrow_Flight_Protocol_Sql_XdbcDatetimeSubcode: SwiftProtobuf._ProtoNam 110: .same(proto: "XDBC_SUBCODE_INTERVAL_DAY_TO_SECOND"), 111: .same(proto: "XDBC_SUBCODE_INTERVAL_HOUR_TO_MINUTE"), 112: .same(proto: "XDBC_SUBCODE_INTERVAL_HOUR_TO_SECOND"), - 113: .same(proto: "XDBC_SUBCODE_INTERVAL_MINUTE_TO_SECOND"), + 113: .same(proto: "XDBC_SUBCODE_INTERVAL_MINUTE_TO_SECOND") ] } @@ -3963,7 +3903,7 @@ extension Arrow_Flight_Protocol_Sql_Nullable: SwiftProtobuf._ProtoNameProviding static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "NULLABILITY_NO_NULLS"), 1: .same(proto: "NULLABILITY_NULLABLE"), - 2: .same(proto: "NULLABILITY_UNKNOWN"), + 2: .same(proto: "NULLABILITY_UNKNOWN") ] } @@ -3972,7 +3912,7 @@ extension Arrow_Flight_Protocol_Sql_Searchable: SwiftProtobuf._ProtoNameProvidin 0: .same(proto: "SEARCHABLE_NONE"), 1: .same(proto: "SEARCHABLE_CHAR"), 2: .same(proto: "SEARCHABLE_BASIC"), - 3: .same(proto: "SEARCHABLE_FULL"), + 3: .same(proto: "SEARCHABLE_FULL") ] } @@ -3982,14 +3922,14 @@ extension Arrow_Flight_Protocol_Sql_UpdateDeleteRules: SwiftProtobuf._ProtoNameP 1: .same(proto: "RESTRICT"), 2: .same(proto: "SET_NULL"), 3: .same(proto: "NO_ACTION"), - 4: .same(proto: "SET_DEFAULT"), + 4: .same(proto: "SET_DEFAULT") ] } extension Arrow_Flight_Protocol_Sql_CommandGetSqlInfo: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + 
".CommandGetSqlInfo" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "info"), + 1: .same(proto: "info") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4021,7 +3961,7 @@ extension Arrow_Flight_Protocol_Sql_CommandGetSqlInfo: SwiftProtobuf.Message, Sw extension Arrow_Flight_Protocol_Sql_CommandGetXdbcTypeInfo: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".CommandGetXdbcTypeInfo" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "data_type"), + 1: .standard(proto: "data_type") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4076,7 +4016,7 @@ extension Arrow_Flight_Protocol_Sql_CommandGetDbSchemas: SwiftProtobuf.Message, static let protoMessageName: String = _protobuf_package + ".CommandGetDbSchemas" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "catalog"), - 2: .standard(proto: "db_schema_filter_pattern"), + 2: .standard(proto: "db_schema_filter_pattern") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4121,7 +4061,7 @@ extension Arrow_Flight_Protocol_Sql_CommandGetTables: SwiftProtobuf.Message, Swi 2: .standard(proto: "db_schema_filter_pattern"), 3: .standard(proto: "table_name_filter_pattern"), 4: .standard(proto: "table_types"), - 5: .standard(proto: "include_schema"), + 5: .standard(proto: "include_schema") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4197,7 +4137,7 @@ extension Arrow_Flight_Protocol_Sql_CommandGetPrimaryKeys: SwiftProtobuf.Message static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "catalog"), 2: .standard(proto: "db_schema"), - 3: .same(proto: "table"), + 3: .same(proto: "table") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4245,7 +4185,7 @@ extension Arrow_Flight_Protocol_Sql_CommandGetExportedKeys: SwiftProtobuf.Messag static let _protobuf_nameMap: 
SwiftProtobuf._NameMap = [ 1: .same(proto: "catalog"), 2: .standard(proto: "db_schema"), - 3: .same(proto: "table"), + 3: .same(proto: "table") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4293,7 +4233,7 @@ extension Arrow_Flight_Protocol_Sql_CommandGetImportedKeys: SwiftProtobuf.Messag static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "catalog"), 2: .standard(proto: "db_schema"), - 3: .same(proto: "table"), + 3: .same(proto: "table") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4344,7 +4284,7 @@ extension Arrow_Flight_Protocol_Sql_CommandGetCrossReference: SwiftProtobuf.Mess 3: .standard(proto: "pk_table"), 4: .standard(proto: "fk_catalog"), 5: .standard(proto: "fk_db_schema"), - 6: .standard(proto: "fk_table"), + 6: .standard(proto: "fk_table") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4406,7 +4346,7 @@ extension Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementRequest: SwiftP static let protoMessageName: String = _protobuf_package + ".ActionCreatePreparedStatementRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "query"), - 2: .standard(proto: "transaction_id"), + 2: .standard(proto: "transaction_id") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4448,7 +4388,7 @@ extension Arrow_Flight_Protocol_Sql_SubstraitPlan: SwiftProtobuf.Message, SwiftP static let protoMessageName: String = _protobuf_package + ".SubstraitPlan" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "plan"), - 2: .same(proto: "version"), + 2: .same(proto: "version") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4486,7 +4426,7 @@ extension Arrow_Flight_Protocol_Sql_ActionCreatePreparedSubstraitPlanRequest: Sw static let protoMessageName: String = _protobuf_package + ".ActionCreatePreparedSubstraitPlanRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "plan"), - 2: .standard(proto: "transaction_id"), + 2: 
.standard(proto: "transaction_id") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4529,7 +4469,7 @@ extension Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementResult: SwiftPr static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .standard(proto: "prepared_statement_handle"), 2: .standard(proto: "dataset_schema"), - 3: .standard(proto: "parameter_schema"), + 3: .standard(proto: "parameter_schema") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4571,7 +4511,7 @@ extension Arrow_Flight_Protocol_Sql_ActionCreatePreparedStatementResult: SwiftPr extension Arrow_Flight_Protocol_Sql_ActionClosePreparedStatementRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".ActionClosePreparedStatementRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "prepared_statement_handle"), + 1: .standard(proto: "prepared_statement_handle") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4622,7 +4562,7 @@ extension Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest: SwiftProtobuf.M static let protoMessageName: String = _protobuf_package + ".ActionBeginSavepointRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .standard(proto: "transaction_id"), - 2: .same(proto: "name"), + 2: .same(proto: "name") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4659,7 +4599,7 @@ extension Arrow_Flight_Protocol_Sql_ActionBeginSavepointRequest: SwiftProtobuf.M extension Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".ActionBeginTransactionResult" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "transaction_id"), + 1: .standard(proto: "transaction_id") ] mutating func 
decodeMessage(decoder: inout D) throws { @@ -4691,7 +4631,7 @@ extension Arrow_Flight_Protocol_Sql_ActionBeginTransactionResult: SwiftProtobuf. extension Arrow_Flight_Protocol_Sql_ActionBeginSavepointResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".ActionBeginSavepointResult" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "savepoint_id"), + 1: .standard(proto: "savepoint_id") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4724,7 +4664,7 @@ extension Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest: SwiftProtobuf.M static let protoMessageName: String = _protobuf_package + ".ActionEndTransactionRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .standard(proto: "transaction_id"), - 2: .same(proto: "action"), + 2: .same(proto: "action") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4762,7 +4702,7 @@ extension Arrow_Flight_Protocol_Sql_ActionEndTransactionRequest.EndTransaction: static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "END_TRANSACTION_UNSPECIFIED"), 1: .same(proto: "END_TRANSACTION_COMMIT"), - 2: .same(proto: "END_TRANSACTION_ROLLBACK"), + 2: .same(proto: "END_TRANSACTION_ROLLBACK") ] } @@ -4770,7 +4710,7 @@ extension Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest: SwiftProtobuf.Mes static let protoMessageName: String = _protobuf_package + ".ActionEndSavepointRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .standard(proto: "savepoint_id"), - 2: .same(proto: "action"), + 2: .same(proto: "action") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4808,7 +4748,7 @@ extension Arrow_Flight_Protocol_Sql_ActionEndSavepointRequest.EndSavepoint: Swif static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 0: .same(proto: "END_SAVEPOINT_UNSPECIFIED"), 1: .same(proto: "END_SAVEPOINT_RELEASE"), - 2: 
.same(proto: "END_SAVEPOINT_ROLLBACK"), + 2: .same(proto: "END_SAVEPOINT_ROLLBACK") ] } @@ -4816,7 +4756,7 @@ extension Arrow_Flight_Protocol_Sql_CommandStatementQuery: SwiftProtobuf.Message static let protoMessageName: String = _protobuf_package + ".CommandStatementQuery" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "query"), - 2: .standard(proto: "transaction_id"), + 2: .standard(proto: "transaction_id") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4858,7 +4798,7 @@ extension Arrow_Flight_Protocol_Sql_CommandStatementSubstraitPlan: SwiftProtobuf static let protoMessageName: String = _protobuf_package + ".CommandStatementSubstraitPlan" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "plan"), - 2: .standard(proto: "transaction_id"), + 2: .standard(proto: "transaction_id") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4899,7 +4839,7 @@ extension Arrow_Flight_Protocol_Sql_CommandStatementSubstraitPlan: SwiftProtobuf extension Arrow_Flight_Protocol_Sql_TicketStatementQuery: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".TicketStatementQuery" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "statement_handle"), + 1: .standard(proto: "statement_handle") ] mutating func decodeMessage(decoder: inout D) throws { @@ -4931,7 +4871,7 @@ extension Arrow_Flight_Protocol_Sql_TicketStatementQuery: SwiftProtobuf.Message, extension Arrow_Flight_Protocol_Sql_CommandPreparedStatementQuery: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".CommandPreparedStatementQuery" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "prepared_statement_handle"), + 1: .standard(proto: "prepared_statement_handle") ] mutating func 
decodeMessage(decoder: inout D) throws { @@ -4964,7 +4904,7 @@ extension Arrow_Flight_Protocol_Sql_CommandStatementUpdate: SwiftProtobuf.Messag static let protoMessageName: String = _protobuf_package + ".CommandStatementUpdate" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "query"), - 2: .standard(proto: "transaction_id"), + 2: .standard(proto: "transaction_id") ] mutating func decodeMessage(decoder: inout D) throws { @@ -5005,7 +4945,7 @@ extension Arrow_Flight_Protocol_Sql_CommandStatementUpdate: SwiftProtobuf.Messag extension Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".CommandPreparedStatementUpdate" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "prepared_statement_handle"), + 1: .standard(proto: "prepared_statement_handle") ] mutating func decodeMessage(decoder: inout D) throws { @@ -5037,7 +4977,7 @@ extension Arrow_Flight_Protocol_Sql_CommandPreparedStatementUpdate: SwiftProtobu extension Arrow_Flight_Protocol_Sql_DoPutUpdateResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".DoPutUpdateResult" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .standard(proto: "record_count"), + 1: .standard(proto: "record_count") ] mutating func decodeMessage(decoder: inout D) throws { @@ -5069,7 +5009,7 @@ extension Arrow_Flight_Protocol_Sql_DoPutUpdateResult: SwiftProtobuf.Message, Sw extension Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".ActionCancelQueryRequest" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "info"), + 1: 
.same(proto: "info") ] mutating func decodeMessage(decoder: inout D) throws { @@ -5101,7 +5041,7 @@ extension Arrow_Flight_Protocol_Sql_ActionCancelQueryRequest: SwiftProtobuf.Mess extension Arrow_Flight_Protocol_Sql_ActionCancelQueryResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { static let protoMessageName: String = _protobuf_package + ".ActionCancelQueryResult" static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "result"), + 1: .same(proto: "result") ] mutating func decodeMessage(decoder: inout D) throws { @@ -5135,6 +5075,6 @@ extension Arrow_Flight_Protocol_Sql_ActionCancelQueryResult.CancelResult: SwiftP 0: .same(proto: "CANCEL_RESULT_UNSPECIFIED"), 1: .same(proto: "CANCEL_RESULT_CANCELLED"), 2: .same(proto: "CANCEL_RESULT_CANCELLING"), - 3: .same(proto: "CANCEL_RESULT_NOT_CANCELLABLE"), + 3: .same(proto: "CANCEL_RESULT_NOT_CANCELLABLE") ] }